hive git commit: HIVE-21171: Skip creating scratch dirs for tez if RPC is on (Vineet Garg, reviewed by Ashutosh Chauhan)
Repository: hive
Updated Branches:
  refs/heads/master 61d8a06b9 -> dfc4b8edb

HIVE-21171: Skip creating scratch dirs for tez if RPC is on (Vineet Garg, reviewed by Ashutosh Chauhan)

Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/dfc4b8ed
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/dfc4b8ed
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/dfc4b8ed

Branch: refs/heads/master
Commit: dfc4b8edbd1ad8c394634c67fbd1f06ba03e4d7f
Parents: 61d8a06
Author: Vineet Garg
Authored: Tue Jan 29 11:00:28 2019 -0800
Committer: Vineet Garg
Committed: Tue Jan 29 11:00:28 2019 -0800

----------------------------------------------------------------------
 .../java/org/apache/hadoop/hive/ql/exec/Utilities.java |  7 +-
 .../org/apache/hadoop/hive/ql/exec/tez/DagUtils.java   | 13 -
 2 files changed, 13 insertions(+), 7 deletions(-)
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/dfc4b8ed/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
index 2ff9ad3..8937b43 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
@@ -667,8 +667,11 @@ public final class Utilities {
       // this is the unique conf ID, which is kept in JobConf as part of the plan file name
       String jobID = UUID.randomUUID().toString();
       Path planPath = new Path(hiveScratchDir, jobID);
-      FileSystem fs = planPath.getFileSystem(conf);
-      fs.mkdirs(planPath);
+      if (!HiveConf.getBoolVar(conf, ConfVars.HIVE_RPC_QUERY_PLAN)) {
+        FileSystem fs = planPath.getFileSystem(conf);
+        // since we are doing RPC creating a directory is un-necessary
+        fs.mkdirs(planPath);
+      }
       HiveConf.setVar(conf, HiveConf.ConfVars.PLAN, planPath.toUri().toString());
     }
   }

http://git-wip-us.apache.org/repos/asf/hive/blob/dfc4b8ed/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java
index 1a88b77..d9340d0 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java
@@ -1500,11 +1500,14 @@ public class DagUtils {
     scratchDir = new Path(scratchDir, userName);

     Path tezDir = getTezDir(scratchDir);
-    FileSystem fs = tezDir.getFileSystem(conf);
-    LOG.debug("TezDir path set " + tezDir + " for user: " + userName);
-    // since we are adding the user name to the scratch dir, we do not
-    // need to give more permissions here
-    fs.mkdirs(tezDir);
+    if (!HiveConf.getBoolVar(conf, ConfVars.HIVE_RPC_QUERY_PLAN)) {
+      FileSystem fs = tezDir.getFileSystem(conf);
+      LOG.debug("TezDir path set " + tezDir + " for user: " + userName);
+      // since we are adding the user name to the scratch dir, we do not
+      // need to give more permissions here
+      // Since we are doing RPC creating a dir is not necessary
+      fs.mkdirs(tezDir);
+    }

     return tezDir;
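As context for the change above, here is a minimal, self-contained sketch of the guard it introduces. The class name, method name, and the literal config key below are illustrative assumptions (Hive itself reads the flag through HiveConf.ConfVars.HIVE_RPC_QUERY_PLAN): when the plan is shipped to Tez over RPC, nothing ever reads the plan directory from the filesystem, so the mkdirs round trip to the NameNode can be skipped.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ScratchDirGuard {
      // Illustrative stand-in for HiveConf.ConfVars.HIVE_RPC_QUERY_PLAN.
      private static final String RPC_QUERY_PLAN = "hive.rpc.query.plan";

      public static Path preparePlanPath(Configuration conf, Path hiveScratchDir,
          String jobID) throws IOException {
        Path planPath = new Path(hiveScratchDir, jobID);
        if (!conf.getBoolean(RPC_QUERY_PLAN, false)) {
          // Plan is serialized to the filesystem, so the directory must exist.
          FileSystem fs = planPath.getFileSystem(conf);
          fs.mkdirs(planPath);
        }
        // With RPC delivery the path is still recorded in the conf, but no
        // directory (and no NameNode round trip) is needed.
        return planPath;
      }
    }

The path is still registered in the configuration either way, which is why the HiveConf.setVar call in the real diff stays outside the new if block.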
hive git commit: HIVE-21077 : Database and Catalogs should have creation time (addendum) (Vihang Karajgaonkar, reviewed by Naveen Gangam)
Repository: hive
Updated Branches:
  refs/heads/branch-3 e6e11a902 -> 7065c92ef

HIVE-21077 : Database and Catalogs should have creation time (addendum) (Vihang Karajgaonkar, reviewed by Naveen Gangam)

Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/7065c92e
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/7065c92e
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/7065c92e

Branch: refs/heads/branch-3
Commit: 7065c92ef2b037f0b95e85362f57c7836b334e47
Parents: e6e11a9
Author: Vihang Karajgaonkar
Authored: Tue Jan 29 10:50:52 2019 -0800
Committer: Vihang Karajgaonkar
Committed: Tue Jan 29 10:50:52 2019 -0800

----------------------------------------------------------------------
 .../src/main/sql/mssql/hive-schema-3.2.0.mssql.sql      | 4 ++--
 .../src/main/sql/mssql/upgrade-3.1.0-to-3.2.0.mssql.sql | 4 ++--
 2 files changed, 4 insertions(+), 4 deletions(-)
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/7065c92e/standalone-metastore/src/main/sql/mssql/hive-schema-3.2.0.mssql.sql
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/sql/mssql/hive-schema-3.2.0.mssql.sql b/standalone-metastore/src/main/sql/mssql/hive-schema-3.2.0.mssql.sql
index b427f00..df0b0da 100644
--- a/standalone-metastore/src/main/sql/mssql/hive-schema-3.2.0.mssql.sql
+++ b/standalone-metastore/src/main/sql/mssql/hive-schema-3.2.0.mssql.sql
@@ -286,7 +286,7 @@ CREATE TABLE DBS
     OWNER_NAME nvarchar(128) NULL,
     OWNER_TYPE nvarchar(10) NULL,
     CTLG_NAME nvarchar(256),
-    CREATE_TIME BIGINT
+    CREATE_TIME INT
 );

 ALTER TABLE DBS ADD CONSTRAINT DBS_PK PRIMARY KEY (DB_ID);
@@ -699,7 +699,7 @@ CREATE TABLE CTLGS (
     "NAME" nvarchar(256),
     "DESC" nvarchar(4000),
     LOCATION_URI nvarchar(4000) not null,
-    CREATE_TIME BIGINT
+    CREATE_TIME INT
 );

 CREATE UNIQUE INDEX UNIQUE_CTLG ON CTLGS ("NAME");

http://git-wip-us.apache.org/repos/asf/hive/blob/7065c92e/standalone-metastore/src/main/sql/mssql/upgrade-3.1.0-to-3.2.0.mssql.sql
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/sql/mssql/upgrade-3.1.0-to-3.2.0.mssql.sql b/standalone-metastore/src/main/sql/mssql/upgrade-3.1.0-to-3.2.0.mssql.sql
index 47a585c..a5bcf78 100644
--- a/standalone-metastore/src/main/sql/mssql/upgrade-3.1.0-to-3.2.0.mssql.sql
+++ b/standalone-metastore/src/main/sql/mssql/upgrade-3.1.0-to-3.2.0.mssql.sql
@@ -17,8 +17,8 @@ ALTER TABLE TXN_WRITE_NOTIFICATION_LOG ADD CONSTRAINT TXN_WRITE_NOTIFICATION_LOG
 INSERT INTO SEQUENCE_TABLE (SEQUENCE_NAME, NEXT_VAL) VALUES ('org.apache.hadoop.hive.metastore.model.MTxnWriteNotificationLog', 1);

 -- HIVE-21077
-ALTER TABLE DBS ADD CREATE_TIME BIGINT;
-ALTER TABLE CTLGS ADD CREATE_TIME BIGINT;
+ALTER TABLE DBS ADD CREATE_TIME INT;
+ALTER TABLE CTLGS ADD CREATE_TIME INT;

 -- These lines need to be last.  Insert any changes above.
 UPDATE VERSION SET SCHEMA_VERSION='3.2.0', VERSION_COMMENT='Hive release version 3.2.0' where VER_ID=1;
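The addendum narrows CREATE_TIME from BIGINT to INT because the metastore records creation time as whole seconds since the Unix epoch, matching the 32-bit integer used on the model side, not milliseconds. A small hedged sketch of the conversion (the class is illustrative; only the seconds-since-epoch convention is the point):

    public class CreateTimeSketch {
      public static void main(String[] args) {
        long nowMillis = System.currentTimeMillis();
        // Stored value: whole seconds, which fit in a signed 32-bit INT
        // (until January 2038), so BIGINT was wider than the data written.
        int createTime = (int) (nowMillis / 1000L);
        System.out.println("CREATE_TIME to persist: " + createTime);
      }
    }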
hive git commit: HIVE-21077 : Database and Catalogs should have creation time (addendum) (Vihang Karajgaonkar, reviewed by Naveen Gangam)
Repository: hive
Updated Branches:
  refs/heads/master 71dfd1d11 -> 61d8a06b9

HIVE-21077 : Database and Catalogs should have creation time (addendum) (Vihang Karajgaonkar, reviewed by Naveen Gangam)

Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/61d8a06b
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/61d8a06b
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/61d8a06b

Branch: refs/heads/master
Commit: 61d8a06b97e7df810d0aea399d1a9dcecb58bac1
Parents: 71dfd1d
Author: Vihang Karajgaonkar
Authored: Mon Jan 28 11:14:05 2019 -0800
Committer: Vihang Karajgaonkar
Committed: Tue Jan 29 10:41:24 2019 -0800

----------------------------------------------------------------------
 .../src/main/sql/mssql/hive-schema-3.2.0.mssql.sql      | 4 ++--
 .../src/main/sql/mssql/hive-schema-4.0.0.mssql.sql      | 4 ++--
 .../src/main/sql/mssql/upgrade-3.1.0-to-3.2.0.mssql.sql | 4 ++--
 3 files changed, 6 insertions(+), 6 deletions(-)
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/61d8a06b/standalone-metastore/metastore-server/src/main/sql/mssql/hive-schema-3.2.0.mssql.sql
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-server/src/main/sql/mssql/hive-schema-3.2.0.mssql.sql b/standalone-metastore/metastore-server/src/main/sql/mssql/hive-schema-3.2.0.mssql.sql
index e120128..cd04b4c 100644
--- a/standalone-metastore/metastore-server/src/main/sql/mssql/hive-schema-3.2.0.mssql.sql
+++ b/standalone-metastore/metastore-server/src/main/sql/mssql/hive-schema-3.2.0.mssql.sql
@@ -286,7 +286,7 @@ CREATE TABLE DBS
     OWNER_NAME nvarchar(128) NULL,
     OWNER_TYPE nvarchar(10) NULL,
     CTLG_NAME nvarchar(256),
-    CREATE_TIME BIGINT
+    CREATE_TIME INT
 );

 ALTER TABLE DBS ADD CONSTRAINT DBS_PK PRIMARY KEY (DB_ID);
@@ -699,7 +699,7 @@ CREATE TABLE CTLGS (
     "NAME" nvarchar(256),
     "DESC" nvarchar(4000),
     LOCATION_URI nvarchar(4000) not null,
-    CREATE_TIME BIGINT
+    CREATE_TIME INT
 );

 CREATE UNIQUE INDEX UNIQUE_CTLG ON CTLGS ("NAME");

http://git-wip-us.apache.org/repos/asf/hive/blob/61d8a06b/standalone-metastore/metastore-server/src/main/sql/mssql/hive-schema-4.0.0.mssql.sql
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-server/src/main/sql/mssql/hive-schema-4.0.0.mssql.sql b/standalone-metastore/metastore-server/src/main/sql/mssql/hive-schema-4.0.0.mssql.sql
index 895bf01..383d3bc 100644
--- a/standalone-metastore/metastore-server/src/main/sql/mssql/hive-schema-4.0.0.mssql.sql
+++ b/standalone-metastore/metastore-server/src/main/sql/mssql/hive-schema-4.0.0.mssql.sql
@@ -286,7 +286,7 @@ CREATE TABLE DBS
     OWNER_NAME nvarchar(128) NULL,
     OWNER_TYPE nvarchar(10) NULL,
     CTLG_NAME nvarchar(256),
-    CREATE_TIME BIGINT
+    CREATE_TIME INT
 );

 ALTER TABLE DBS ADD CONSTRAINT DBS_PK PRIMARY KEY (DB_ID);
@@ -700,7 +700,7 @@ CREATE TABLE CTLGS (
     "NAME" nvarchar(256),
     "DESC" nvarchar(4000),
     LOCATION_URI nvarchar(4000) not null,
-    CREATE_TIME BIGINT
+    CREATE_TIME INT
 );

 CREATE UNIQUE INDEX UNIQUE_CTLG ON CTLGS ("NAME");

http://git-wip-us.apache.org/repos/asf/hive/blob/61d8a06b/standalone-metastore/metastore-server/src/main/sql/mssql/upgrade-3.1.0-to-3.2.0.mssql.sql
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-server/src/main/sql/mssql/upgrade-3.1.0-to-3.2.0.mssql.sql b/standalone-metastore/metastore-server/src/main/sql/mssql/upgrade-3.1.0-to-3.2.0.mssql.sql
index 803bf5e..fd78419 100644
--- a/standalone-metastore/metastore-server/src/main/sql/mssql/upgrade-3.1.0-to-3.2.0.mssql.sql
+++ b/standalone-metastore/metastore-server/src/main/sql/mssql/upgrade-3.1.0-to-3.2.0.mssql.sql
@@ -21,8 +21,8 @@ INSERT INTO SEQUENCE_TABLE (SEQUENCE_NAME, NEXT_VAL) VALUES
   ('org.apache.hadoop.hive.metastore.model.MTxnWriteNotificationLog', 1);

 ALTER TABLE "SERDE_PARAMS" ALTER COLUMN "PARAM_VALUE" nvarchar(MAX);

 -- HIVE-21077
-ALTER TABLE DBS ADD CREATE_TIME BIGINT;
-ALTER TABLE CTLGS ADD CREATE_TIME BIGINT;
+ALTER TABLE DBS ADD CREATE_TIME INT;
+ALTER TABLE CTLGS ADD CREATE_TIME INT;

 -- These lines need to be last.  Insert any changes above.
 UPDATE VERSION SET SCHEMA_VERSION='3.2.0', VERSION_COMMENT='Hive release version 3.2.0' where VER_ID=1;
[06/11] hive git commit: HIVE-21079: Replicate column statistics for partitions of partitioned table (Ashutosh Bapat, reviewed by Sankar Hariappan)
http://git-wip-us.apache.org/repos/asf/hive/blob/71dfd1d1/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMFullResourcePlan.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMFullResourcePlan.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMFullResourcePlan.java
index f92e23e..4b38aeb 100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMFullResourcePlan.java
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMFullResourcePlan.java
@@ -755,14 +755,14 @@ import org.slf4j.LoggerFactory;
           case 2: // POOLS
             if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
               {
-                org.apache.thrift.protocol.TList _list888 = iprot.readListBegin();
-                struct.pools = new ArrayList<WMPool>(_list888.size);
-                WMPool _elem889;
-                for (int _i890 = 0; _i890 < _list888.size; ++_i890)
+                org.apache.thrift.protocol.TList _list904 = iprot.readListBegin();
+                struct.pools = new ArrayList<WMPool>(_list904.size);
+                WMPool _elem905;
+                for (int _i906 = 0; _i906 < _list904.size; ++_i906)
                 {
-                  _elem889 = new WMPool();
-                  _elem889.read(iprot);
-                  struct.pools.add(_elem889);
+                  _elem905 = new WMPool();
+                  _elem905.read(iprot);
+                  struct.pools.add(_elem905);
                 }
                 iprot.readListEnd();
               }
@@ -774,14 +774,14 @@ import org.slf4j.LoggerFactory;
           case 3: // MAPPINGS
             if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
               {
-                org.apache.thrift.protocol.TList _list891 = iprot.readListBegin();
-                struct.mappings = new ArrayList<WMMapping>(_list891.size);
-                WMMapping _elem892;
-                for (int _i893 = 0; _i893 < _list891.size; ++_i893)
+                org.apache.thrift.protocol.TList _list907 = iprot.readListBegin();
+                struct.mappings = new ArrayList<WMMapping>(_list907.size);
+                WMMapping _elem908;
+                for (int _i909 = 0; _i909 < _list907.size; ++_i909)
                 {
-                  _elem892 = new WMMapping();
-                  _elem892.read(iprot);
-                  struct.mappings.add(_elem892);
+                  _elem908 = new WMMapping();
+                  _elem908.read(iprot);
+                  struct.mappings.add(_elem908);
                 }
                 iprot.readListEnd();
               }
@@ -793,14 +793,14 @@ import org.slf4j.LoggerFactory;
           case 4: // TRIGGERS
             if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
               {
-                org.apache.thrift.protocol.TList _list894 = iprot.readListBegin();
-                struct.triggers = new ArrayList<WMTrigger>(_list894.size);
-                WMTrigger _elem895;
-                for (int _i896 = 0; _i896 < _list894.size; ++_i896)
+                org.apache.thrift.protocol.TList _list910 = iprot.readListBegin();
+                struct.triggers = new ArrayList<WMTrigger>(_list910.size);
+                WMTrigger _elem911;
+                for (int _i912 = 0; _i912 < _list910.size; ++_i912)
                 {
-                  _elem895 = new WMTrigger();
-                  _elem895.read(iprot);
-                  struct.triggers.add(_elem895);
+                  _elem911 = new WMTrigger();
+                  _elem911.read(iprot);
+                  struct.triggers.add(_elem911);
                 }
                 iprot.readListEnd();
               }
@@ -812,14 +812,14 @@ import org.slf4j.LoggerFactory;
           case 5: // POOL_TRIGGERS
             if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
               {
-                org.apache.thrift.protocol.TList _list897 = iprot.readListBegin();
-                struct.poolTriggers = new ArrayList<WMPoolTrigger>(_list897.size);
-                WMPoolTrigger _elem898;
-                for (int _i899 = 0; _i899 < _list897.size; ++_i899)
+                org.apache.thrift.protocol.TList _list913 = iprot.readListBegin();
+                struct.poolTriggers = new ArrayList<WMPoolTrigger>(_list913.size);
+                WMPoolTrigger _elem914;
+                for (int _i915 = 0; _i915 < _list913.size; ++_i915)
                 {
-                  _elem898 = new WMPoolTrigger();
-                  _elem898.read(iprot);
-                  struct.poolTriggers.add(_elem898);
+                  _elem914 = new WMPoolTrigger();
+                  _elem914.read(iprot);
+                  struct.poolTriggers.add(_elem914);
[05/11] hive git commit: HIVE-21079: Replicate column statistics for partitions of partitioned table (Ashutosh Bapat, reviewed by Sankar Hariappan)
http://git-wip-us.apache.org/repos/asf/hive/blob/71dfd1d1/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
index b94dd25..333a2d9 100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
@@ -711,6 +711,13 @@ interface ThriftHiveMetastoreIf extends \FacebookServiceIf {
    */
   public function get_partitions_by_names($db_name, $tbl_name, array $names);
   /**
+   * @param \metastore\GetPartitionsByNamesRequest $req
+   * @return \metastore\GetPartitionsByNamesResult
+   * @throws \metastore\MetaException
+   * @throws \metastore\NoSuchObjectException
+   */
+  public function get_partitions_by_names_req(\metastore\GetPartitionsByNamesRequest $req);
+  /**
    * @param string $db_name
    * @param string $tbl_name
    * @param \metastore\Partition $new_part
@@ -6485,6 +6492,63 @@ class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metas
     throw new \Exception("get_partitions_by_names failed: unknown result");
   }

+  public function get_partitions_by_names_req(\metastore\GetPartitionsByNamesRequest $req)
+  {
+    $this->send_get_partitions_by_names_req($req);
+    return $this->recv_get_partitions_by_names_req();
+  }
+
+  public function send_get_partitions_by_names_req(\metastore\GetPartitionsByNamesRequest $req)
+  {
+    $args = new \metastore\ThriftHiveMetastore_get_partitions_by_names_req_args();
+    $args->req = $req;
+    $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary');
+    if ($bin_accel)
+    {
+      thrift_protocol_write_binary($this->output_, 'get_partitions_by_names_req', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite());
+    }
+    else
+    {
+      $this->output_->writeMessageBegin('get_partitions_by_names_req', TMessageType::CALL, $this->seqid_);
+      $args->write($this->output_);
+      $this->output_->writeMessageEnd();
+      $this->output_->getTransport()->flush();
+    }
+  }
+
+  public function recv_get_partitions_by_names_req()
+  {
+    $bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_read_binary');
+    if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\metastore\ThriftHiveMetastore_get_partitions_by_names_req_result', $this->input_->isStrictRead());
+    else
+    {
+      $rseqid = 0;
+      $fname = null;
+      $mtype = 0;
+
+      $this->input_->readMessageBegin($fname, $mtype, $rseqid);
+      if ($mtype == TMessageType::EXCEPTION) {
+        $x = new TApplicationException();
+        $x->read($this->input_);
+        $this->input_->readMessageEnd();
+        throw $x;
+      }
+      $result = new \metastore\ThriftHiveMetastore_get_partitions_by_names_req_result();
+      $result->read($this->input_);
+      $this->input_->readMessageEnd();
+    }
+    if ($result->success !== null) {
+      return $result->success;
+    }
+    if ($result->o1 !== null) {
+      throw $result->o1;
+    }
+    if ($result->o2 !== null) {
+      throw $result->o2;
+    }
+    throw new \Exception("get_partitions_by_names_req failed: unknown result");
+  }
+
   public function alter_partition($db_name, $tbl_name, \metastore\Partition $new_part)
   {
     $this->send_alter_partition($db_name, $tbl_name, $new_part);
@@ -16344,14 +16408,14 @@ class ThriftHiveMetastore_get_databases_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size896 = 0;
-            $_etype899 = 0;
-            $xfer += $input->readListBegin($_etype899, $_size896);
-            for ($_i900 = 0; $_i900 < $_size896; ++$_i900)
+            $_size910 = 0;
+            $_etype913 = 0;
+            $xfer += $input->readListBegin($_etype913, $_size910);
+            for ($_i914 = 0; $_i914 < $_size910; ++$_i914)
             {
-              $elem901 = null;
-              $xfer += $input->readString($elem901);
-              $this->success []= $elem901;
+              $elem915 = null;
+              $xfer += $input->readString($elem915);
+              $this->success []= $elem915;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -16387,9 +16451,9 @@ class ThriftHiveMetastore_get_databases_result {
       {
         $output->writeListBegin(TType::STRING, count($this->success));
         {
-          foreach ($this->success as $iter902)
+          foreach ($this->success as $iter916)
           {
-            $xfer += $output->writeString($iter902);
[03/11] hive git commit: HIVE-21079: Replicate column statistics for partitions of partitioned table (Ashutosh Bapat, reviewed by Sankar Hariappan)
http://git-wip-us.apache.org/repos/asf/hive/blob/71dfd1d1/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote b/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote
index ba06c54..9aeae9f 100755
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote
@@ -107,6 +107,7 @@ if len(sys.argv) <= 1 or sys.argv[1] == '--help':
   print('  PartitionsByExprResult get_partitions_by_expr(PartitionsByExprRequest req)')
   print('  i32 get_num_partitions_by_filter(string db_name, string tbl_name, string filter)')
   print('   get_partitions_by_names(string db_name, string tbl_name,  names)')
+  print('  GetPartitionsByNamesResult get_partitions_by_names_req(GetPartitionsByNamesRequest req)')
   print('  void alter_partition(string db_name, string tbl_name, Partition new_part)')
   print('  void alter_partitions(string db_name, string tbl_name,  new_parts)')
   print('  void alter_partitions_with_environment_context(string db_name, string tbl_name,  new_parts, EnvironmentContext environment_context)')
@@ -813,6 +814,12 @@ elif cmd == 'get_partitions_by_names':
     sys.exit(1)
   pp.pprint(client.get_partitions_by_names(args[0],args[1],eval(args[2]),))

+elif cmd == 'get_partitions_by_names_req':
+  if len(args) != 1:
+    print('get_partitions_by_names_req requires 1 args')
+    sys.exit(1)
+  pp.pprint(client.get_partitions_by_names_req(eval(args[0]),))
+
 elif cmd == 'alter_partition':
   if len(args) != 3:
     print('alter_partition requires 3 args')

http://git-wip-us.apache.org/repos/asf/hive/blob/71dfd1d1/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py b/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py
index 37db81f..eadf300 100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py
@@ -713,6 +713,13 @@ class Iface(fb303.FacebookService.Iface):
     """
     pass

+  def get_partitions_by_names_req(self, req):
+    """
+    Parameters:
+     - req
+    """
+    pass
+
   def alter_partition(self, db_name, tbl_name, new_part):
     """
     Parameters:
@@ -4835,6 +4842,41 @@ class Client(fb303.FacebookService.Client, Iface):
       raise result.o2
     raise TApplicationException(TApplicationException.MISSING_RESULT, "get_partitions_by_names failed: unknown result")

+  def get_partitions_by_names_req(self, req):
+    """
+    Parameters:
+     - req
+    """
+    self.send_get_partitions_by_names_req(req)
+    return self.recv_get_partitions_by_names_req()
+
+  def send_get_partitions_by_names_req(self, req):
+    self._oprot.writeMessageBegin('get_partitions_by_names_req', TMessageType.CALL, self._seqid)
+    args = get_partitions_by_names_req_args()
+    args.req = req
+    args.write(self._oprot)
+    self._oprot.writeMessageEnd()
+    self._oprot.trans.flush()
+
+  def recv_get_partitions_by_names_req(self):
+    iprot = self._iprot
+    (fname, mtype, rseqid) = iprot.readMessageBegin()
+    if mtype == TMessageType.EXCEPTION:
+      x = TApplicationException()
+      x.read(iprot)
+      iprot.readMessageEnd()
+      raise x
+    result = get_partitions_by_names_req_result()
+    result.read(iprot)
+    iprot.readMessageEnd()
+    if result.success is not None:
+      return result.success
+    if result.o1 is not None:
+      raise result.o1
+    if result.o2 is not None:
+      raise result.o2
+    raise TApplicationException(TApplicationException.MISSING_RESULT, "get_partitions_by_names_req failed: unknown result")
+
   def alter_partition(self, db_name, tbl_name, new_part):
     """
     Parameters:
@@ -9694,6 +9736,7 @@ class Processor(fb303.FacebookService.Processor, Iface, TProcessor):
     self._processMap["get_partitions_by_expr"] = Processor.process_get_partitions_by_expr
     self._processMap["get_num_partitions_by_filter"] = Processor.process_get_num_partitions_by_filter
     self._processMap["get_partitions_by_names"] = Processor.process_get_partitions_by_names
+    self._processMap["get_partitions_by_names_req"] = Processor.process_get_partitions_by_names_req
     self._processMap["alter_partition"] = Processor.process_alter_partition
     self._processMap["alter_partitions"] = Processor.process_alter_partitions
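For orientation, here is the same new RPC invoked through the generated Java client rather than the Python and PHP bindings above. This is a sketch, assuming a plain unsecured metastore on port 9083 and the usual Thrift 0.9.3 accessor names (the result's getPartitions() accessor in particular is an assumption based on the generated-code pattern):

    import java.util.Arrays;
    import org.apache.hadoop.hive.metastore.api.GetPartitionsByNamesRequest;
    import org.apache.hadoop.hive.metastore.api.GetPartitionsByNamesResult;
    import org.apache.hadoop.hive.metastore.api.Partition;
    import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;
    import org.apache.thrift.protocol.TBinaryProtocol;
    import org.apache.thrift.transport.TSocket;
    import org.apache.thrift.transport.TTransport;

    public class GetPartitionsByNamesReqExample {
      public static void main(String[] args) throws Exception {
        TTransport transport = new TSocket("localhost", 9083);
        transport.open();
        ThriftHiveMetastore.Client client =
            new ThriftHiveMetastore.Client(new TBinaryProtocol(transport));

        // db_name and tbl_name are required; names and get_col_stats are optional.
        GetPartitionsByNamesRequest req =
            new GetPartitionsByNamesRequest("default", "web_logs");
        req.setNames(Arrays.asList("ds=2019-01-29"));
        req.setGet_col_stats(true); // ask the server to attach column statistics

        GetPartitionsByNamesResult res = client.get_partitions_by_names_req(req);
        for (Partition p : res.getPartitions()) {
          System.out.println(p.getValues() + " hasColStats=" + p.isSetColStats());
        }
        transport.close();
      }
    }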
[08/11] hive git commit: HIVE-21079: Replicate column statistics for partitions of partitioned table (Ashutosh Bapat, reviewed by Sankar Hariappan)
http://git-wip-us.apache.org/repos/asf/hive/blob/71dfd1d1/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/LockRequest.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/LockRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/LockRequest.java
index 663f7d0..acac747 100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/LockRequest.java
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/LockRequest.java
@@ -689,14 +689,14 @@ import org.slf4j.LoggerFactory;
           case 1: // COMPONENT
             if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
               {
-                org.apache.thrift.protocol.TList _list674 = iprot.readListBegin();
-                struct.component = new ArrayList<LockComponent>(_list674.size);
-                LockComponent _elem675;
-                for (int _i676 = 0; _i676 < _list674.size; ++_i676)
+                org.apache.thrift.protocol.TList _list690 = iprot.readListBegin();
+                struct.component = new ArrayList<LockComponent>(_list690.size);
+                LockComponent _elem691;
+                for (int _i692 = 0; _i692 < _list690.size; ++_i692)
                 {
-                  _elem675 = new LockComponent();
-                  _elem675.read(iprot);
-                  struct.component.add(_elem675);
+                  _elem691 = new LockComponent();
+                  _elem691.read(iprot);
+                  struct.component.add(_elem691);
                 }
                 iprot.readListEnd();
               }
@@ -754,9 +754,9 @@ import org.slf4j.LoggerFactory;
         oprot.writeFieldBegin(COMPONENT_FIELD_DESC);
         {
           oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.component.size()));
-          for (LockComponent _iter677 : struct.component)
+          for (LockComponent _iter693 : struct.component)
           {
-            _iter677.write(oprot);
+            _iter693.write(oprot);
           }
           oprot.writeListEnd();
         }
@@ -803,9 +803,9 @@ import org.slf4j.LoggerFactory;
       TTupleProtocol oprot = (TTupleProtocol) prot;
       {
         oprot.writeI32(struct.component.size());
-        for (LockComponent _iter678 : struct.component)
+        for (LockComponent _iter694 : struct.component)
         {
-          _iter678.write(oprot);
+          _iter694.write(oprot);
         }
       }
       oprot.writeString(struct.user);
@@ -830,14 +830,14 @@ import org.slf4j.LoggerFactory;
     public void read(org.apache.thrift.protocol.TProtocol prot, LockRequest struct) throws org.apache.thrift.TException {
       TTupleProtocol iprot = (TTupleProtocol) prot;
       {
-        org.apache.thrift.protocol.TList _list679 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-        struct.component = new ArrayList<LockComponent>(_list679.size);
-        LockComponent _elem680;
-        for (int _i681 = 0; _i681 < _list679.size; ++_i681)
+        org.apache.thrift.protocol.TList _list695 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+        struct.component = new ArrayList<LockComponent>(_list695.size);
+        LockComponent _elem696;
+        for (int _i697 = 0; _i697 < _list695.size; ++_i697)
        {
-          _elem680 = new LockComponent();
-          _elem680.read(iprot);
-          struct.component.add(_elem680);
+          _elem696 = new LockComponent();
+          _elem696.read(iprot);
+          struct.component.add(_elem696);
         }
       }
       struct.setComponentIsSet(true);

http://git-wip-us.apache.org/repos/asf/hive/blob/71dfd1d1/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEventRequest.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEventRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEventRequest.java
index 288c365..d5c19e1 100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEventRequest.java
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEventRequest.java
@@ -525,13 +525,13 @@ import org.slf4j.LoggerFactory;
           case 3: // EVENT_TYPE_SKIP_LIST
             if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
[10/11] hive git commit: HIVE-21079: Replicate column statistics for partitions of partitioned table (Ashutosh Bapat, reviewed by Sankar Hariappan)
http://git-wip-us.apache.org/repos/asf/hive/blob/71dfd1d1/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AlterPartitionsRequest.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AlterPartitionsRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AlterPartitionsRequest.java
index f7d9ed2..70b6e92 100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AlterPartitionsRequest.java
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AlterPartitionsRequest.java
@@ -877,14 +877,14 @@ import org.slf4j.LoggerFactory;
           case 4: // PARTITIONS
             if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
               {
-                org.apache.thrift.protocol.TList _list968 = iprot.readListBegin();
-                struct.partitions = new ArrayList<Partition>(_list968.size);
-                Partition _elem969;
-                for (int _i970 = 0; _i970 < _list968.size; ++_i970)
+                org.apache.thrift.protocol.TList _list984 = iprot.readListBegin();
+                struct.partitions = new ArrayList<Partition>(_list984.size);
+                Partition _elem985;
+                for (int _i986 = 0; _i986 < _list984.size; ++_i986)
                 {
-                  _elem969 = new Partition();
-                  _elem969.read(iprot);
-                  struct.partitions.add(_elem969);
+                  _elem985 = new Partition();
+                  _elem985.read(iprot);
+                  struct.partitions.add(_elem985);
                 }
                 iprot.readListEnd();
               }
@@ -952,9 +952,9 @@ import org.slf4j.LoggerFactory;
         oprot.writeFieldBegin(PARTITIONS_FIELD_DESC);
         {
           oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.partitions.size()));
-          for (Partition _iter971 : struct.partitions)
+          for (Partition _iter987 : struct.partitions)
           {
-            _iter971.write(oprot);
+            _iter987.write(oprot);
           }
           oprot.writeListEnd();
         }
@@ -1000,9 +1000,9 @@ import org.slf4j.LoggerFactory;
       oprot.writeString(struct.tableName);
       {
         oprot.writeI32(struct.partitions.size());
-        for (Partition _iter972 : struct.partitions)
+        for (Partition _iter988 : struct.partitions)
         {
-          _iter972.write(oprot);
+          _iter988.write(oprot);
         }
       }
       BitSet optionals = new BitSet();
@@ -1041,14 +1041,14 @@ import org.slf4j.LoggerFactory;
       struct.tableName = iprot.readString();
       struct.setTableNameIsSet(true);
       {
-        org.apache.thrift.protocol.TList _list973 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-        struct.partitions = new ArrayList<Partition>(_list973.size);
-        Partition _elem974;
-        for (int _i975 = 0; _i975 < _list973.size; ++_i975)
+        org.apache.thrift.protocol.TList _list989 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+        struct.partitions = new ArrayList<Partition>(_list989.size);
+        Partition _elem990;
+        for (int _i991 = 0; _i991 < _list989.size; ++_i991)
         {
-          _elem974 = new Partition();
-          _elem974.read(iprot);
-          struct.partitions.add(_elem974);
+          _elem990 = new Partition();
+          _elem990.read(iprot);
+          struct.partitions.add(_elem990);
         }
       }
       struct.setPartitionsIsSet(true);

http://git-wip-us.apache.org/repos/asf/hive/blob/71dfd1d1/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClearFileMetadataRequest.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClearFileMetadataRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClearFileMetadataRequest.java
index f4e3d6b..1447bb4 100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClearFileMetadataRequest.java
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClearFileMetadataRequest.java
@@ -351,13 +351,13 @@
           case 1: // FILE_IDS
             if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
               {
-                org.apache.thrift.protocol.TList _list848 = iprot.readListBegin();
[09/11] hive git commit: HIVE-21079: Replicate column statistics for partitions of partitioned table (Ashutosh Bapat, reviewed by Sankar Hariappan)
http://git-wip-us.apache.org/repos/asf/hive/blob/71dfd1d1/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPartitionsByNamesRequest.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPartitionsByNamesRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPartitionsByNamesRequest.java
new file mode 100644
index 000..91199e5
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPartitionsByNamesRequest.java
@@ -0,0 +1,752 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class GetPartitionsByNamesRequest implements org.apache.thrift.TBase<GetPartitionsByNamesRequest, GetPartitionsByNamesRequest._Fields>, java.io.Serializable, Cloneable, Comparable<GetPartitionsByNamesRequest> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GetPartitionsByNamesRequest");
+
+  private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("db_name", org.apache.thrift.protocol.TType.STRING, (short)1);
+  private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tbl_name", org.apache.thrift.protocol.TType.STRING, (short)2);
+  private static final org.apache.thrift.protocol.TField NAMES_FIELD_DESC = new org.apache.thrift.protocol.TField("names", org.apache.thrift.protocol.TType.LIST, (short)3);
+  private static final org.apache.thrift.protocol.TField GET_COL_STATS_FIELD_DESC = new org.apache.thrift.protocol.TField("get_col_stats", org.apache.thrift.protocol.TType.BOOL, (short)4);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new GetPartitionsByNamesRequestStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new GetPartitionsByNamesRequestTupleSchemeFactory());
+  }
+
+  private String db_name; // required
+  private String tbl_name; // required
+  private List<String> names; // optional
+  private boolean get_col_stats; // optional
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    DB_NAME((short)1, "db_name"),
+    TBL_NAME((short)2, "tbl_name"),
+    NAMES((short)3, "names"),
+    GET_COL_STATS((short)4, "get_col_stats");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if its not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // DB_NAME
+          return DB_NAME;
+        case 2: // TBL_NAME
+          return TBL_NAME;
+        case 3: // NAMES
+          return NAMES;
+        case 4: // GET_COL_STATS
+          return GET_COL_STATS;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields co
[11/11] hive git commit: HIVE-21079: Replicate column statistics for partitions of partitioned table (Ashutosh Bapat, reviewed by Sankar Hariappan)
HIVE-21079: Replicate column statistics for partitions of partitioned table (Ashutosh Bapat, reviewed by Sankar Hariappan)

Signed-off-by: Sankar Hariappan

Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/71dfd1d1
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/71dfd1d1
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/71dfd1d1

Branch: refs/heads/master
Commit: 71dfd1d11f239caf8f16bc29db0f959e566f7659
Parents: 9747083
Author: Ashutosh Bapat
Authored: Tue Jan 29 20:15:59 2019 +0530
Committer: Sankar Hariappan
Committed: Tue Jan 29 20:15:59 2019 +0530

----------------------------------------------------------------------
 .../listener/DbNotificationListener.java           |    1 +
 .../hive/ql/parse/TestReplicationScenarios.java    |    5 +-
 .../ql/parse/TestStatsReplicationScenarios.java    |  105 +-
 ...stStatsReplicationScenariosNoAutogather.java    |    2 -
 .../hadoop/hive/ql/parse/WarehouseInstance.java    |   14 +
 .../apache/hadoop/hive/ql/exec/MoveTask.java       |   54 +-
 .../events/filesystem/FSTableEvent.java            |   13 +
 .../bootstrap/load/table/LoadPartitions.java       |    1 +
 .../apache/hadoop/hive/ql/metadata/Hive.java       |   92 +-
 .../hive/ql/metadata/PartitionIterable.java        |   16 +-
 .../ql/metadata/SessionHiveMetaStoreClient.java    |   15 +-
 .../hive/ql/parse/ImportSemanticAnalyzer.java      |   36 +-
 .../hive/ql/parse/repl/dump/TableExport.java       |   11 +-
 .../repl/dump/events/AddPartitionHandler.java      |    8 +
 .../repl/dump/events/AlterPartitionHandler.java    |    8 +
 .../repl/dump/events/DropPartitionHandler.java     |    9 +
 .../dump/events/UpdatePartColStatHandler.java      |   30 +-
 .../dump/events/UpdateTableColStatHandler.java     |    2 +-
 .../load/message/UpdatePartColStatHandler.java     |   22 +-
 .../hadoop/hive/ql/plan/AddPartitionDesc.java      |    6 +
 .../hive/metastore/api/AbortTxnsRequest.java       |   32 +-
 .../metastore/api/AddDynamicPartitions.java        |   32 +-
 .../api/AllocateTableWriteIdsRequest.java          |   68 +-
 .../api/AllocateTableWriteIdsResponse.java         |   36 +-
 .../metastore/api/AlterPartitionsRequest.java      |   36 +-
 .../metastore/api/ClearFileMetadataRequest.java    |   32 +-
 .../hive/metastore/api/ClientCapabilities.java     |   32 +-
 .../hive/metastore/api/CommitTxnRequest.java       |   36 +-
 .../hive/metastore/api/CompactionRequest.java      |   44 +-
 .../hive/metastore/api/CreationMetadata.java       |   32 +-
 .../metastore/api/FindSchemasByColsResp.java       |   36 +-
 .../hive/metastore/api/FireEventRequest.java       |   32 +-
 .../hadoop/hive/metastore/api/Function.java        |   36 +-
 .../metastore/api/GetAllFunctionsResponse.java     |   36 +-
 .../api/GetFileMetadataByExprRequest.java          |   32 +-
 .../api/GetFileMetadataByExprResult.java           |   48 +-
 .../metastore/api/GetFileMetadataRequest.java      |   32 +-
 .../metastore/api/GetFileMetadataResult.java       |   44 +-
 .../metastore/api/GetOpenTxnsInfoResponse.java     |   36 +-
 .../hive/metastore/api/GetOpenTxnsResponse.java    |   32 +-
 .../api/GetPartitionsByNamesRequest.java           |  752
 .../api/GetPartitionsByNamesResult.java            |  443 +++
 .../metastore/api/GetPartitionsFilterSpec.java     |   32 +-
 .../api/GetPartitionsProjectionSpec.java           |   32 +-
 .../metastore/api/GetPartitionsRequest.java        |   32 +-
 .../metastore/api/GetPartitionsResponse.java       |   36 +-
 .../hive/metastore/api/GetTablesRequest.java       |   32 +-
 .../hive/metastore/api/GetTablesResult.java        |   36 +-
 .../metastore/api/GetValidWriteIdsRequest.java     |   36 +-
 .../metastore/api/GetValidWriteIdsResponse.java    |   36 +-
 .../api/HeartbeatTxnRangeResponse.java             |   64 +-
 .../metastore/api/InsertEventRequestData.java      |   96 +-
 .../hadoop/hive/metastore/api/LockRequest.java     |   36 +-
 .../metastore/api/NotificationEventRequest.java    |   32 +-
 .../api/NotificationEventResponse.java             |   36 +-
 .../hive/metastore/api/OpenTxnRequest.java         |   32 +-
 .../hive/metastore/api/OpenTxnsResponse.java       |   32 +-
 .../hadoop/hive/metastore/api/Partition.java       |  116 +-
 .../metastore/api/PutFileMetadataRequest.java      |   64 +-
 .../metastore/api/RenamePartitionRequest.java      |   32 +-
 .../hive/metastore/api/ReplLastIdInfo.java         |   32 +-
 .../api/ReplTblWriteIdStateRequest.java            |   32 +-
 .../hive/metastore/api/SchemaVersion.java          |   36 +-
 .../hive/metastore/api/ShowCompactResponse.java    |   36 +-
 .../hive/metastore/api/ShowLocksResponse.java      |   36 +-
 .../hive/metastore/api/TableValidWriteIds.java     |   32 +-
 .../hive/metastore/api/ThriftHiveMetastore.java    | 3601 --
 .../hive/metastore/api/WMFullResourcePlan.java     |  144 +-
 .../api/WMGetAllResourcePlanResponse.java          |   36 +-
 .../WMGetTriggersForResourePlanResponse.java       |   36 +-
 .../api/WMValidateResourcePlanResponse.java        |   64 +-
 .../api/WriteNotificationLogRequest.java           |   32 +-
 .../gen-php/metastore/Thr
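The thrust of the change, restated as a sketch: Partition now carries an optional ColumnStatistics (field 12, colStats), so replication can ship a partition together with its statistics in one add_partitions call instead of a separate update-stats round trip. Field values below are illustrative, and leaving the per-column statistics objects unset is a deliberate simplification:

    import java.util.Arrays;
    import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
    import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc;
    import org.apache.hadoop.hive.metastore.api.Partition;

    public class PartitionWithStatsSketch {
      static Partition partitionCarryingStats() {
        Partition part = new Partition();
        part.setDbName("default");
        part.setTableName("web_logs");
        part.setValues(Arrays.asList("2019-01-29"));

        ColumnStatisticsDesc desc = new ColumnStatisticsDesc();
        desc.setIsTblLevel(false);
        desc.setDbName("default");
        desc.setTableName("web_logs");
        desc.setPartName("ds=2019-01-29");

        ColumnStatistics stats = new ColumnStatistics();
        stats.setStatsDesc(desc);
        // stats.setStatsObj(...) would carry the per-column statistics objects.

        part.setColStats(stats); // new optional field 12 added by HIVE-21079
        return part;
      }
    }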
[02/11] hive git commit: HIVE-21079: Replicate column statistics for partitions of partitioned table (Ashutosh Bapat, reviewed by Sankar Hariappan)
http://git-wip-us.apache.org/repos/asf/hive/blob/71dfd1d1/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py b/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py
index 06938b4..dee644c 100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py
@@ -5142,6 +5142,7 @@ class Partition:
    - catName
    - writeId
    - isStatsCompliant
+   - colStats
   """

   thrift_spec = (
@@ -5157,9 +5158,10 @@ class Partition:
     (9, TType.STRING, 'catName', None, None, ), # 9
     (10, TType.I64, 'writeId', None, -1, ), # 10
     (11, TType.BOOL, 'isStatsCompliant', None, None, ), # 11
+    (12, TType.STRUCT, 'colStats', (ColumnStatistics, ColumnStatistics.thrift_spec), None, ), # 12
   )

-  def __init__(self, values=None, dbName=None, tableName=None, createTime=None, lastAccessTime=None, sd=None, parameters=None, privileges=None, catName=None, writeId=thrift_spec[10][4], isStatsCompliant=None,):
+  def __init__(self, values=None, dbName=None, tableName=None, createTime=None, lastAccessTime=None, sd=None, parameters=None, privileges=None, catName=None, writeId=thrift_spec[10][4], isStatsCompliant=None, colStats=None,):
     self.values = values
     self.dbName = dbName
     self.tableName = tableName
@@ -5171,6 +5173,7 @@ class Partition:
     self.catName = catName
     self.writeId = writeId
     self.isStatsCompliant = isStatsCompliant
+    self.colStats = colStats

   def read(self, iprot):
     if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
@@ -5249,6 +5252,12 @@ class Partition:
           self.isStatsCompliant = iprot.readBool()
         else:
           iprot.skip(ftype)
+      elif fid == 12:
+        if ftype == TType.STRUCT:
+          self.colStats = ColumnStatistics()
+          self.colStats.read(iprot)
+        else:
+          iprot.skip(ftype)
       else:
         iprot.skip(ftype)
       iprot.readFieldEnd()
@@ -5310,6 +5319,10 @@ class Partition:
       oprot.writeFieldBegin('isStatsCompliant', TType.BOOL, 11)
       oprot.writeBool(self.isStatsCompliant)
       oprot.writeFieldEnd()
+    if self.colStats is not None:
+      oprot.writeFieldBegin('colStats', TType.STRUCT, 12)
+      self.colStats.write(oprot)
+      oprot.writeFieldEnd()
     oprot.writeFieldStop()
     oprot.writeStructEnd()

@@ -5330,6 +5343,7 @@ class Partition:
     value = (value * 31) ^ hash(self.catName)
     value = (value * 31) ^ hash(self.writeId)
     value = (value * 31) ^ hash(self.isStatsCompliant)
+    value = (value * 31) ^ hash(self.colStats)
     return value

   def __repr__(self):
@@ -11081,6 +11095,198 @@ class PartitionValuesResponse:
   def __ne__(self, other):
     return not (self == other)

+class GetPartitionsByNamesRequest:
+  """
+  Attributes:
+   - db_name
+   - tbl_name
+   - names
+   - get_col_stats
+  """
+
+  thrift_spec = (
+    None, # 0
+    (1, TType.STRING, 'db_name', None, None, ), # 1
+    (2, TType.STRING, 'tbl_name', None, None, ), # 2
+    (3, TType.LIST, 'names', (TType.STRING,None), None, ), # 3
+    (4, TType.BOOL, 'get_col_stats', None, None, ), # 4
+  )
+
+  def __init__(self, db_name=None, tbl_name=None, names=None, get_col_stats=None,):
+    self.db_name = db_name
+    self.tbl_name = tbl_name
+    self.names = names
+    self.get_col_stats = get_col_stats
+
+  def read(self, iprot):
+    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+      return
+    iprot.readStructBegin()
+    while True:
+      (fname, ftype, fid) = iprot.readFieldBegin()
+      if ftype == TType.STOP:
+        break
+      if fid == 1:
+        if ftype == TType.STRING:
+          self.db_name = iprot.readString()
+        else:
+          iprot.skip(ftype)
+      elif fid == 2:
+        if ftype == TType.STRING:
+          self.tbl_name = iprot.readString()
+        else:
+          iprot.skip(ftype)
+      elif fid == 3:
+        if ftype == TType.LIST:
+          self.names = []
+          (_etype491, _size488) = iprot.readListBegin()
+          for _i492 in xrange(_size488):
+            _elem493 = iprot.readString()
+            self.names.append(_elem493)
+          iprot.readListEnd()
+        else:
+          iprot.skip(ftype)
+      elif fid == 4:
+        if ftype == TType.BOOL:
+          self.get_col_stats = iprot.readBool()
+        else:
+          iprot.skip(ftype)
[04/11] hive git commit: HIVE-21079: Replicate column statistics for partitions of partitioned table (Ashutosh Bapat, reviewed by Sankar Hariappan)
http://git-wip-us.apache.org/repos/asf/hive/blob/71dfd1d1/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/Types.php
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/Types.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/Types.php
index 1ae447d..4f5f8eb 100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/Types.php
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/Types.php
@@ -7442,6 +7442,10 @@ class Partition {
    * @var bool
    */
   public $isStatsCompliant = null;
+  /**
+   * @var \metastore\ColumnStatistics
+   */
+  public $colStats = null;

   public function __construct($vals=null) {
     if (!isset(self::$_TSPEC)) {
@@ -7504,6 +7508,11 @@ class Partition {
           'var' => 'isStatsCompliant',
           'type' => TType::BOOL,
           ),
+        12 => array(
+          'var' => 'colStats',
+          'type' => TType::STRUCT,
+          'class' => '\metastore\ColumnStatistics',
+          ),
         );
     }
     if (is_array($vals)) {
@@ -7540,6 +7549,9 @@ class Partition {
       if (isset($vals['isStatsCompliant'])) {
         $this->isStatsCompliant = $vals['isStatsCompliant'];
       }
+      if (isset($vals['colStats'])) {
+        $this->colStats = $vals['colStats'];
+      }
     }
   }

@@ -7664,6 +7676,14 @@ class Partition {
             $xfer += $input->skip($ftype);
           }
           break;
+        case 12:
+          if ($ftype == TType::STRUCT) {
+            $this->colStats = new \metastore\ColumnStatistics();
+            $xfer += $this->colStats->read($input);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
         default:
           $xfer += $input->skip($ftype);
           break;
@@ -7763,6 +7783,14 @@ class Partition {
       $xfer += $output->writeBool($this->isStatsCompliant);
       $xfer += $output->writeFieldEnd();
     }
+    if ($this->colStats !== null) {
+      if (!is_object($this->colStats)) {
+        throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA);
+      }
+      $xfer += $output->writeFieldBegin('colStats', TType::STRUCT, 12);
+      $xfer += $this->colStats->write($output);
+      $xfer += $output->writeFieldEnd();
+    }
     $xfer += $output->writeFieldStop();
     $xfer += $output->writeStructEnd();
     return $xfer;
@@ -15898,6 +15926,279 @@ class PartitionValuesResponse {
 }

+class GetPartitionsByNamesRequest {
+  static $_TSPEC;
+
+  /**
+   * @var string
+   */
+  public $db_name = null;
+  /**
+   * @var string
+   */
+  public $tbl_name = null;
+  /**
+   * @var string[]
+   */
+  public $names = null;
+  /**
+   * @var bool
+   */
+  public $get_col_stats = null;
+
+  public function __construct($vals=null) {
+    if (!isset(self::$_TSPEC)) {
+      self::$_TSPEC = array(
+        1 => array(
+          'var' => 'db_name',
+          'type' => TType::STRING,
+          ),
+        2 => array(
+          'var' => 'tbl_name',
+          'type' => TType::STRING,
+          ),
+        3 => array(
+          'var' => 'names',
+          'type' => TType::LST,
+          'etype' => TType::STRING,
+          'elem' => array(
+            'type' => TType::STRING,
+            ),
+          ),
+        4 => array(
+          'var' => 'get_col_stats',
+          'type' => TType::BOOL,
+          ),
+        );
+    }
+    if (is_array($vals)) {
+      if (isset($vals['db_name'])) {
+        $this->db_name = $vals['db_name'];
+      }
+      if (isset($vals['tbl_name'])) {
+        $this->tbl_name = $vals['tbl_name'];
+      }
+      if (isset($vals['names'])) {
+        $this->names = $vals['names'];
+      }
+      if (isset($vals['get_col_stats'])) {
+        $this->get_col_stats = $vals['get_col_stats'];
+      }
+    }
+  }
+
+  public function getName() {
+    return 'GetPartitionsByNamesRequest';
+  }
+
+  public function read($input)
+  {
+    $xfer = 0;
+    $fname = null;
+    $ftype = 0;
+    $fid = 0;
+    $xfer += $input->readStructBegin($fname);
+    while (true)
+    {
+      $xfer += $input->readFieldBegin($fname, $ftype, $fid);
+      if ($ftype == TType::STOP) {
+        break;
+      }
+      switch ($fid)
+      {
+        case 1:
+          if ($ftype == TType::STRING) {
+            $xfer += $input->readString($this->db_name);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        case 2:
+          if ($ftype == TType::STRING) {
+            $xfer += $input->readString($this->tbl_name);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        case 3:
+          if ($ftype == TType::LST) {
+            $this->names = array();
+            $_size488 = 0;
+            $_etype491 = 0;
+            $xfer += $input->readListBegin($_etype491, $_size488);
[01/11] hive git commit: HIVE-21079: Replicate column statistics for partitions of partitioned table (Ashutosh Bapat, reviewed by Sankar Hariappan)
Repository: hive
Updated Branches:
  refs/heads/master 974708336 -> 71dfd1d11

http://git-wip-us.apache.org/repos/asf/hive/blob/71dfd1d1/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
index c0b1d87..b43fb5e 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
@@ -3521,6 +3521,29 @@ public class HiveMetaStore extends ThriftHiveMetastore {
               new AddPartitionEvent(tbl, newParts, true, this));
         }

+        if (!listeners.isEmpty()) {
+          MetaStoreListenerNotifier.notifyEvent(listeners,
+              EventType.ADD_PARTITION,
+              new AddPartitionEvent(tbl, newParts, true, this),
+              null,
+              transactionalListenerResponses, ms);
+
+          if (!existingParts.isEmpty()) {
+            // The request has succeeded but we failed to add these partitions.
+            MetaStoreListenerNotifier.notifyEvent(listeners,
+                EventType.ADD_PARTITION,
+                new AddPartitionEvent(tbl, existingParts, false, this),
+                null, null, ms);
+          }
+        }
+
+        // Update partition column statistics if available
+        for (Partition newPart : newParts) {
+          if (newPart.isSetColStats()) {
+            updatePartitonColStatsInternal(tbl, newPart.getColStats(), null, newPart.getWriteId());
+          }
+        }
+
         success = ms.commitTransaction();
       } finally {
         if (!success) {
@@ -3533,24 +3556,9 @@ public class HiveMetaStore extends ThriftHiveMetastore {
                 new AddPartitionEvent(tbl, parts, false, this),
                 null, null, ms);
           }
-        } else {
-          if (!listeners.isEmpty()) {
-            MetaStoreListenerNotifier.notifyEvent(listeners,
-                EventType.ADD_PARTITION,
-                new AddPartitionEvent(tbl, newParts, true, this),
-                null,
-                transactionalListenerResponses, ms);
-
-            if (!existingParts.isEmpty()) {
-              // The request has succeeded but we failed to add these partitions.
-              MetaStoreListenerNotifier.notifyEvent(listeners,
-                  EventType.ADD_PARTITION,
-                  new AddPartitionEvent(tbl, existingParts, false, this),
-                  null, null, ms);
-            }
-          }
         }
       }
+
       return newParts;
     }

@@ -6047,12 +6055,14 @@ public class HiveMetaStore extends ThriftHiveMetastore {
         if (transactionalListeners != null && !transactionalListeners.isEmpty()) {
           MetaStoreListenerNotifier.notifyEvent(transactionalListeners,
               EventType.UPDATE_PARTITION_COLUMN_STAT,
-              new UpdatePartitionColumnStatEvent(colStats, partVals, parameters, validWriteIds, writeId, this));
+              new UpdatePartitionColumnStatEvent(colStats, partVals, parameters, tbl, validWriteIds,
+                  writeId, this));
         }
         if (!listeners.isEmpty()) {
           MetaStoreListenerNotifier.notifyEvent(listeners,
               EventType.UPDATE_PARTITION_COLUMN_STAT,
-              new UpdatePartitionColumnStatEvent(colStats, partVals, parameters, validWriteIds, writeId, this));
+              new UpdatePartitionColumnStatEvent(colStats, partVals, parameters, tbl, validWriteIds,
+                  writeId, this));
         }
       }
       committed = getMS().commitTransaction();
@@ -6336,25 +6346,66 @@ public class HiveMetaStore extends ThriftHiveMetastore {
     @Override
     public List<Partition> get_partitions_by_names(final String dbName, final String tblName,
        final List<String> partNames) throws TException {
+      return get_partitions_by_names(dbName, tblName, partNames, false);
+    }

-      String[] parsedDbName = parseDbName(dbName, conf);
+    @Override
+    public GetPartitionsByNamesResult get_partitions_by_names_req(GetPartitionsByNamesRequest gpbnr)
+        throws TException {
+      List<Partition> partitions = get_partitions_by_names(gpbnr.getDb_name(),
+          gpbnr.getTbl_name(), gpbnr.getNames(), gpbnr.isGet_col_stats());
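One practical consequence of adding a _req variant is that older metastore servers reject the unknown method, so a defensive caller can fall back to the original call. A hedged sketch of that pattern follows; nothing in this commit mandates it, and fetchPartitions plus the surrounding wiring are assumptions:

    import java.util.List;
    import org.apache.hadoop.hive.metastore.api.GetPartitionsByNamesRequest;
    import org.apache.hadoop.hive.metastore.api.Partition;
    import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;
    import org.apache.thrift.TApplicationException;
    import org.apache.thrift.TException;

    public class CompatiblePartitionFetch {
      static List<Partition> fetchPartitions(ThriftHiveMetastore.Iface client,
          String db, String tbl, List<String> names, boolean withStats)
          throws TException {
        try {
          GetPartitionsByNamesRequest req = new GetPartitionsByNamesRequest(db, tbl);
          req.setNames(names);
          req.setGet_col_stats(withStats);
          return client.get_partitions_by_names_req(req).getPartitions();
        } catch (TApplicationException e) {
          if (e.getType() == TApplicationException.UNKNOWN_METHOD) {
            // Pre-HIVE-21079 server: fall back; no column stats attached.
            return client.get_partitions_by_names(db, tbl, names);
          }
          throw e;
        }
      }
    }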
[07/11] hive git commit: HIVE-21079: Replicate column statistics for partitions of partitioned table (Ashutosh Bapat, reviewed by Sankar Hariappan)
http://git-wip-us.apache.org/repos/asf/hive/blob/71dfd1d1/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
index 1bdbbbf..e0431e5 100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
@@ -208,6 +208,8 @@ import org.slf4j.LoggerFactory;
     public List<Partition> get_partitions_by_names(String db_name, String tbl_name, List<String> names) throws MetaException, NoSuchObjectException, org.apache.thrift.TException;

+    public GetPartitionsByNamesResult get_partitions_by_names_req(GetPartitionsByNamesRequest req) throws MetaException, NoSuchObjectException, org.apache.thrift.TException;
+
     public void alter_partition(String db_name, String tbl_name, Partition new_part) throws InvalidOperationException, MetaException, org.apache.thrift.TException;

     public void alter_partitions(String db_name, String tbl_name, List<Partition> new_parts) throws InvalidOperationException, MetaException, org.apache.thrift.TException;
@@ -656,6 +658,8 @@ import org.slf4j.LoggerFactory;
     public void get_partitions_by_names(String db_name, String tbl_name, List<String> names, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;

+    public void get_partitions_by_names_req(GetPartitionsByNamesRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+
     public void alter_partition(String db_name, String tbl_name, Partition new_part, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;

     public void alter_partitions(String db_name, String tbl_name, List<Partition> new_parts, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
@@ -3460,6 +3464,35 @@ import org.slf4j.LoggerFactory;
       throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "get_partitions_by_names failed: unknown result");
     }

+    public GetPartitionsByNamesResult get_partitions_by_names_req(GetPartitionsByNamesRequest req) throws MetaException, NoSuchObjectException, org.apache.thrift.TException
+    {
+      send_get_partitions_by_names_req(req);
+      return recv_get_partitions_by_names_req();
+    }
+
+    public void send_get_partitions_by_names_req(GetPartitionsByNamesRequest req) throws org.apache.thrift.TException
+    {
+      get_partitions_by_names_req_args args = new get_partitions_by_names_req_args();
+      args.setReq(req);
+      sendBase("get_partitions_by_names_req", args);
+    }
+
+    public GetPartitionsByNamesResult recv_get_partitions_by_names_req() throws MetaException, NoSuchObjectException, org.apache.thrift.TException
+    {
+      get_partitions_by_names_req_result result = new get_partitions_by_names_req_result();
+      receiveBase(result, "get_partitions_by_names_req");
+      if (result.isSetSuccess()) {
+        return result.success;
+      }
+      if (result.o1 != null) {
+        throw result.o1;
+      }
+      if (result.o2 != null) {
+        throw result.o2;
+      }
+      throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "get_partitions_by_names_req failed: unknown result");
+    }
+
     public void alter_partition(String db_name, String tbl_name, Partition new_part) throws InvalidOperationException, MetaException, org.apache.thrift.TException
     {
       send_alter_partition(db_name, tbl_name, new_part);
@@ -10309,6 +10342,38 @@ import org.slf4j.LoggerFactory;
       }
     }

+    public void get_partitions_by_names_req(GetPartitionsByNamesRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
+      checkReady();
+      get_partitions_by_names_req_call method_call = new get_partitions_by_names_req_call(req, resultHandler, this, ___protocolFactory, ___transport);
+      this.___currentMethod = method_call;
+      ___manager.call(method_call);
+    }
+
+    @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_partitions_by_names_req_call extends org.apache.thrift.async.TAsyncMethodCall {
+      private GetPartitionsByNamesRequest req;
+      public get_partitions_by_names_req_call(GetPartitionsByNamesRequest req, org.apache.thrift.async.