Repository: incubator-hawq Updated Branches: refs/heads/master fad1b9050 -> 6b2a18e72
HAWQ-1213. Incorrect check of hawqregister in case of randomly distributed table with non-default `default_hash_table_bucket_number` value.

Project: http://git-wip-us.apache.org/repos/asf/incubator-hawq/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-hawq/commit/6b2a18e7
Tree: http://git-wip-us.apache.org/repos/asf/incubator-hawq/tree/6b2a18e7
Diff: http://git-wip-us.apache.org/repos/asf/incubator-hawq/diff/6b2a18e7

Branch: refs/heads/master
Commit: 6b2a18e72e956b03dce86d304a98a59d121f27a3
Parents: fad1b90
Author: xunzhang <[email protected]>
Authored: Sun Dec 11 16:32:06 2016 +0800
Committer: xunzhang <[email protected]>
Committed: Mon Dec 12 13:42:10 2016 +0800

----------------------------------------------------------------------
 tools/bin/hawqregister | 31 ++++++++++++++++++++++---------
 1 file changed, 22 insertions(+), 9 deletions(-)
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/6b2a18e7/tools/bin/hawqregister
----------------------------------------------------------------------
diff --git a/tools/bin/hawqregister b/tools/bin/hawqregister
index 73ce204..7ec14a1 100755
--- a/tools/bin/hawqregister
+++ b/tools/bin/hawqregister
@@ -234,24 +234,33 @@ class GpRegisterAccessor(object):
             else:
                 partlist = partlist + "PARTITION " + partition_refine_name + " " + partitions_constraint[index]
 
+        bucket_number_policy = ', bucketnum=%s)' % bucket_number if distrbution_policy != 'DISTRIBUTED RANDOMLY' else ')'
         fmt = 'ROW' if fmt == 'AO' else fmt
         if fmt == 'ROW':
             if partitionby is None:
-                query = ('create table %s(%s) with (appendonly=true, orientation=%s, compresstype=%s, compresslevel=%s, checksum=%s, bucketnum=%s) %s;'
-                         % (tablename, schema, fmt, file_locations['CompressionType'], file_locations['CompressionLevel'], file_locations['Checksum'], bucket_number, distrbution_policy))
+                query = ('create table %s(%s) with (appendonly=true, orientation=%s, compresstype=%s, compresslevel=%s, checksum=%s%s %s;'
+                         % (tablename, schema, fmt, file_locations['CompressionType'], file_locations['CompressionLevel'], file_locations['Checksum'], bucket_number_policy, distrbution_policy))
             else:
-                query = ('create table %s(%s) with (appendonly=true, orientation=%s, compresstype=%s, compresslevel=%s, checksum=%s, bucketnum=%s) %s %s (%s);'
-                         % (tablename, schema, fmt, file_locations['CompressionType'], file_locations['CompressionLevel'], file_locations['Checksum'], bucket_number, distrbution_policy, partitionby, partlist))
+                query = ('create table %s(%s) with (appendonly=true, orientation=%s, compresstype=%s, compresslevel=%s, checksum=%s%s %s %s (%s);'
+                         % (tablename, schema, fmt, file_locations['CompressionType'], file_locations['CompressionLevel'], file_locations['Checksum'], bucket_number_policy, distrbution_policy, partitionby, partlist))
         else: # Parquet
             if partitionby is None:
-                query = ('create table %s(%s) with (appendonly=true, orientation=%s, compresstype=%s, compresslevel=%s, pagesize=%s, rowgroupsize=%s, bucketnum=%s) %s;'
-                         % (tablename, schema, fmt, file_locations['CompressionType'], file_locations['CompressionLevel'], file_locations['PageSize'], file_locations['RowGroupSize'], bucket_number, distrbution_policy))
+                query = ('create table %s(%s) with (appendonly=true, orientation=%s, compresstype=%s, compresslevel=%s, pagesize=%s, rowgroupsize=%s%s %s;'
+                         % (tablename, schema, fmt, file_locations['CompressionType'], file_locations['CompressionLevel'], file_locations['PageSize'], file_locations['RowGroupSize'], bucket_number_policy, distrbution_policy))
             else:
-                query = ('create table %s(%s) with (appendonly=true, orientation=%s, compresstype=%s, compresslevel=%s, pagesize=%s, rowgroupsize=%s, bucketnum=%s) %s %s (%s);'
-                         % (tablename, schema, fmt, file_locations['CompressionType'], file_locations['CompressionLevel'], file_locations['PageSize'], file_locations['RowGroupSize'], bucket_number, distrbution_policy, partitionby, partlist))
+                query = ('create table %s(%s) with (appendonly=true, orientation=%s, compresstype=%s, compresslevel=%s, pagesize=%s, rowgroupsize=%s%s %s %s (%s);'
+                         % (tablename, schema, fmt, file_locations['CompressionType'], file_locations['CompressionLevel'], file_locations['PageSize'], file_locations['RowGroupSize'], bucket_number_policy, distrbution_policy, partitionby, partlist))
         self.conn.query(query)
         return True, query
 
+    def is_hash_distributed(self, tablename):
+        schemaname, tablename = tablename_handler(tablename)
+        qry = """select attrnums from gp_distribution_policy, pg_class where pg_class.relnamespace = '%s' and pg_class.relname = '%s' and pg_class.oid = gp_distribution_policy.localoid;""" % (self.get_schema_id(schemaname), tablename)
+        rows = self.exec_query(qry)
+        if rows[0]['attrnums']:
+            return True
+        return False
+
     def check_hash_type(self, tablename):
         schemaname, tablename = tablename_handler(tablename)
         qry = """select attrnums from gp_distribution_policy, pg_class where pg_class.relnamespace = '%s' and pg_class.relname = '%s' and pg_class.oid = gp_distribution_policy.localoid;""" % (self.get_schema_id(schemaname), tablename)
@@ -402,6 +411,9 @@ class HawqRegister(object):
         else:
             return 'usage1'
 
+    def _is_hash_distributed(self):
+        return self.accessor.is_hash_distributed(self.dst_table_name)
+
     def _check_hash_type(self):
         self.accessor.check_hash_type(self.dst_table_name)
 
@@ -645,7 +657,8 @@ class HawqRegister(object):
     def _do_check(self):
         if self.yml:
-            self._check_bucket_number()
+            if self._is_hash_distributed():
+                self._check_bucket_number()
             self._check_distribution_policy()
             self._check_policy_consistency()
             self._check_no_regex_filepath(self.files)
