deniskuzZ commented on code in PR #5995: URL: https://github.com/apache/hive/pull/5995#discussion_r2287636930
########## iceberg/iceberg-catalog/src/main/java/org/apache/iceberg/hive/MetastoreUtil.java: ########## @@ -87,4 +103,39 @@ public static void alterTable( } } } + + public static Table convertIcebergTableToHiveTable(org.apache.iceberg.Table icebergTable, Configuration conf) { + Table hiveTable = new Table(); + TableMetadata metadata = ((BaseTable) icebergTable).operations().current(); + long maxHiveTablePropertySize = conf.getLong(HiveOperationsBase.HIVE_TABLE_PROPERTY_MAX_SIZE, + HiveOperationsBase.HIVE_TABLE_PROPERTY_MAX_SIZE_DEFAULT); + HMSTablePropertyHelper.updateHmsTableForIcebergTable(metadata.metadataFileLocation(), hiveTable, metadata, + null, true, maxHiveTablePropertySize, null); + hiveTable.getParameters().put(CatalogUtils.ICEBERG_CATALOG_TYPE, CatalogUtil.ICEBERG_CATALOG_TYPE_REST); + TableName tableName = TableName.fromString(icebergTable.name(), null, null); + hiveTable.setTableName(tableName.getTable()); + hiveTable.setDbName(tableName.getDb()); + StorageDescriptor storageDescriptor = new StorageDescriptor(); + hiveTable.setSd(storageDescriptor); + hiveTable.setTableType("EXTERNAL_TABLE"); + hiveTable.setPartitionKeys(new LinkedList<>()); + List<FieldSchema> cols = new LinkedList<>(); + storageDescriptor.setCols(cols); + storageDescriptor.setLocation(icebergTable.location()); + storageDescriptor.setInputFormat(DEFAULT_INPUT_FORMAT_CLASS); + storageDescriptor.setOutputFormat(DEFAULT_OUTPUT_FORMAT_CLASS); + storageDescriptor.setBucketCols(new LinkedList<>()); + storageDescriptor.setSortCols(new LinkedList<>()); + storageDescriptor.setParameters(Maps.newHashMap()); + SerDeInfo serDeInfo = new SerDeInfo("icebergSerde", DEFAULT_SERDE_CLASS, Maps.newHashMap()); + serDeInfo.getParameters().put(serdeConstants.SERIALIZATION_FORMAT, "1"); // Default serialization format. 
+ storageDescriptor.setSerdeInfo(serDeInfo); + icebergTable.schema().columns().forEach(icebergColumn -> { + FieldSchema fieldSchema = new FieldSchema(); + fieldSchema.setName(icebergColumn.name()); + fieldSchema.setType(icebergColumn.type().toString()); + cols.add(fieldSchema); Review Comment: wonder if other catalogs support partitioning and we could directly set this information. We can investigate this in a follow-up. I've added a code snippet but commented it out: /*result.setPartitionKeys( IcebergTableUtil.getPartitionKeys(table, table.spec().specId()));*/ ########## iceberg/iceberg-catalog/src/main/java/org/apache/iceberg/hive/MetastoreUtil.java: ########## @@ -87,4 +103,39 @@ public static void alterTable( } } } + + public static Table convertIcebergTableToHiveTable(org.apache.iceberg.Table icebergTable, Configuration conf) { + Table hiveTable = new Table(); + TableMetadata metadata = ((BaseTable) icebergTable).operations().current(); + long maxHiveTablePropertySize = conf.getLong(HiveOperationsBase.HIVE_TABLE_PROPERTY_MAX_SIZE, + HiveOperationsBase.HIVE_TABLE_PROPERTY_MAX_SIZE_DEFAULT); + HMSTablePropertyHelper.updateHmsTableForIcebergTable(metadata.metadataFileLocation(), hiveTable, metadata, + null, true, maxHiveTablePropertySize, null); + hiveTable.getParameters().put(CatalogUtils.ICEBERG_CATALOG_TYPE, CatalogUtil.ICEBERG_CATALOG_TYPE_REST); + TableName tableName = TableName.fromString(icebergTable.name(), null, null); + hiveTable.setTableName(tableName.getTable()); + hiveTable.setDbName(tableName.getDb()); + StorageDescriptor storageDescriptor = new StorageDescriptor(); + hiveTable.setSd(storageDescriptor); + hiveTable.setTableType("EXTERNAL_TABLE"); + hiveTable.setPartitionKeys(new LinkedList<>()); + List<FieldSchema> cols = new LinkedList<>(); + storageDescriptor.setCols(cols); + storageDescriptor.setLocation(icebergTable.location()); + storageDescriptor.setInputFormat(DEFAULT_INPUT_FORMAT_CLASS); + 
storageDescriptor.setOutputFormat(DEFAULT_OUTPUT_FORMAT_CLASS); + storageDescriptor.setBucketCols(new LinkedList<>()); + storageDescriptor.setSortCols(new LinkedList<>()); + storageDescriptor.setParameters(Maps.newHashMap()); + SerDeInfo serDeInfo = new SerDeInfo("icebergSerde", DEFAULT_SERDE_CLASS, Maps.newHashMap()); + serDeInfo.getParameters().put(serdeConstants.SERIALIZATION_FORMAT, "1"); // Default serialization format. + storageDescriptor.setSerdeInfo(serDeInfo); + icebergTable.schema().columns().forEach(icebergColumn -> { + FieldSchema fieldSchema = new FieldSchema(); + fieldSchema.setName(icebergColumn.name()); + fieldSchema.setType(icebergColumn.type().toString()); + cols.add(fieldSchema); Review Comment: wonder if other catalogs support partitioning and we could directly set this information. We can investigate this in a follow-up. I've added a code snippet but commented it out: ```` result.setPartitionKeys( IcebergTableUtil.getPartitionKeys(table, table.spec().specId())); ```` -- This is an automated message from the Apache Git Service. To respond to the message, please log on to GitHub and use the URL above to go to the specific comment. To unsubscribe, e-mail: gitbox-unsubscr...@hive.apache.org For queries about this service, please contact Infrastructure at: us...@infra.apache.org --------------------------------------------------------------------- To unsubscribe, e-mail: gitbox-unsubscr...@hive.apache.org For additional commands, e-mail: gitbox-h...@hive.apache.org