deniskuzZ commented on code in PR #5123:
URL: https://github.com/apache/hive/pull/5123#discussion_r1575838382
##########
iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/compaction/IcebergMajorQueryCompactor.java:
##########
@@ -44,22 +55,71 @@ public boolean run(CompactorContext context) throws
IOException, HiveException,
Map<String, String> tblProperties = context.getTable().getParameters();
LOG.debug("Initiating compaction for the {} table", compactTableName);
- String compactionQuery = String.format("insert overwrite table %s select *
from %<s",
- compactTableName);
+ HiveConf conf = new HiveConf(context.getConf());
+ String partSpec = context.getCompactionInfo().partName;
+ String compactionQuery;
+
+ if (partSpec == null) {
+ HiveConf.setVar(conf, ConfVars.REWRITE_POLICY,
RewritePolicy.ALL_PARTITIONS.name());
+ compactionQuery = String.format("insert overwrite table %s select * from
%<s", compactTableName);
+ } else {
+ Table table = IcebergTableUtil.getTable(conf, context.getTable());
+ List<String> partFields =
table.spec().fields().stream().map(PartitionField::name).collect(Collectors.toList());
+ String queryFields =
table.schema().columns().stream().map(Types.NestedField::name)
+ .filter(f ->
!partFields.contains(f)).collect(Collectors.joining(","));
+ PartitionData partitionData = DataFiles.data(table.spec(), partSpec);
+ compactionQuery = String.format("insert overwrite table %1$s
partition(%2$s) select %4$s from %1$s where %3$s",
+ compactTableName, partDataToSQL(partitionData, partSpec, ","),
+ partDataToSQL(partitionData, partSpec, " and "), queryFields);
+ }
+
+ SessionState sessionState = setupQueryCompactionSession(conf,
context.getCompactionInfo(), tblProperties);
- SessionState sessionState = setupQueryCompactionSession(context.getConf(),
- context.getCompactionInfo(), tblProperties);
- HiveConf.setVar(context.getConf(), ConfVars.REWRITE_POLICY,
RewritePolicy.ALL_PARTITIONS.name());
try {
- DriverUtils.runOnDriver(context.getConf(), sessionState,
compactionQuery);
+ DriverUtils.runOnDriver(conf, sessionState, compactionQuery);
LOG.info("Completed compaction for table {}", compactTableName);
+ return true;
} catch (HiveException e) {
- LOG.error("Error doing query based {} compaction",
RewritePolicy.ALL_PARTITIONS.name(), e);
- throw new RuntimeException(e);
+ LOG.error("Failed compacting table {}", compactTableName, e);
+ throw e;
} finally {
sessionState.setCompaction(false);
}
+ }
+
+ private String partDataToSQL(PartitionData partitionData, String partSpec,
String delimiter)
+ throws HiveException {
+
+ try {
+ Map<String, String> partSpecMap = Warehouse.makeSpecFromName(partSpec);
+ StringBuilder sb = new StringBuilder();
+
+ for (int i = 0; i < partitionData.size(); ++i) {
+ if (i > 0) {
+ sb.append(delimiter);
+ }
+
+ String quoteOpt = "";
+ if (partitionData.getType(i).typeId() == Type.TypeID.STRING ||
+ partitionData.getType(i).typeId() == Type.TypeID.DATE ||
+ partitionData.getType(i).typeId() == Type.TypeID.TIME ||
+ partitionData.getType(i).typeId() == Type.TypeID.TIMESTAMP ||
+ partitionData.getType(i).typeId() == Type.TypeID.BINARY) {
+ quoteOpt = "'";
+ }
- return true;
+ String fieldName = partitionData.getSchema().getFields().get(i).name();
+
+ sb.append(fieldName)
+ .append("=")
+ .append(quoteOpt)
+ .append(partSpecMap.get(fieldName))
+ .append(quoteOpt);
Review Comment:
How about the following instead?
````
Map<String, String> partSpecMap = new LinkedHashMap<>();
Warehouse.makeSpecFromName(partSpecMap, new Path(partSpec), null);
List<FieldSchema> partitionKeys =
table.getStorageHandler().getPartitionKeys(table);
List<String> partValues = partitionKeys.stream().map(
fs -> String.join("=", HiveUtils.unparseIdentifier(fs.getName()),
TypeInfoUtils.convertStringToLiteralForSQL(partSpecMap.get(fs.getName()),
((PrimitiveTypeInfo)
TypeInfoUtils.getTypeInfoFromTypeString(fs.getType())).getPrimitiveCategory())
)
).collect(Collectors.toList());
String queryFields = table.getCols().stream()
.map(FieldSchema::getName)
.filter(col -> !partSpecMap.containsKey(col))
.collect(Collectors.joining(","));
compactionQuery = String.format("insert overwrite table %1$s partition(%2$s)
select %4$s from %1$s where %3$s",
compactTableName,
StringUtils.join(partValues, ","),
StringUtils.join(partValues, " and "),
queryFields);
````
`table.getStorageHandler().getColumnInfo` looks expensive and unnecessary here.
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]