xuefuz commented on a change in pull request #8449: [FLINK-12235][hive] Support Hive partition in HiveCatalog
URL: https://github.com/apache/flink/pull/8449#discussion_r287007547
##########
File path: flink-connectors/flink-connector-hive/src/main/java/org/apache/flink/table/catalog/hive/HiveCatalog.java
##########
@@ -608,44 +611,283 @@ private static Table instantiateHiveTable(ObjectPath tablePath, CatalogBaseTabl
 	// ------ partitions ------

 	@Override
-	public void createPartition(ObjectPath tablePath, CatalogPartitionSpec partitionSpec, CatalogPartition partition, boolean ignoreIfExists)
-			throws TableNotExistException, TableNotPartitionedException, PartitionSpecInvalidException, PartitionAlreadyExistsException, CatalogException {
-		throw new UnsupportedOperationException();
+	public boolean partitionExists(ObjectPath tablePath, CatalogPartitionSpec partitionSpec)
+			throws CatalogException {
+		checkNotNull(tablePath, "Table path cannot be null");
+		checkNotNull(partitionSpec, "CatalogPartitionSpec cannot be null");
+		try {
+			return getHivePartition(tablePath, partitionSpec) != null;
+		} catch (NoSuchObjectException | TableNotExistException | PartitionSpecInvalidException e) {
+			return false;
+		} catch (TException e) {
+			throw new CatalogException(
+				String.format("Failed to get partition %s of table %s", partitionSpec, tablePath), e);
+		}
 	}

 	@Override
-	public void dropPartition(ObjectPath tablePath, CatalogPartitionSpec partitionSpec, boolean ignoreIfNotExists)
-			throws PartitionNotExistException, CatalogException {
-		throw new UnsupportedOperationException();
+	public void createPartition(ObjectPath tablePath, CatalogPartitionSpec partitionSpec, CatalogPartition partition, boolean ignoreIfExists)
+			throws TableNotExistException, TableNotPartitionedException, PartitionSpecInvalidException, PartitionAlreadyExistsException, CatalogException {
+		checkNotNull(tablePath, "Table path cannot be null");
+		checkNotNull(partitionSpec, "CatalogPartitionSpec cannot be null");
+		checkNotNull(partition, "Partition cannot be null");
+
+		if (!(partition instanceof HiveCatalogPartition)) {
+			throw new CatalogException("Currently only supports HiveCatalogPartition");
+		}
+
+		Table hiveTable = getHiveTable(tablePath);
+
+		ensureTableAndPartitionMatch(hiveTable, partition);
+
+		ensurePartitionedTable(tablePath, hiveTable);
+
+		try {
+			client.add_partition(instantiateHivePartition(hiveTable, partitionSpec, partition));
+		} catch (AlreadyExistsException e) {
+			if (!ignoreIfExists) {
+				throw new PartitionAlreadyExistsException(catalogName, tablePath, partitionSpec);
+			}
+		} catch (TException e) {
+			throw new CatalogException(
+				String.format("Failed to create partition %s of table %s", partitionSpec, tablePath), e);
+		}
 	}

 	@Override
-	public void alterPartition(ObjectPath tablePath, CatalogPartitionSpec partitionSpec, CatalogPartition newPartition, boolean ignoreIfNotExists)
+	public void dropPartition(ObjectPath tablePath, CatalogPartitionSpec partitionSpec, boolean ignoreIfNotExists)
 			throws PartitionNotExistException, CatalogException {
-		throw new UnsupportedOperationException();
+		checkNotNull(tablePath, "Table path cannot be null");
+		checkNotNull(partitionSpec, "CatalogPartitionSpec cannot be null");
+		try {
+			Table hiveTable = getHiveTable(tablePath);
+			client.dropPartition(tablePath.getDatabaseName(), tablePath.getObjectName(),
+				getOrderedFullPartitionValues(partitionSpec, getFieldNames(hiveTable.getPartitionKeys()), tablePath), true);
+		} catch (NoSuchObjectException e) {
+			if (!ignoreIfNotExists) {
+				throw new PartitionNotExistException(catalogName, tablePath, partitionSpec, e);
+			}
+		} catch (MetaException | TableNotExistException | PartitionSpecInvalidException e) {
+			throw new PartitionNotExistException(catalogName, tablePath, partitionSpec, e);
+		} catch (TException e) {
+			throw new CatalogException(
+				String.format("Failed to drop partition %s of table %s", partitionSpec, tablePath), e);
+		}
 	}

 	@Override
 	public List<CatalogPartitionSpec> listPartitions(ObjectPath tablePath)
 			throws TableNotExistException, TableNotPartitionedException, CatalogException {
-		throw new UnsupportedOperationException();
+		checkNotNull(tablePath, "Table path cannot be null");
+
+		Table hiveTable = getHiveTable(tablePath);
+
+		ensurePartitionedTable(tablePath, hiveTable);
+
+		try {
+			// pass -1 as max_parts to fetch all partitions
+			return client.listPartitionNames(tablePath.getDatabaseName(), tablePath.getObjectName(), (short) -1).stream()
+				.map(HiveCatalog::createPartitionSpec).collect(Collectors.toList());
+		} catch (TException e) {
+			throw new CatalogException(
+				String.format("Failed to list partitions of table %s", tablePath), e);
+		}
 	}

 	@Override
 	public List<CatalogPartitionSpec> listPartitions(ObjectPath tablePath, CatalogPartitionSpec partitionSpec)
 			throws TableNotExistException, TableNotPartitionedException, CatalogException {
-		throw new UnsupportedOperationException();
+		checkNotNull(tablePath, "Table path cannot be null");
+		checkNotNull(partitionSpec, "CatalogPartitionSpec cannot be null");
+
+		Table hiveTable = getHiveTable(tablePath);
+
+		ensurePartitionedTable(tablePath, hiveTable);
+
+		try {
+			// partition spec can be partial
+			List<String> partialVals = MetaStoreUtils.getPvals(hiveTable.getPartitionKeys(), partitionSpec.getPartitionSpec());
+			return client.listPartitionNames(tablePath.getDatabaseName(), tablePath.getObjectName(), partialVals,
+				(short) -1).stream().map(HiveCatalog::createPartitionSpec).collect(Collectors.toList());
+		} catch (TException e) {
+			throw new CatalogException(
+				String.format("Failed to list partitions of table %s", tablePath), e);
+		}
 	}

 	@Override
 	public CatalogPartition getPartition(ObjectPath tablePath, CatalogPartitionSpec partitionSpec)
 			throws PartitionNotExistException, CatalogException {
-		throw new UnsupportedOperationException();
+		checkNotNull(tablePath, "Table path cannot be null");
+		checkNotNull(partitionSpec, "CatalogPartitionSpec cannot be null");
+
+		try {
+			Partition hivePartition = getHivePartition(tablePath, partitionSpec);
+			return instantiateCatalogPartition(hivePartition);
+		} catch (NoSuchObjectException | MetaException | TableNotExistException | PartitionSpecInvalidException e) {
+			throw new PartitionNotExistException(catalogName, tablePath, partitionSpec, e);
+		} catch (TException e) {
+			throw new CatalogException(
+				String.format("Failed to get partition %s of table %s", partitionSpec, tablePath), e);
+		}
 	}

 	@Override
-	public boolean partitionExists(ObjectPath tablePath, CatalogPartitionSpec partitionSpec) throws CatalogException {
-		throw new UnsupportedOperationException();
+	public void alterPartition(ObjectPath tablePath, CatalogPartitionSpec partitionSpec, CatalogPartition newPartition, boolean ignoreIfNotExists)
+			throws PartitionNotExistException, CatalogException {
+		checkNotNull(tablePath, "Table path cannot be null");
+		checkNotNull(partitionSpec, "CatalogPartitionSpec cannot be null");
+		checkNotNull(newPartition, "New partition cannot be null");
+
+		if (!(newPartition instanceof HiveCatalogPartition)) {
+			throw new CatalogException("Currently only supports HiveCatalogPartition");
+		}
+
+		// Explicitly check if the partition exists or not
+		// because alter_partition() doesn't throw NoSuchObjectException like dropPartition() when the target doesn't exist
+		try {
+			Table hiveTable = getHiveTable(tablePath);
+			ensureTableAndPartitionMatch(hiveTable, newPartition);
+			Partition oldHivePartition = getHivePartition(hiveTable, partitionSpec);
+			if (oldHivePartition == null) {
+				if (ignoreIfNotExists) {
+					return;
+				}
+				throw new PartitionNotExistException(catalogName, tablePath, partitionSpec);
+			}
+			Partition newHivePartition = instantiateHivePartition(hiveTable, partitionSpec, newPartition);
+			if (newHivePartition.getSd().getLocation() == null) {
+				newHivePartition.getSd().setLocation(oldHivePartition.getSd().getLocation());
+			}
+			client.alter_partition(
+				tablePath.getDatabaseName(),
+				tablePath.getObjectName(),
+				newHivePartition
+			);
+		} catch (NoSuchObjectException e) {
+			if (!ignoreIfNotExists) {
+				throw new PartitionNotExistException(catalogName, tablePath, partitionSpec, e);
+			}
+		} catch (InvalidOperationException | MetaException | TableNotExistException | PartitionSpecInvalidException e) {
+			throw new PartitionNotExistException(catalogName, tablePath, partitionSpec, e);
+		} catch (TException e) {
+			throw new CatalogException(
+				String.format("Failed to alter existing partition with new partition %s of table %s",
+					partitionSpec, tablePath), e);
+		}
+	}
+
+	// make sure both table and partition are generic, or neither is
+	private void ensureTableAndPartitionMatch(Table hiveTable, CatalogPartition catalogPartition) {
Review comment:
static?
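   i.e., since the helper appears to operate only on its arguments and not on any HiveCatalog instance state, it could probably be declared static. A rough sketch of that change follows; the method body and the "is_generic" table property used here are only illustrative guesses, because the actual implementation is cut off in this hunk:

	// Illustrative sketch only: declared static because it does not touch instance state.
	// The check below is a placeholder; the PR's real body is not visible above.
	private static void ensureTableAndPartitionMatch(Table hiveTable, CatalogPartition catalogPartition) {
		// assume a table is "generic" when it carries the (hypothetical here) "is_generic" parameter,
		// and a partition is non-generic when it is a HiveCatalogPartition
		boolean isGenericTable = hiveTable.getParameters() != null
			&& Boolean.parseBoolean(hiveTable.getParameters().get("is_generic"));
		boolean isHivePartition = catalogPartition instanceof HiveCatalogPartition;

		// mismatch: generic table with a Hive partition, or Hive table with a generic partition
		if (isGenericTable == isHivePartition) {
			throw new CatalogException(String.format(
				"Partition of class %s does not match table %s.%s",
				catalogPartition.getClass().getName(), hiveTable.getDbName(), hiveTable.getTableName()));
		}
	}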
----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
For queries about this service, please contact Infrastructure at:
[email protected]
With regards,
Apache Git Services