This is an automated email from the ASF dual-hosted git repository.
jshao pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/gravitino.git
The following commit(s) were added to refs/heads/main by this push:
new d1531583a [#4188] Renamed local instance of StorageDescriptor in
buildStorageDescriptor (#4276)
d1531583a is described below
commit d1531583aac7ceb3250b943342a99e4106462095
Author: Ashwin Kherde <[email protected]>
AuthorDate: Sun Jul 28 22:01:37 2024 -0400
[#4188] Renamed local instance of StorageDescriptor in
buildStorageDescriptor (#4276)
### What changes were proposed in this pull request?
The local variable sd in the buildStorageDescriptor method of
HiveTable.java has been renamed to strgDesc.
### Why are the changes needed?
The local variable sd was shadowing a class member of the same name,
which could lead to bugs and confusion.
Fix: #4188
---
.../apache/gravitino/catalog/hive/HiveTable.java | 25 +++++++++++-----------
1 file changed, 13 insertions(+), 12 deletions(-)
diff --git
a/catalogs/catalog-hive/src/main/java/org/apache/gravitino/catalog/hive/HiveTable.java
b/catalogs/catalog-hive/src/main/java/org/apache/gravitino/catalog/hive/HiveTable.java
index bb5d865e9..f1c5f45fb 100644
---
a/catalogs/catalog-hive/src/main/java/org/apache/gravitino/catalog/hive/HiveTable.java
+++
b/catalogs/catalog-hive/src/main/java/org/apache/gravitino/catalog/hive/HiveTable.java
@@ -263,10 +263,10 @@ public class HiveTable extends BaseTable {
private StorageDescriptor buildStorageDescriptor(
PropertiesMetadata tablePropertiesMetadata, List<FieldSchema>
partitionFields) {
- StorageDescriptor sd = new StorageDescriptor();
+ StorageDescriptor strgDesc = new StorageDescriptor();
List<String> partitionKeys =
partitionFields.stream().map(FieldSchema::getName).collect(Collectors.toList());
- sd.setCols(
+ strgDesc.setCols(
Arrays.stream(columns)
.filter(c -> !partitionKeys.contains(c.name()))
.map(
@@ -282,38 +282,39 @@ public class HiveTable extends BaseTable {
// `location` must not be null, otherwise it will result in an NPE when
calling HMS `alterTable`
// interface
Optional.ofNullable(properties().get(HiveTablePropertiesMetadata.LOCATION))
- .ifPresent(l ->
sd.setLocation(properties().get(HiveTablePropertiesMetadata.LOCATION)));
+ .ifPresent(
+ l ->
strgDesc.setLocation(properties().get(HiveTablePropertiesMetadata.LOCATION)));
- sd.setSerdeInfo(buildSerDeInfo(tablePropertiesMetadata));
+ strgDesc.setSerdeInfo(buildSerDeInfo(tablePropertiesMetadata));
StorageFormat storageFormat =
(StorageFormat)
tablePropertiesMetadata.getOrDefault(properties(),
HiveTablePropertiesMetadata.FORMAT);
- sd.setInputFormat(storageFormat.getInputFormat());
- sd.setOutputFormat(storageFormat.getOutputFormat());
+ strgDesc.setInputFormat(storageFormat.getInputFormat());
+ strgDesc.setOutputFormat(storageFormat.getOutputFormat());
// Individually specified INPUT_FORMAT and OUTPUT_FORMAT can override the
inputFormat and
// outputFormat of FORMAT
Optional.ofNullable(properties().get(HiveTablePropertiesMetadata.INPUT_FORMAT))
- .ifPresent(sd::setInputFormat);
+ .ifPresent(strgDesc::setInputFormat);
Optional.ofNullable(properties().get(HiveTablePropertiesMetadata.OUTPUT_FORMAT))
- .ifPresent(sd::setOutputFormat);
+ .ifPresent(strgDesc::setOutputFormat);
if (ArrayUtils.isNotEmpty(sortOrders)) {
for (SortOrder sortOrder : sortOrders) {
String columnName = ((NamedReference.FieldReference)
sortOrder.expression()).fieldName()[0];
- sd.addToSortCols(
+ strgDesc.addToSortCols(
new Order(columnName, sortOrder.direction() ==
SortDirection.ASCENDING ? 1 : 0));
}
}
if (!Distributions.NONE.equals(distribution)) {
- sd.setBucketCols(
+ strgDesc.setBucketCols(
Arrays.stream(distribution.expressions())
.map(t -> ((NamedReference.FieldReference) t).fieldName()[0])
.collect(Collectors.toList()));
- sd.setNumBuckets(distribution.number());
+ strgDesc.setNumBuckets(distribution.number());
}
- return sd;
+ return strgDesc;
}
private SerDeInfo buildSerDeInfo(PropertiesMetadata tablePropertiesMetadata)
{