soumilshah1995 commented on issue #8019:
URL: https://github.com/apache/hudi/issues/8019#issuecomment-1440353963
Hi @papablus
is this right way to call show commits procedure ?
```
try:
import sys
from awsglue.transforms import *
from awsglue.utils import getResolvedOptions
from pyspark.context import SparkContext
from awsglue.context import GlueContext
from awsglue.job import Job
from pyspark.sql.session import SparkSession
from awsglue.dynamicframe import DynamicFrame
from pyspark.sql.functions import col, to_timestamp,
monotonically_increasing_id, to_date, when
from pyspark.sql.functions import *
from awsglue.utils import getResolvedOptions
from awsglueml.transforms import EntityDetector
from pyspark.sql.types import StringType
from pyspark.sql.types import *
from datetime import datetime
import boto3
from functools import reduce
except Exception as e:
print("Error ")
spark = (SparkSession.builder
.config('spark.serializer',
'org.apache.spark.serializer.KryoSerializer')
.config('spark.sql.hive.convertMetastoreParquet', 'false')
.config('spark.sql.catalog.spark_catalog',
'org.apache.spark.sql.hudi.catalog.HoodieCatalog')
.config('spark.sql.extensions',
'org.apache.spark.sql.hudi.HoodieSparkSessionExtension')
.config('spark.sql.legacy.pathOptionBehavior.enabled',
'true').getOrCreate())
sc = spark.sparkContext
glueContext = GlueContext(sc)
job = Job(glueContext)
logger = glueContext.get_logger()
db_name = "hudidb"
table_name = "hudi_table"
recordkey = 'emp_id'
path = "s3://hudi-demos-emr-serverless-project-soumil/tmp/"
method = 'upsert'
table_type = "COPY_ON_WRITE"
precombine = "ts"
partiton_field = "date"
connection_options = {
"path": path,
"connectionName": "hudi-connection",
"hoodie.datasource.write.storage.type": table_type,
'hoodie.datasource.write.precombine.field': precombine,
'className': 'org.apache.hudi',
'hoodie.table.name': table_name,
'hoodie.datasource.write.recordkey.field': recordkey,
'hoodie.datasource.write.table.name': table_name,
'hoodie.datasource.write.operation': method,
'hoodie.datasource.hive_sync.enable': 'true',
"hoodie.datasource.hive_sync.mode": "hms",
'hoodie.datasource.hive_sync.sync_as_datasource': 'false',
'hoodie.datasource.hive_sync.database': db_name,
'hoodie.datasource.hive_sync.table': table_name,
'hoodie.datasource.hive_sync.use_jdbc': 'false',
'hoodie.datasource.hive_sync.partition_extractor_class':
'org.apache.hudi.hive.MultiPartKeysValueExtractor',
'hoodie.datasource.write.hive_style_partitioning': 'true',
}
df = spark. \
read. \
format("hudi"). \
load(path)
print("\n")
print(df.show(2))
print("\n")
try:
query = """
call show_commits(hudidb.hudi_table);
"""
print(spark.sql(query).show())
except Exception as e:
print("Error 1", e)
try:
query = """
call show_commits();
"""
print(spark.sql(query).show())
except Exception as e:
print("Error 2", e)
try:
query = """
call show_commits(hudidb.hudi_table);
"""
print(spark.sql(query).show())
except Exception as e:
print("Error 1", e)
```
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]