KarlManong opened a new issue #4090:
URL: https://github.com/apache/iceberg/issues/4090
Related to #3763. `TestDelete.testDeleteWithSerializableIsolation` failed on CI: the assertion at `TestDelete.java:667` expects an `org.apache.spark.SparkException`, but the concurrent DELETE surfaced a `java.lang.IllegalArgumentException` (`Failed to cleanly delete data files matching: ref(name="id") == 1`) instead; the full log below shows the DELETE hitting a `CommitFailedException` retry against a concurrent append before it fails.
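
For reference, the assertion pattern implied by the stack trace is sketched below. This is a hypothetical, minimal reconstruction, not the actual contents of `TestDelete.java`: the class name, the `main` entry point, and the stand-in `delete` task are placeholders. It only illustrates how an `IllegalArgumentException` cause trips a matcher that expects `SparkException`, producing the same kind of Hamcrest mismatch message shown at the top of the log.

```
// Hypothetical sketch of the assertion pattern implied by the trace below,
// not the actual TestDelete.testDeleteWithSerializableIsolation code.
import static org.hamcrest.CoreMatchers.instanceOf;
import static org.junit.Assert.assertThat;

import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import org.apache.spark.SparkException;

public class SerializableIsolationSketch {
  public static void main(String[] args) throws Exception {
    ExecutorService executor = Executors.newSingleThreadExecutor();

    // Stand-in for the DELETE that races with a concurrent append; in the
    // flaky run it fails with the IllegalArgumentException quoted in the log.
    Callable<Void> delete = () -> {
      throw new IllegalArgumentException(
          "Failed to cleanly delete data files matching: ref(name=\"id\") == 1");
    };
    Future<Void> deleteFuture = executor.submit(delete);

    try {
      deleteFuture.get();
    } catch (ExecutionException e) {
      // This is where the reported run fails: the cause is an
      // IllegalArgumentException, not the expected SparkException.
      assertThat(e.getCause(), instanceOf(SparkException.class));
    } finally {
      executor.shutdownNow();
    }
  }
}
```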
```
java.lang.AssertionError:
Expected: an instance of org.apache.spark.SparkException
but: <java.lang.IllegalArgumentException: Failed to cleanly delete data files matching: ref(name="id") == 1> is a java.lang.IllegalArgumentException
at org.hamcrest.MatcherAssert.assertThat(MatcherAssert.java:20)
at org.junit.Assert.assertThat(Assert.java:964)
at org.junit.Assert.assertThat(Assert.java:930)
at org.apache.iceberg.spark.extensions.TestDelete.testDeleteWithSerializableIsolation(TestDelete.java:667)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:498)
at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59)
at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56)
at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27)
at org.junit.rules.ExternalResource$1.evaluate(ExternalResource.java:54)
at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306)
at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100)
at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366)
at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103)
at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63)
at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331)
at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79)
at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329)
at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66)
at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293)
at org.junit.runners.ParentRunner.run(ParentRunner.java:413)
at org.junit.runners.Suite.runChild(Suite.java:128)
at org.junit.runners.Suite.runChild(Suite.java:27)
at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331)
at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79)
at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329)
at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66)
at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293)
at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26)
at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27)
at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306)
at org.junit.runners.ParentRunner.run(ParentRunner.java:413)
at org.gradle.api.internal.tasks.testing.junit.JUnitTestClassExecutor.runTestClass(JUnitTestClassExecutor.java:110)
at org.gradle.api.internal.tasks.testing.junit.JUnitTestClassExecutor.execute(JUnitTestClassExecutor.java:58)
at org.gradle.api.internal.tasks.testing.junit.JUnitTestClassExecutor.execute(JUnitTestClassExecutor.java:38)
at org.gradle.api.internal.tasks.testing.junit.AbstractJUnitTestClassProcessor.processTestClass(AbstractJUnitTestClassProcessor.java:62)
at org.gradle.api.internal.tasks.testing.SuiteTestClassProcessor.processTestClass(SuiteTestClassProcessor.java:51)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:498)
at org.gradle.internal.dispatch.ReflectionDispatch.dispatch(ReflectionDispatch.java:36)
at org.gradle.internal.dispatch.ReflectionDispatch.dispatch(ReflectionDispatch.java:24)
at org.gradle.internal.dispatch.ContextClassLoaderDispatch.dispatch(ContextClassLoaderDispatch.java:33)
at org.gradle.internal.dispatch.ProxyDispatchAdapter$DispatchingInvocationHandler.invoke(ProxyDispatchAdapter.java:94)
at com.sun.proxy.$Proxy2.processTestClass(Unknown Source)
at org.gradle.api.internal.tasks.testing.worker.TestWorker$2.run(TestWorker.java:176)
at org.gradle.api.internal.tasks.testing.worker.TestWorker.executeAndMaintainThreadName(TestWorker.java:129)
at org.gradle.api.internal.tasks.testing.worker.TestWorker.execute(TestWorker.java:100)
at org.gradle.api.internal.tasks.testing.worker.TestWorker.execute(TestWorker.java:60)
at org.gradle.process.internal.worker.child.ActionExecutionWorker.execute(ActionExecutionWorker.java:56)
at org.gradle.process.internal.worker.child.SystemApplicationClassLoaderWorker.call(SystemApplicationClassLoaderWorker.java:133)
at org.gradle.process.internal.worker.child.SystemApplicationClassLoaderWorker.call(SystemApplicationClassLoaderWorker.java:71)
at worker.org.gradle.process.internal.worker.GradleWorkerMain.run(GradleWorkerMain.java:69)
at worker.org.gradle.process.internal.worker.GradleWorkerMain.main(GradleWorkerMain.java:74)
------- Stderr: -------
[Test worker] INFO org.apache.spark.sql.hive.HiveUtils - Initializing
HiveMetastoreConnection version 2.3.9 using Spark classes.
[Test worker] INFO org.apache.hadoop.hive.conf.HiveConf - Found
configuration file null
[Test worker] INFO org.apache.spark.sql.hive.client.HiveClientImpl -
Warehouse location for Hive client (version 2.3.9) is
file:/opt/buildagent/work/7dc556d552c231ec/spark/v3.2/spark-extensions/spark-warehouse
[Test worker] INFO hive.metastore - Trying to connect to metastore with URI
thrift://localhost:36735
[Test worker] INFO hive.metastore - Opened a connection to metastore,
current connections: 1
[Test worker] INFO hive.metastore - Connected to metastore.
[pool-306-thread-1] INFO org.apache.hadoop.hive.metastore.HiveMetaStore -
36: source:127.0.0.1 get_database: default
[pool-306-thread-1] INFO
org.apache.hadoop.hive.metastore.HiveMetaStore.audit - ugi=root
ip=127.0.0.1 cmd=source:127.0.0.1 get_database: default
[pool-306-thread-1] INFO org.apache.hadoop.hive.metastore.HiveMetaStore -
36: Opening raw store with implementation
class:org.apache.hadoop.hive.metastore.ObjectStore
[pool-306-thread-1] INFO org.apache.hadoop.hive.metastore.ObjectStore -
ObjectStore, initialize called
[pool-306-thread-1] INFO org.apache.hadoop.hive.metastore.ObjectStore -
Initialized ObjectStore
[pool-306-thread-1] INFO org.apache.hadoop.hive.metastore.HiveMetaStore -
36: source:127.0.0.1 get_database: global_temp
[pool-306-thread-1] INFO
org.apache.hadoop.hive.metastore.HiveMetaStore.audit - ugi=root
ip=127.0.0.1 cmd=source:127.0.0.1 get_database: global_temp
[pool-306-thread-1] INFO org.apache.hadoop.hive.metastore.HiveMetaStore -
36: source:127.0.0.1 create_database: Database(name:default, description:,
locationUri:file:/opt/buildagent/work/7dc556d552c231ec/spark/v3.2/spark-extensions/spark-warehouse/default.db,
parameters:{}, ownerName:root)
[pool-306-thread-1] INFO
org.apache.hadoop.hive.metastore.HiveMetaStore.audit - ugi=root
ip=127.0.0.1 cmd=source:127.0.0.1 create_database: Database(name:default,
description:,
locationUri:file:/opt/buildagent/work/7dc556d552c231ec/spark/v3.2/spark-extensions/spark-warehouse/default.db,
parameters:{}, ownerName:root)
[pool-306-thread-1] ERROR
org.apache.hadoop.hive.metastore.RetryingHMSHandler -
AlreadyExistsException(message:Database default already exists)
at org.apache.hadoop.hive.metastore.HiveMetaStore$HMSHandler.create_database(HiveMetaStore.java:925)
at sun.reflect.GeneratedMethodAccessor151.invoke(Unknown Source)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:498)
at org.apache.hadoop.hive.metastore.RetryingHMSHandler.invokeInternal(RetryingHMSHandler.java:148)
at org.apache.hadoop.hive.metastore.RetryingHMSHandler.invoke(RetryingHMSHandler.java:107)
at com.sun.proxy.$Proxy23.create_database(Unknown Source)
at org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore$Processor$create_database.getResult(ThriftHiveMetastore.java:10787)
at org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore$Processor$create_database.getResult(ThriftHiveMetastore.java:10771)
at org.apache.thrift.ProcessFunction.process(ProcessFunction.java:38)
at org.apache.thrift.TBaseProcessor.process(TBaseProcessor.java:39)
at org.apache.hadoop.hive.metastore.TSetIpAddressProcessor.process(TSetIpAddressProcessor.java:48)
at org.apache.thrift.server.TThreadPoolServer$WorkerProcess.run(TThreadPoolServer.java:310)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
at java.lang.Thread.run(Thread.java:748)
[Test worker] INFO hive.metastore - Trying to connect to metastore with URI
thrift://localhost:36735
[Test worker] INFO hive.metastore - Opened a connection to metastore,
current connections: 7
[Test worker] INFO hive.metastore - Connected to metastore.
[pool-306-thread-2] INFO org.apache.hadoop.hive.metastore.HiveMetaStore -
37: source:127.0.0.1 get_table : db=default tbl=table
[pool-306-thread-2] INFO
org.apache.hadoop.hive.metastore.HiveMetaStore.audit - ugi=root
ip=127.0.0.1 cmd=source:127.0.0.1 get_table : db=default tbl=table
[pool-306-thread-2] INFO org.apache.hadoop.hive.metastore.HiveMetaStore -
37: Opening raw store with implementation
class:org.apache.hadoop.hive.metastore.ObjectStore
[pool-306-thread-2] INFO org.apache.hadoop.hive.metastore.ObjectStore -
ObjectStore, initialize called
[pool-306-thread-2] INFO org.apache.hadoop.hive.metastore.ObjectStore -
Initialized ObjectStore
[pool-306-thread-2] INFO org.apache.hadoop.hive.metastore.HiveMetaStore -
37: source:127.0.0.1 get_table : db=default tbl=table
[pool-306-thread-2] INFO
org.apache.hadoop.hive.metastore.HiveMetaStore.audit - ugi=root
ip=127.0.0.1 cmd=source:127.0.0.1 get_table : db=default tbl=table
[pool-306-thread-2] INFO org.apache.hadoop.hive.metastore.HiveMetaStore -
37: source:127.0.0.1 get_database: default
[pool-306-thread-2] INFO
org.apache.hadoop.hive.metastore.HiveMetaStore.audit - ugi=root
ip=127.0.0.1 cmd=source:127.0.0.1 get_database: default
[pool-306-thread-2] INFO org.apache.hadoop.hive.metastore.HiveMetaStore -
37: source:127.0.0.1 get_table : db=default tbl=table
[pool-306-thread-2] INFO
org.apache.hadoop.hive.metastore.HiveMetaStore.audit - ugi=root
ip=127.0.0.1 cmd=source:127.0.0.1 get_table : db=default tbl=table
[pool-306-thread-2] INFO org.apache.hadoop.hive.metastore.HiveMetaStore -
37: source:127.0.0.1 create_table: Table(tableName:table, dbName:default,
owner:root, createTime:-425858, lastAccessTime:-425858, retention:2147483647,
sd:StorageDescriptor(cols:[FieldSchema(name:id, type:int, comment:null),
FieldSchema(name:dep, type:string, comment:null)],
location:file:/opt/buildagent/temp/buildTmp/hive6005160705013182790/table,
inputFormat:org.apache.hadoop.mapred.FileInputFormat,
outputFormat:org.apache.hadoop.mapred.FileOutputFormat, compressed:false,
numBuckets:0, serdeInfo:SerDeInfo(name:null,
serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe,
parameters:{}), bucketCols:null, sortCols:null, parameters:null),
partitionKeys:[], parameters:{owner=root, EXTERNAL=TRUE,
metadata_location=file:/opt/buildagent/temp/buildTmp/hive6005160705013182790/table/metadata/00000-edc3e826-d5e6-45b9-b6ef-983cc5bf295a.metadata.json,
uuid=0757090e-d4a2-4e69-b25a-8aab316e44ee, table_type=ICEBERG}, viewOriginalText:null, viewExpandedText:null,
tableType:EXTERNAL_TABLE)
[pool-306-thread-2] INFO
org.apache.hadoop.hive.metastore.HiveMetaStore.audit - ugi=root
ip=127.0.0.1 cmd=source:127.0.0.1 create_table: Table(tableName:table,
dbName:default, owner:root, createTime:-425858, lastAccessTime:-425858,
retention:2147483647, sd:StorageDescriptor(cols:[FieldSchema(name:id, type:int,
comment:null), FieldSchema(name:dep, type:string, comment:null)],
location:file:/opt/buildagent/temp/buildTmp/hive6005160705013182790/table,
inputFormat:org.apache.hadoop.mapred.FileInputFormat,
outputFormat:org.apache.hadoop.mapred.FileOutputFormat, compressed:false,
numBuckets:0, serdeInfo:SerDeInfo(name:null,
serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe,
parameters:{}), bucketCols:null, sortCols:null, parameters:null),
partitionKeys:[], parameters:{owner=root, EXTERNAL=TRUE,
metadata_location=file:/opt/buildagent/temp/buildTmp/hive6005160705013182790/table/metadata/00000-edc3e826-d5e6-45b9-b6ef-983cc5bf295a.metadata.json,
uuid=0757090e-d4a2-4e69-b25a-8aab316e44ee, table_type=ICEBERG}, viewOriginalText:null,
viewExpandedText:null, tableType:EXTERNAL_TABLE)
[pool-306-thread-2] INFO hive.log - Updating table stats fast for table
[pool-306-thread-2] INFO hive.log - Updated size of table table to 1197
[Test worker] INFO org.apache.iceberg.BaseMetastoreTableOperations -
Successfully committed to table testhive.default.table in 518 ms
[pool-306-thread-2] INFO org.apache.hadoop.hive.metastore.HiveMetaStore -
37: source:127.0.0.1 get_table : db=default tbl=table
[pool-306-thread-2] INFO
org.apache.hadoop.hive.metastore.HiveMetaStore.audit - ugi=root
ip=127.0.0.1 cmd=source:127.0.0.1 get_table : db=default tbl=table
[Test worker] INFO org.apache.iceberg.BaseMetastoreTableOperations -
Refreshing table metadata from new version:
file:/opt/buildagent/temp/buildTmp/hive6005160705013182790/table/metadata/00000-edc3e826-d5e6-45b9-b6ef-983cc5bf295a.metadata.json
[pool-306-thread-2] INFO org.apache.hadoop.hive.metastore.HiveMetaStore -
37: source:127.0.0.1 get_table : db=default tbl=table
[pool-306-thread-2] INFO
org.apache.hadoop.hive.metastore.HiveMetaStore.audit - ugi=root
ip=127.0.0.1 cmd=source:127.0.0.1 get_table : db=default tbl=table
[pool-306-thread-2] INFO org.apache.hadoop.hive.metastore.HiveMetaStore -
37: source:127.0.0.1 get_table : db=default tbl=table
[pool-306-thread-2] INFO
org.apache.hadoop.hive.metastore.HiveMetaStore.audit - ugi=root
ip=127.0.0.1 cmd=source:127.0.0.1 get_table : db=default tbl=table
[pool-306-thread-2] INFO org.apache.hadoop.hive.metastore.HiveMetaStore -
37: source:127.0.0.1 get_table : db=default tbl=table
[pool-306-thread-2] INFO
org.apache.hadoop.hive.metastore.HiveMetaStore.audit - ugi=root
ip=127.0.0.1 cmd=source:127.0.0.1 get_table : db=default tbl=table
[pool-306-thread-2] INFO org.apache.hadoop.hive.metastore.HiveMetaStore -
37: source:127.0.0.1 alter_table: db=default tbl=table newtbl=table
[pool-306-thread-2] INFO
org.apache.hadoop.hive.metastore.HiveMetaStore.audit - ugi=root
ip=127.0.0.1 cmd=source:127.0.0.1 alter_table: db=default tbl=table
newtbl=table
[Test worker] INFO org.apache.iceberg.BaseMetastoreTableOperations -
Successfully committed to table testhive.default.table in 289 ms
[Test worker] WARN org.apache.iceberg.BaseTransaction - Failed to load
metadata for a committed snapshot, skipping clean-up
[pool-306-thread-2] INFO org.apache.hadoop.hive.metastore.HiveMetaStore -
37: source:127.0.0.1 get_table : db=default tbl=table
[pool-306-thread-2] INFO
org.apache.hadoop.hive.metastore.HiveMetaStore.audit - ugi=root
ip=127.0.0.1 cmd=source:127.0.0.1 get_table : db=default tbl=table
[Test worker] INFO org.apache.iceberg.BaseMetastoreTableOperations -
Refreshing table metadata from new version:
file:/opt/buildagent/temp/buildTmp/hive6005160705013182790/table/metadata/00001-15b51853-81d7-44a2-91a8-f77c0a2aac54.metadata.json
[pool-306-thread-2] INFO org.apache.hadoop.hive.metastore.HiveMetaStore -
37: source:127.0.0.1 get_table : db=default tbl=table
[pool-306-thread-2] INFO
org.apache.hadoop.hive.metastore.HiveMetaStore.audit - ugi=root
ip=127.0.0.1 cmd=source:127.0.0.1 get_table : db=default tbl=table
[pool-306-thread-2] INFO org.apache.hadoop.hive.metastore.HiveMetaStore -
37: source:127.0.0.1 get_table : db=default tbl=table
[pool-306-thread-2] INFO
org.apache.hadoop.hive.metastore.HiveMetaStore.audit - ugi=root
ip=127.0.0.1 cmd=source:127.0.0.1 get_table : db=default tbl=table
[pool-306-thread-2] INFO org.apache.hadoop.hive.metastore.HiveMetaStore -
37: source:127.0.0.1 get_table : db=default tbl=table
[pool-306-thread-2] INFO
org.apache.hadoop.hive.metastore.HiveMetaStore.audit - ugi=root
ip=127.0.0.1 cmd=source:127.0.0.1 get_table : db=default tbl=table
[pool-306-thread-2] INFO org.apache.hadoop.hive.metastore.HiveMetaStore -
37: source:127.0.0.1 alter_table: db=default tbl=table newtbl=table
[pool-306-thread-2] INFO
org.apache.hadoop.hive.metastore.HiveMetaStore.audit - ugi=root
ip=127.0.0.1 cmd=source:127.0.0.1 alter_table: db=default tbl=table
newtbl=table
[Test worker] INFO org.apache.iceberg.BaseMetastoreTableOperations -
Successfully committed to table testhive.default.table in 303 ms
[Test worker] WARN org.apache.iceberg.BaseTransaction - Failed to load
metadata for a committed snapshot, skipping clean-up
[pool-306-thread-2] INFO org.apache.hadoop.hive.metastore.HiveMetaStore -
37: source:127.0.0.1 get_table : db=default tbl=table
[pool-306-thread-2] INFO
org.apache.hadoop.hive.metastore.HiveMetaStore.audit - ugi=root
ip=127.0.0.1 cmd=source:127.0.0.1 get_table : db=default tbl=table
[Test worker] INFO org.apache.iceberg.BaseMetastoreTableOperations -
Refreshing table metadata from new version:
file:/opt/buildagent/temp/buildTmp/hive6005160705013182790/table/metadata/00002-b5952f42-7f0f-4826-a123-5e7affbc6804.metadata.json
[pool-306-thread-2] INFO org.apache.hadoop.hive.metastore.HiveMetaStore -
37: source:127.0.0.1 get_table : db=default tbl=table
[pool-306-thread-2] INFO
org.apache.hadoop.hive.metastore.HiveMetaStore.audit - ugi=root
ip=127.0.0.1 cmd=source:127.0.0.1 get_table : db=default tbl=table
[pool-306-thread-2] INFO org.apache.hadoop.hive.metastore.HiveMetaStore -
37: source:127.0.0.1 get_table : db=default tbl=table
[pool-306-thread-2] INFO
org.apache.hadoop.hive.metastore.HiveMetaStore.audit - ugi=root
ip=127.0.0.1 cmd=source:127.0.0.1 get_table : db=default tbl=table
[pool-306-thread-2] INFO org.apache.hadoop.hive.metastore.HiveMetaStore -
37: source:127.0.0.1 get_table : db=default tbl=table
[pool-306-thread-2] INFO
org.apache.hadoop.hive.metastore.HiveMetaStore.audit - ugi=root
ip=127.0.0.1 cmd=source:127.0.0.1 get_table : db=default tbl=table
[pool-306-thread-2] INFO org.apache.hadoop.hive.metastore.HiveMetaStore -
37: source:127.0.0.1 alter_table: db=default tbl=table newtbl=table
[pool-306-thread-2] INFO
org.apache.hadoop.hive.metastore.HiveMetaStore.audit - ugi=root
ip=127.0.0.1 cmd=source:127.0.0.1 alter_table: db=default tbl=table
newtbl=table
[Test worker] INFO org.apache.iceberg.BaseMetastoreTableOperations -
Successfully committed to table testhive.default.table in 252 ms
[Test worker] WARN org.apache.iceberg.BaseTransaction - Failed to load
metadata for a committed snapshot, skipping clean-up
[pool-306-thread-2] INFO org.apache.hadoop.hive.metastore.HiveMetaStore -
37: source:127.0.0.1 get_table : db=default tbl=table
[pool-306-thread-2] INFO
org.apache.hadoop.hive.metastore.HiveMetaStore.audit - ugi=root
ip=127.0.0.1 cmd=source:127.0.0.1 get_table : db=default tbl=table
[Test worker] INFO org.apache.iceberg.BaseMetastoreTableOperations -
Refreshing table metadata from new version:
file:/opt/buildagent/temp/buildTmp/hive6005160705013182790/table/metadata/00003-80dc73f1-e63a-452b-a36e-65a19b6993c4.metadata.json
[pool-306-thread-2] INFO org.apache.hadoop.hive.metastore.HiveMetaStore -
37: source:127.0.0.1 get_table : db=default tbl=table
[pool-306-thread-2] INFO
org.apache.hadoop.hive.metastore.HiveMetaStore.audit - ugi=root
ip=127.0.0.1 cmd=source:127.0.0.1 get_table : db=default tbl=table
[pool-306-thread-2] INFO org.apache.hadoop.hive.metastore.HiveMetaStore -
37: source:127.0.0.1 get_table : db=default tbl=table
[pool-306-thread-2] INFO
org.apache.hadoop.hive.metastore.HiveMetaStore.audit - ugi=root
ip=127.0.0.1 cmd=source:127.0.0.1 get_table : db=default tbl=table
[pool-306-thread-2] INFO org.apache.hadoop.hive.metastore.HiveMetaStore -
37: source:127.0.0.1 get_table : db=default tbl=table
[pool-306-thread-2] INFO
org.apache.hadoop.hive.metastore.HiveMetaStore.audit - ugi=root
ip=127.0.0.1 cmd=source:127.0.0.1 get_table : db=default tbl=table
[pool-306-thread-2] INFO org.apache.hadoop.hive.metastore.HiveMetaStore -
37: source:127.0.0.1 alter_table: db=default tbl=table newtbl=table
[pool-306-thread-2] INFO
org.apache.hadoop.hive.metastore.HiveMetaStore.audit - ugi=root
ip=127.0.0.1 cmd=source:127.0.0.1 alter_table: db=default tbl=table
newtbl=table
[Test worker] INFO org.apache.iceberg.BaseMetastoreTableOperations -
Successfully committed to table testhive.default.table in 235 ms
[Test worker] WARN org.apache.iceberg.BaseTransaction - Failed to load
metadata for a committed snapshot, skipping clean-up
[pool-306-thread-2] INFO org.apache.hadoop.hive.metastore.HiveMetaStore -
37: source:127.0.0.1 get_table : db=default tbl=table
[pool-306-thread-2] INFO
org.apache.hadoop.hive.metastore.HiveMetaStore.audit - ugi=root
ip=127.0.0.1 cmd=source:127.0.0.1 get_table : db=default tbl=table
[Test worker] INFO org.apache.iceberg.BaseMetastoreTableOperations -
Refreshing table metadata from new version:
file:/opt/buildagent/temp/buildTmp/hive6005160705013182790/table/metadata/00004-cb27e370-4fd4-43cc-a567-0c8f18c6149b.metadata.json
[pool-306-thread-2] INFO org.apache.hadoop.hive.metastore.HiveMetaStore -
37: source:127.0.0.1 get_table : db=default tbl=table
[pool-306-thread-2] INFO
org.apache.hadoop.hive.metastore.HiveMetaStore.audit - ugi=root
ip=127.0.0.1 cmd=source:127.0.0.1 get_table : db=default tbl=table
[pool-306-thread-2] INFO org.apache.hadoop.hive.metastore.HiveMetaStore -
37: source:127.0.0.1 get_table : db=default tbl=table
[pool-306-thread-2] INFO
org.apache.hadoop.hive.metastore.HiveMetaStore.audit - ugi=root
ip=127.0.0.1 cmd=source:127.0.0.1 get_table : db=default tbl=table
[pool-306-thread-2] INFO org.apache.hadoop.hive.metastore.HiveMetaStore -
37: source:127.0.0.1 get_table : db=default tbl=table
[pool-306-thread-2] INFO
org.apache.hadoop.hive.metastore.HiveMetaStore.audit - ugi=root
ip=127.0.0.1 cmd=source:127.0.0.1 get_table : db=default tbl=table
[pool-306-thread-2] INFO org.apache.hadoop.hive.metastore.HiveMetaStore -
37: source:127.0.0.1 alter_table: db=default tbl=table newtbl=table
[pool-306-thread-2] INFO
org.apache.hadoop.hive.metastore.HiveMetaStore.audit - ugi=root
ip=127.0.0.1 cmd=source:127.0.0.1 alter_table: db=default tbl=table
newtbl=table
[Test worker] INFO org.apache.iceberg.BaseMetastoreTableOperations -
Successfully committed to table testhive.default.table in 243 ms
[Test worker] WARN org.apache.iceberg.BaseTransaction - Failed to load
metadata for a committed snapshot, skipping clean-up
[pool-306-thread-2] INFO org.apache.hadoop.hive.metastore.HiveMetaStore -
37: source:127.0.0.1 get_table : db=default tbl=table
[pool-306-thread-2] INFO
org.apache.hadoop.hive.metastore.HiveMetaStore.audit - ugi=root
ip=127.0.0.1 cmd=source:127.0.0.1 get_table : db=default tbl=table
[pool-328-thread-1] INFO org.apache.iceberg.BaseMetastoreTableOperations -
Refreshing table metadata from new version:
file:/opt/buildagent/temp/buildTmp/hive6005160705013182790/table/metadata/00005-1ffc210a-54f2-4851-bf98-6bcd91b0cc33.metadata.json
[pool-328-thread-2] INFO
org.apache.spark.sql.catalyst.expressions.codegen.CodeGenerator - Code
generated in 4.947589 ms
[pool-328-thread-2] INFO org.apache.spark.storage.memory.MemoryStore - Block
broadcast_0 stored as values in memory (estimated size 231.3 KiB, free 273.1
MiB)
[pool-328-thread-2] INFO org.apache.spark.storage.memory.MemoryStore - Block
broadcast_0_piece0 stored as bytes in memory (estimated size 28.3 KiB, free
273.0 MiB)
[dispatcher-BlockManagerMaster] INFO
org.apache.spark.storage.BlockManagerInfo - Added broadcast_0_piece0 in memory
on abf2d5870077:37061 (size: 28.3 KiB, free: 273.3 MiB)
[pool-328-thread-2] INFO org.apache.spark.SparkContext - Created broadcast 0
from broadcast at SparkWrite.java:173
[pool-328-thread-2] INFO
org.apache.spark.sql.execution.datasources.v2.AppendDataExec - Start processing
data source write support: IcebergBatchWrite(table=testhive.default.table,
format=ORC). The input RDD has 1 partitions.
[pool-328-thread-2] INFO org.apache.spark.SparkContext - Starting job:
append at TestDelete.java:653
[dag-scheduler-event-loop] INFO org.apache.spark.scheduler.DAGScheduler -
Got job 0 (append at TestDelete.java:653) with 1 output partitions
[dag-scheduler-event-loop] INFO org.apache.spark.scheduler.DAGScheduler -
Final stage: ResultStage 0 (append at TestDelete.java:653)
[dag-scheduler-event-loop] INFO org.apache.spark.scheduler.DAGScheduler -
Parents of final stage: List()
[dag-scheduler-event-loop] INFO org.apache.spark.scheduler.DAGScheduler -
Missing parents: List()
[dag-scheduler-event-loop] INFO org.apache.spark.scheduler.DAGScheduler -
Submitting ResultStage 0 (CoalescedRDD[2] at append at TestDelete.java:653),
which has no missing parents
[dag-scheduler-event-loop] INFO org.apache.spark.storage.memory.MemoryStore
- Block broadcast_1 stored as values in memory (estimated size 7.6 KiB, free
273.0 MiB)
[dag-scheduler-event-loop] INFO org.apache.spark.storage.memory.MemoryStore
- Block broadcast_1_piece0 stored as bytes in memory (estimated size 4.1 KiB,
free 273.0 MiB)
[dispatcher-BlockManagerMaster] INFO
org.apache.spark.storage.BlockManagerInfo - Added broadcast_1_piece0 in memory
on abf2d5870077:37061 (size: 4.1 KiB, free: 273.3 MiB)
[dag-scheduler-event-loop] INFO org.apache.spark.SparkContext - Created
broadcast 1 from broadcast at DAGScheduler.scala:1427
[dag-scheduler-event-loop] INFO org.apache.spark.scheduler.DAGScheduler -
Submitting 1 missing tasks from ResultStage 0 (CoalescedRDD[2] at append at
TestDelete.java:653) (first 15 tasks are for partitions Vector(0))
[dag-scheduler-event-loop] INFO org.apache.spark.scheduler.TaskSchedulerImpl
- Adding task set 0.0 with 1 tasks resource profile 0
[dispatcher-event-loop-1] INFO org.apache.spark.scheduler.TaskSetManager -
Starting task 0.0 in stage 0.0 (TID 0) (abf2d5870077, executor driver,
partition 0, PROCESS_LOCAL, 4968 bytes) taskResourceAssignments Map()
[Executor task launch worker for task 0.0 in stage 0.0 (TID 0)] INFO
org.apache.spark.executor.Executor - Running task 0.0 in stage 0.0 (TID 0)
[Executor task launch worker for task 0.0 in stage 0.0 (TID 0)] INFO
org.apache.orc.impl.PhysicalFsWriter - ORC writer created for path:
file:/opt/buildagent/temp/buildTmp/hive6005160705013182790/table/data/00000-0-98b9b7a3-200a-41c6-8377-cfacff626b24-00001.orc
with stripeSize: 67108864 blockSize: 268435456 compression: Compress: ZLIB
buffer: 262144
[Executor task launch worker for task 0.0 in stage 0.0 (TID 0)] INFO
org.apache.orc.impl.WriterImpl - ORC writer created for path:
file:/opt/buildagent/temp/buildTmp/hive6005160705013182790/table/data/00000-0-98b9b7a3-200a-41c6-8377-cfacff626b24-00001.orc
with stripeSize: 67108864 options: Compress: ZLIB buffer: 262144
[Executor task launch worker for task 0.0 in stage 0.0 (TID 0)] INFO
org.apache.spark.sql.execution.datasources.v2.DataWritingSparkTask - Commit
authorized for partition 0 (task 0, attempt 0, stage 0.0)
[Executor task launch worker for task 0.0 in stage 0.0 (TID 0)] INFO
org.apache.spark.sql.execution.datasources.v2.DataWritingSparkTask - Committed
partition 0 (task 0, attempt 0, stage 0.0)
[Executor task launch worker for task 0.0 in stage 0.0 (TID 0)] INFO
org.apache.spark.executor.Executor - Finished task 0.0 in stage 0.0 (TID 0).
3599 bytes result sent to driver
[task-result-getter-0] INFO org.apache.spark.scheduler.TaskSetManager -
Finished task 0.0 in stage 0.0 (TID 0) in 15 ms on abf2d5870077 (executor
driver) (1/1)
[dag-scheduler-event-loop] INFO org.apache.spark.scheduler.DAGScheduler -
ResultStage 0 (append at TestDelete.java:653) finished in 0.017 s
[dag-scheduler-event-loop] INFO org.apache.spark.scheduler.DAGScheduler -
Job 0 is finished. Cancelling potential speculative or zombie tasks for this job
[task-result-getter-0] INFO org.apache.spark.scheduler.TaskSchedulerImpl -
Removed TaskSet 0.0, whose tasks have all completed, from pool
[dag-scheduler-event-loop] INFO org.apache.spark.scheduler.TaskSchedulerImpl
- Killing all running tasks in stage 0: Stage finished
[pool-328-thread-2] INFO org.apache.spark.scheduler.DAGScheduler - Job 0
finished: append at TestDelete.java:653, took 0.018251 s
[pool-328-thread-2] INFO
org.apache.spark.sql.execution.datasources.v2.AppendDataExec - Data source
write support IcebergBatchWrite(table=testhive.default.table, format=ORC) is
committing.
[pool-328-thread-2] INFO org.apache.iceberg.spark.source.SparkWrite -
Committing append with 1 new data files to table testhive.default.table
[pool-306-thread-2] INFO org.apache.hadoop.hive.metastore.HiveMetaStore -
37: source:127.0.0.1 get_table : db=default tbl=table
[pool-306-thread-2] INFO
org.apache.hadoop.hive.metastore.HiveMetaStore.audit - ugi=root
ip=127.0.0.1 cmd=source:127.0.0.1 get_table : db=default tbl=table
[pool-328-thread-1] INFO org.apache.iceberg.BaseTableScan - Scanning empty
table testhive.default.table
[pool-328-thread-1] INFO
org.apache.spark.sql.execution.datasources.v2.V2ScanRelationPushDown -
Output: id#58899, dep#58900
[pool-328-thread-1] INFO hive.metastore - Trying to connect to metastore
with URI thrift://localhost:36735
[pool-328-thread-1] INFO hive.metastore - Opened a connection to metastore,
current connections: 8
[pool-328-thread-1] INFO hive.metastore - Connected to metastore.
[pool-306-thread-3] INFO org.apache.hadoop.hive.metastore.HiveMetaStore -
38: source:127.0.0.1 get_table : db=default tbl=table
[pool-306-thread-3] INFO
org.apache.hadoop.hive.metastore.HiveMetaStore.audit - ugi=root
ip=127.0.0.1 cmd=source:127.0.0.1 get_table : db=default tbl=table
[pool-306-thread-3] INFO org.apache.hadoop.hive.metastore.HiveMetaStore -
38: Opening raw store with implementation
class:org.apache.hadoop.hive.metastore.ObjectStore
[pool-306-thread-3] INFO org.apache.hadoop.hive.metastore.ObjectStore -
ObjectStore, initialize called
[pool-306-thread-3] INFO org.apache.hadoop.hive.metastore.ObjectStore -
Initialized ObjectStore
[pool-306-thread-2] INFO org.apache.hadoop.hive.metastore.HiveMetaStore -
37: source:127.0.0.1 get_table : db=default tbl=table
[pool-306-thread-2] INFO
org.apache.hadoop.hive.metastore.HiveMetaStore.audit - ugi=root
ip=127.0.0.1 cmd=source:127.0.0.1 get_table : db=default tbl=table
[pool-306-thread-2] INFO org.apache.hadoop.hive.metastore.HiveMetaStore -
37: source:127.0.0.1 alter_table: db=default tbl=table newtbl=table
[pool-306-thread-2] INFO
org.apache.hadoop.hive.metastore.HiveMetaStore.audit - ugi=root
ip=127.0.0.1 cmd=source:127.0.0.1 alter_table: db=default tbl=table
newtbl=table
[pool-328-thread-2] INFO org.apache.iceberg.BaseMetastoreTableOperations -
Successfully committed to table testhive.default.table in 259 ms
[pool-328-thread-2] INFO org.apache.iceberg.SnapshotProducer - Committed
snapshot 275387162142555038 (MergeAppend)
[pool-306-thread-2] INFO org.apache.hadoop.hive.metastore.HiveMetaStore -
37: source:127.0.0.1 get_table : db=default tbl=table
[pool-306-thread-2] INFO
org.apache.hadoop.hive.metastore.HiveMetaStore.audit - ugi=root
ip=127.0.0.1 cmd=source:127.0.0.1 get_table : db=default tbl=table
[pool-328-thread-2] INFO org.apache.iceberg.BaseMetastoreTableOperations -
Refreshing table metadata from new version:
file:/opt/buildagent/temp/buildTmp/hive6005160705013182790/table/metadata/00006-79e10616-d999-42e1-a5e5-c9c7a823c797.metadata.json
[pool-306-thread-2] INFO org.apache.hadoop.hive.metastore.HiveMetaStore -
37: source:127.0.0.1 get_table : db=default tbl=table
[pool-306-thread-2] INFO
org.apache.hadoop.hive.metastore.HiveMetaStore.audit - ugi=root
ip=127.0.0.1 cmd=source:127.0.0.1 get_table : db=default tbl=table
[pool-328-thread-2] INFO org.apache.iceberg.spark.source.SparkWrite -
Committed in 311 ms
[pool-328-thread-2] INFO
org.apache.spark.sql.execution.datasources.v2.AppendDataExec - Data source
write support IcebergBatchWrite(table=testhive.default.table, format=ORC)
committed.
[pool-306-thread-3] INFO org.apache.hadoop.hive.metastore.HiveMetaStore -
38: source:127.0.0.1 get_table : db=default tbl=table
[pool-306-thread-3] INFO
org.apache.hadoop.hive.metastore.HiveMetaStore.audit - ugi=root
ip=127.0.0.1 cmd=source:127.0.0.1 get_table : db=default tbl=table
[pool-328-thread-1] WARN org.apache.iceberg.util.Tasks - Retrying task after
failure: Base metadata location
'file:/opt/buildagent/temp/buildTmp/hive6005160705013182790/table/metadata/00005-1ffc210a-54f2-4851-bf98-6bcd91b0cc33.metadata.json'
is not same as the current table metadata location
'file:/opt/buildagent/temp/buildTmp/hive6005160705013182790/table/metadata/00006-79e10616-d999-42e1-a5e5-c9c7a823c797.metadata.json'
for default.table
org.apache.iceberg.exceptions.CommitFailedException: Base metadata location
'file:/opt/buildagent/temp/buildTmp/hive6005160705013182790/table/metadata/00005-1ffc210a-54f2-4851-bf98-6bcd91b0cc33.metadata.json'
is not same as the current table metadata location
'file:/opt/buildagent/temp/buildTmp/hive6005160705013182790/table/metadata/00006-79e10616-d999-42e1-a5e5-c9c7a823c797.metadata.json'
for default.table
at org.apache.iceberg.hive.HiveTableOperations.doCommit(HiveTableOperations.java:249)
at org.apache.iceberg.BaseMetastoreTableOperations.commit(BaseMetastoreTableOperations.java:127)
at org.apache.iceberg.SnapshotProducer.lambda$commit$2(SnapshotProducer.java:300)
at org.apache.iceberg.util.Tasks$Builder.runTaskWithRetry(Tasks.java:404)
at org.apache.iceberg.util.Tasks$Builder.runSingleThreaded(Tasks.java:214)
at org.apache.iceberg.util.Tasks$Builder.run(Tasks.java:198)
at org.apache.iceberg.util.Tasks$Builder.run(Tasks.java:190)
at org.apache.iceberg.SnapshotProducer.commit(SnapshotProducer.java:282)
at org.apache.iceberg.spark.source.SparkTable.deleteWhere(SparkTable.java:269)
at org.apache.spark.sql.execution.datasources.v2.DeleteFromTableExec.run(DeleteFromTableExec.scala:31)
at org.apache.spark.sql.execution.datasources.v2.V2CommandExec.result$lzycompute(V2CommandExec.scala:43)
at org.apache.spark.sql.execution.datasources.v2.V2CommandExec.result(V2CommandExec.scala:43)
at org.apache.spark.sql.execution.datasources.v2.V2CommandExec.executeCollect(V2CommandExec.scala:49)
at org.apache.spark.sql.execution.QueryExecution$$anonfun$eagerlyExecuteCommands$1.$anonfun$applyOrElse$1(QueryExecution.scala:110)
at org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$5(SQLExecution.scala:103)
at org.apache.spark.sql.execution.SQLExecution$.withSQLConfPropagated(SQLExecution.scala:163)
at org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$1(SQLExecution.scala:90)
at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:775)
at org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:64)
at org.apache.spark.sql.execution.QueryExecution$$anonfun$eagerlyExecuteCommands$1.applyOrElse(QueryExecution.scala:110)
at org.apache.spark.sql.execution.QueryExecution$$anonfun$eagerlyExecuteCommands$1.applyOrElse(QueryExecution.scala:106)
at org.apache.spark.sql.catalyst.trees.TreeNode.$anonfun$transformDownWithPruning$1(TreeNode.scala:481)
at org.apache.spark.sql.catalyst.trees.CurrentOrigin$.withOrigin(TreeNode.scala:82)
at org.apache.spark.sql.catalyst.trees.TreeNode.transformDownWithPruning(TreeNode.scala:481)
at org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.org$apache$spark$sql$catalyst$plans$logical$AnalysisHelper$$super$transformDownWithPruning(LogicalPlan.scala:30)
at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.transformDownWithPruning(AnalysisHelper.scala:267)
at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.transformDownWithPruning$(AnalysisHelper.scala:263)
at org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.transformDownWithPruning(LogicalPlan.scala:30)
at org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.transformDownWithPruning(LogicalPlan.scala:30)
at org.apache.spark.sql.catalyst.trees.TreeNode.transformDown(TreeNode.scala:457)
at org.apache.spark.sql.execution.QueryExecution.eagerlyExecuteCommands(QueryExecution.scala:106)
at org.apache.spark.sql.execution.QueryExecution.commandExecuted$lzycompute(QueryExecution.scala:93)
at org.apache.spark.sql.execution.QueryExecution.commandExecuted(QueryExecution.scala:91)
at org.apache.spark.sql.Dataset.&lt;init&gt;(Dataset.scala:219)
at org.apache.spark.sql.Dataset$.$anonfun$ofRows$2(Dataset.scala:99)
at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:775)
at org.apache.spark.sql.Dataset$.ofRows(Dataset.scala:96)
at org.apache.spark.sql.SparkSession.$anonfun$sql$1(SparkSession.scala:618)
at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:775)
at org.apache.spark.sql.SparkSession.sql(SparkSession.scala:613)
at org.apache.iceberg.spark.SparkTestBase.sql(SparkTestBase.java:95)
at org.apache.iceberg.spark.extensions.TestDelete.lambda$testDeleteWithSerializableIsolation$3(TestDelete.java:635)
at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511)
at java.util.concurrent.FutureTask.run(FutureTask.java:266)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
at java.lang.Thread.run(Thread.java:748)
[pool-306-thread-3] INFO org.apache.hadoop.hive.metastore.HiveMetaStore -
38: source:127.0.0.1 get_table : db=default tbl=table
[pool-306-thread-3] INFO
org.apache.hadoop.hive.metastore.HiveMetaStore.audit - ugi=root
ip=127.0.0.1 cmd=source:127.0.0.1 get_table : db=default tbl=table
[pool-306-thread-3] INFO org.apache.hadoop.hive.metastore.HiveMetaStore -
38: source:127.0.0.1 get_table : db=default tbl=table
[pool-306-thread-3] INFO
org.apache.hadoop.hive.metastore.HiveMetaStore.audit - ugi=root
ip=127.0.0.1 cmd=source:127.0.0.1 get_table : db=default tbl=table
[Test worker] INFO org.apache.iceberg.BaseMetastoreTableOperations -
Refreshing table metadata from new version:
file:/opt/buildagent/temp/buildTmp/hive6005160705013182790/table/metadata/00006-79e10616-d999-42e1-a5e5-c9c7a823c797.metadata.json
[pool-306-thread-3] INFO org.apache.hadoop.hive.metastore.HiveMetaStore -
38: source:127.0.0.1 get_table : db=default tbl=table
[pool-306-thread-3] INFO
org.apache.hadoop.hive.metastore.HiveMetaStore.audit - ugi=root
ip=127.0.0.1 cmd=source:127.0.0.1 get_table : db=default tbl=table
[pool-306-thread-3] INFO org.apache.hadoop.hive.metastore.HiveMetaStore -
38: source:127.0.0.1 drop_table : db=default tbl=table
[pool-306-thread-3] INFO
org.apache.hadoop.hive.metastore.HiveMetaStore.audit - ugi=root
ip=127.0.0.1 cmd=source:127.0.0.1 drop_table : db=default tbl=table
[Test worker] INFO org.apache.iceberg.CatalogUtil - Manifests to delete:
GenericManifestFile{path=file:/opt/buildagent/temp/buildTmp/hive6005160705013182790/table/metadata/c1854833-f979-44d0-b098-1d25e5229fbd-m0.avro,
length=6526, partition_spec_id=0, added_snapshot_id=275387162142555038,
added_data_files_count=1, added_rows_count=2, existing_data_files_count=0,
existing_rows_count=0, deleted_data_files_count=0, deleted_rows_count=0,
partitions=[], key_metadata=null}
[Test worker] INFO org.apache.iceberg.hive.HiveCatalog - Dropped table:
default.table
[pool-306-thread-1] INFO org.apache.hadoop.hive.metastore.HiveMetaStore -
36: source:127.0.0.1 get_database: default
[pool-306-thread-1] INFO
org.apache.hadoop.hive.metastore.HiveMetaStore.audit - ugi=root
ip=127.0.0.1 cmd=source:127.0.0.1 get_database: default
[pool-306-thread-1] INFO org.apache.hadoop.hive.metastore.HiveMetaStore -
36: source:127.0.0.1 get_table : db=default tbl=deleted_id
[pool-306-thread-1] INFO
org.apache.hadoop.hive.metastore.HiveMetaStore.audit - ugi=root
ip=127.0.0.1 cmd=source:127.0.0.1 get_table : db=default tbl=deleted_id
[pool-306-thread-1] INFO org.apache.hadoop.hive.metastore.HiveMetaStore -
36: source:127.0.0.1 get_database: default
[pool-306-thread-1] INFO
org.apache.hadoop.hive.metastore.HiveMetaStore.audit - ugi=root
ip=127.0.0.1 cmd=source:127.0.0.1 get_database: default
[pool-306-thread-1] INFO org.apache.hadoop.hive.metastore.HiveMetaStore -
36: source:127.0.0.1 get_table : db=default tbl=deleted_dep
[pool-306-thread-1] INFO
org.apache.hadoop.hive.metastore.HiveMetaStore.audit - ugi=root
ip=127.0.0.1 cmd=source:127.0.0.1 get_table : db=default tbl=deleted_dep
```