 
The table e_carbon.prod_inst_carbon5 contains about 1.7 billion rows.

The following query returns about one hundred thousand rows:

select count(*) from e_carbon.prod_inst_carbon5 where ind_prod_inst_id='13623'
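For reference, the row count can be checked from the same session before attempting the update (a minimal sketch, assuming cc is the same CarbonContext/SQLContext instance used for the update below):

cc.sql("select count(*) from e_carbon.prod_inst_carbon5 where ind_prod_inst_id='13623'").show()
// in this case it reports roughly 100,000 matching rows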


cc.sql("update e_carbon.prod_inst_carbon5 set (remark)=('9999') where 
ind_prod_inst_id='13623'")
Error message
17/06/15 09:38:50 INFO DAGScheduler: Job 65 finished: show at <console>:31, took 36.347014 s
17/06/15 09:38:50 AUDIT deleteExecution$: [HETL032][e_carbon][Thread-1]Delete data operation is failed for e_carbon.prod_inst_carbon5
17/06/15 09:38:50 ERROR deleteExecution$: main Delete data operation is failed due to failure in creating delete delta file for segment : null block : null
17/06/15 09:38:50 ERROR ProjectForUpdateCommand$: main Exception in update operationjava.lang.Exception: Multiple input rows matched for same row.
17/06/15 09:38:50 INFO MapPartitionsRDD: Removing RDD 822 from persistence list
17/06/15 09:38:50 INFO BlockManager: Removing RDD 822
17/06/15 09:38:50 INFO HdfsFileLock: main Deleted the lock file hdfs://ns1/user/e_carbon/private/carbon.store/e_carbon/prod_inst_carbon5/meta.lock
17/06/15 09:38:50 INFO CarbonLockUtil: main Metadata lock has been successfully released
java.lang.RuntimeException: Update operation failed. Multiple input rows matched for same row.
at scala.sys.package$.error(package.scala:27)
at org.apache.spark.sql.execution.command.ProjectForUpdateCommand.run(IUDCommands.scala:236)
at org.apache.spark.sql.execution.ExecutedCommand.sideEffectResult$lzycompute(commands.scala:58)
at org.apache.spark.sql.execution.ExecutedCommand.sideEffectResult(commands.scala:56)
at org.apache.spark.sql.execution.ExecutedCommand.executeTake(commands.scala:67)
at org.apache.spark.sql.execution.Limit.executeCollect(basicOperators.scala:165)
at org.apache.spark.sql.execution.SparkPlan.executeCollectPublic(SparkPlan.scala:174)
at org.apache.spark.sql.DataFrame$$anonfun$org$apache$spark$sql$DataFrame$$execute$1$1.apply(DataFrame.scala:1538)
at org.apache.spark.sql.DataFrame$$anonfun$org$apache$spark$sql$DataFrame$$execute$1$1.apply(DataFrame.scala:1538)
at org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:56)
at org.apache.spark.sql.DataFrame.withNewExecutionId(DataFrame.scala:2125)
at org.apache.spark.sql.DataFrame.org$apache$spark$sql$DataFrame$$execute$1(DataFrame.scala:1537)
at org.apache.spark.sql.DataFrame.org$apache$spark$sql$DataFrame$$collect(DataFrame.scala:1544)
at org.apache.spark.sql.DataFrame$$anonfun$head$1.apply(DataFrame.scala:1414)
at org.apache.spark.sql.DataFrame$$anonfun$head$1.apply(DataFrame.scala:1413)
at org.apache.spark.sql.DataFrame.withCallback(DataFrame.scala:2138)
at org.apache.spark.sql.DataFrame.head(DataFrame.scala:1413)
at org.apache.spark.sql.DataFrame.take(DataFrame.scala:1495)
at org.apache.spark.sql.DataFrame.showString(DataFrame.scala:171)
at org.apache.spark.sql.DataFrame.show(DataFrame.scala:394)
at org.apache.spark.sql.DataFrame.show(DataFrame.scala:355)
at org.apache.spark.sql.DataFrame.show(DataFrame.scala:363)
at $iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC.<init>(<console>:31)
at $iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC.<init>(<console>:36)
at $iwC$$iwC$$iwC$$iwC$$iwC$$iwC.<init>(<console>:38)
at $iwC$$iwC$$iwC$$iwC$$iwC.<init>(<console>:40)
at $iwC$$iwC$$iwC$$iwC.<init>(<console>:42)
at $iwC$$iwC$$iwC.<init>(<console>:44)
at $iwC$$iwC.<init>(<console>:46)
at $iwC.<init>(<console>:48)
at <init>(<console>:50)


However, if the same query (select count(*) from e_carbon.prod_inst_carbon5 where ind_prod_inst_id='13623') returns only about two thousand rows, the update succeeds.
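For completeness, a rough sketch of the reproduction flow from the spark-shell (assuming sc is the active SparkContext and that the store location is the one visible in the meta.lock path in the log above):

import org.apache.spark.sql.CarbonContext

// Assumption: store path taken from the lock-file line in the log above.
val cc = new CarbonContext(sc, "hdfs://ns1/user/e_carbon/private/carbon.store")

// When the predicate matches ~100,000 rows, this fails with
// "Multiple input rows matched for same row."; when it matches
// only ~2,000 rows, the same statement completes successfully.
cc.sql("update e_carbon.prod_inst_carbon5 set (remark)=('9999') where ind_prod_inst_id='13623'")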



yixu2001
