[ https://issues.apache.org/jira/browse/PHOENIX-4354?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ]

Akshita Malhotra reassigned PHOENIX-4354:
-----------------------------------------

    Assignee: Akshita Malhotra

> Mappers fail in snapshot-based index rebuilding job
> ---------------------------------------------------
>
>                 Key: PHOENIX-4354
>                 URL: https://issues.apache.org/jira/browse/PHOENIX-4354
>             Project: Phoenix
>          Issue Type: Bug
>    Affects Versions: 4.13.0
>            Reporter: Monani Mihir
>            Assignee: Akshita Malhotra
>
> Command to run the snapshot-based index job:
> {code}
> bin/hbase org.apache.phoenix.mapreduce.index.IndexTool -it DATA_INDEX -dt DATA -s SCHEMA -snap -op /TEST/DATA_INDEX
> {code}
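> For reference, the same rebuild can presumably also be launched programmatically; a minimal sketch, assuming IndexTool follows the standard Hadoop Tool contract (the class name and flags are taken verbatim from the command above; everything else is illustrative):
> {code}
> import org.apache.hadoop.conf.Configuration;
> import org.apache.hadoop.hbase.HBaseConfiguration;
> import org.apache.hadoop.util.ToolRunner;
> import org.apache.phoenix.mapreduce.index.IndexTool;
>
> public class SnapshotIndexRebuild {
>     public static void main(String[] args) throws Exception {
>         Configuration conf = HBaseConfiguration.create();
>         // Same arguments as the CLI invocation above; flag meanings
>         // are inferred from the table/schema names in this report.
>         int exit = ToolRunner.run(conf, new IndexTool(), new String[] {
>             "-it", "DATA_INDEX",      // index table to rebuild
>             "-dt", "DATA",            // source data table
>             "-s", "SCHEMA",           // schema name
>             "-snap",                  // rebuild from a table snapshot
>             "-op", "/TEST/DATA_INDEX" // assumed meaning: output path
>         });
>         System.exit(exit);
>     }
> }
> {code}
> Mapper log: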
> {code}
> 2017-11-06 09:25:25,054 WARN  [oreSnapshot-pool6-t1] backup.HFileArchiver - Failed to archive class org.apache.hadoop.hbase.backup.HFileArchiver$FileablePath, file:hdfs://hdfs-local/index-snapshot-dir/restore-dir/ed465e0f-002e-43b3-8ec4-133e81c4e3ea/data/default/SCHEMA.DATA/0b93e3fcba18cf281cc147a08fc4656f/0/SCHEMA.DATA=0b93e3fcba18cf281cc147a08fc4656f-14aa829f6e63460fab309cd1f32b9627 on try #2
> java.io.FileNotFoundException: File/Directory /index-snapshot-dir/restore-dir/ed465e0f-002e-43b3-8ec4-133e81c4e3ea/data/default/SCHEMA.DATA/0b93e3fcba18cf281cc147a08fc4656f/0/SCHEMA.DATA=0b93e3fcba18cf281cc147a08fc4656f-14aa829f6e63460fab309cd1f32b9627 does not exist.
>     at org.apache.hadoop.hdfs.server.namenode.FSDirAttrOp.setTimes(FSDirAttrOp.java:123)
>     at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.setTimes(FSNamesystem.java:1921)
>     at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.setTimes(NameNodeRpcServer.java:1223)
>     at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.setTimes(ClientNamenodeProtocolServerSideTranslatorPB.java:915)
>     at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
>     at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:616)
>     at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:982)
>     at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2216)
>     at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2212)
>     at java.security.AccessController.doPrivileged(Native Method)
>     at javax.security.auth.Subject.doAs(Subject.java:422)
>     at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1751)
>     at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2210)
>     at sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method)
>     at sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:62)
>     at sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)
>     at java.lang.reflect.Constructor.newInstance(Constructor.java:423)
>     at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:106)
>     at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:73)
>     at org.apache.hadoop.hdfs.DFSClient.setTimes(DFSClient.java:3167)
>     at org.apache.hadoop.hdfs.DistributedFileSystem$31.doCall(DistributedFileSystem.java:1548)
>     at org.apache.hadoop.hdfs.DistributedFileSystem$31.doCall(DistributedFileSystem.java:1544)
>     at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81)
>     at org.apache.hadoop.hdfs.DistributedFileSystem.setTimes(DistributedFileSystem.java:1544)
>     at org.apache.hadoop.hbase.util.FSUtils.renameAndSetModifyTime(FSUtils.java:1964)
>     at org.apache.hadoop.hbase.backup.HFileArchiver$File.moveAndClose(HFileArchiver.java:586)
>     at org.apache.hadoop.hbase.backup.HFileArchiver.resolveAndArchiveFile(HFileArchiver.java:425)
>     at org.apache.hadoop.hbase.backup.HFileArchiver.archiveStoreFile(HFileArchiver.java:260)
>     at org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper.restoreRegion(RestoreSnapshotHelper.java:445)
>     at org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper.access$300(RestoreSnapshotHelper.java:110)
>     at org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper$2.editRegion(RestoreSnapshotHelper.java:393)
>     at org.apache.hadoop.hbase.util.ModifyRegionUtils$2.call(ModifyRegionUtils.java:215)
>     at org.apache.hadoop.hbase.util.ModifyRegionUtils$2.call(ModifyRegionUtils.java:212)
>     at java.util.concurrent.FutureTask.run(FutureTask.java:266)
>     at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511)
>     at java.util.concurrent.FutureTask.run(FutureTask.java:266)
>     at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
>     at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
>     at java.lang.Thread.run(Thread.java:745)
> Caused by: org.apache.hadoop.ipc.RemoteException(java.io.FileNotFoundException): File/Directory /index-snapshot-dir/restore-dir/ed465e0f-002e-43b3-8ec4-133e81c4e3ea/data/default/SCHEMA.DATA/0b93e3fcba18cf281cc147a08fc4656f/0/SCHEMA.DATA=0b93e3fcba18cf281cc147a08fc4656f-14aa829f6e63460fab309cd1f32b9627 does not exist.
>     at org.apache.hadoop.hdfs.server.namenode.FSDirAttrOp.setTimes(FSDirAttrOp.java:123)
>     at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.setTimes(FSNamesystem.java:1921)
>     at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.setTimes(NameNodeRpcServer.java:1223)
>     at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.setTimes(ClientNamenodeProtocolServerSideTranslatorPB.java:915)
>     at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
>     at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:616)
>     at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:982)
>     at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2216)
>     at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2212)
>     at java.security.AccessController.doPrivileged(Native Method)
>     at javax.security.auth.Subject.doAs(Subject.java:422)
>     at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1751)
>     at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2210)
>     at org.apache.hadoop.ipc.Client.call(Client.java:1476)
>     at org.apache.hadoop.ipc.Client.call(Client.java:1413)
>     at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:229)
>     at com.sun.proxy.$Proxy13.setTimes(Unknown Source)
>     at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.setTimes(ClientNamenodeProtocolTranslatorPB.java:854)
>     at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
>     at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
>     at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
>     at java.lang.reflect.Method.invoke(Method.java:498)
>     at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:193)
>     at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:102)
>     at com.sun.proxy.$Proxy14.setTimes(Unknown Source)
>     at org.apache.hadoop.hdfs.DFSClient.setTimes(DFSClient.java:3165)
>     ... 19 more
> 2017-11-06 09:25:25,053 ERROR [oreSnapshot-pool6-t2] io.HFileLink - couldn't create the link=SCHEMA.DATA=0bafcff0f38e0f9bf98715f88a36d6ce-4e96d6a1cb6a460ca122b8215e23300c for hdfs://hdfs-local/index-snapshot-dir/restore-dir/ed465e0f-002e-43b3-8ec4-133e81c4e3ea/data/default/SCHEMA.DATA/0bafcff0f38e0f9bf98715f88a36d6ce/0
> org.apache.hadoop.ipc.RemoteException(org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException): Failed to CREATE_FILE /index-snapshot-dir/restore-dir/ed465e0f-002e-43b3-8ec4-133e81c4e3ea/data/default/SCHEMA.DATA/0bafcff0f38e0f9bf98715f88a36d6ce/0/SCHEMA.DATA=0bafcff0f38e0f9bf98715f88a36d6ce-4e96d6a1cb6a460ca122b8215e23300c for DFSClient_attempt_1508241002000_5658_m_000015_0_201511203_1 on 10.231.90.26 because this file lease is currently owned by DFSClient_attempt_1508241002000_5658_m_000016_0_-1888081567_1 on 10.231.90.25
>     at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.recoverLeaseInternal(FSNamesystem.java:2950)
>     at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.startFileInternal(FSNamesystem.java:2580)
>     at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.startFileInt(FSNamesystem.java:2469)
>     at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.startFile(FSNamesystem.java:2352)
>     at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.create(NameNodeRpcServer.java:624)
>     at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.create(ClientNamenodeProtocolServerSideTranslatorPB.java:397)
>     at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
>     at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:616)
>     at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:982)
>     at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2216)
>     at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2212)
>     at java.security.AccessController.doPrivileged(Native Method)
>     at javax.security.auth.Subject.doAs(Subject.java:422)
>     at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1751)
>     at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2210)
>     at org.apache.hadoop.ipc.Client.call(Client.java:1476)
>     at org.apache.hadoop.ipc.Client.call(Client.java:1413)
>     at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:229)
>     at com.sun.proxy.$Proxy13.create(Unknown Source)
>     at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.create(ClientNamenodeProtocolTranslatorPB.java:296)
>     at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
>     at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
>     at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
>     at java.lang.reflect.Method.invoke(Method.java:498)
>     at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:193)
>     at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:102)
>     at com.sun.proxy.$Proxy14.create(Unknown Source)
>     at org.apache.hadoop.hdfs.DFSOutputStream.newStreamForCreate(DFSOutputStream.java:1811)
>     at org.apache.hadoop.hdfs.DFSClient.create(DFSClient.java:1714)
>     at org.apache.hadoop.hdfs.DFSClient.create(DFSClient.java:1649)
>     at org.apache.hadoop.hdfs.DistributedFileSystem$7.doCall(DistributedFileSystem.java:451)
>     at org.apache.hadoop.hdfs.DistributedFileSystem$7.doCall(DistributedFileSystem.java:447)
>     at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81)
>     at org.apache.hadoop.hdfs.DistributedFileSystem.create(DistributedFileSystem.java:447)
>     at org.apache.hadoop.hdfs.DistributedFileSystem.create(DistributedFileSystem.java:390)
>     at org.apache.hadoop.fs.FileSystem.create(FileSystem.java:911)
>     at org.apache.hadoop.fs.FileSystem.create(FileSystem.java:892)
>     at org.apache.hadoop.fs.FileSystem.create(FileSystem.java:854)
>     at org.apache.hadoop.fs.FileSystem.createNewFile(FileSystem.java:1154)
>     at org.apache.hadoop.hbase.io.HFileLink.create(HFileLink.java:385)
>     at org.apache.hadoop.hbase.io.HFileLink.create(HFileLink.java:317)
>     at org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper.restoreStoreFile(RestoreSnapshotHelper.java:583)
>     at org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper.restoreRegion(RestoreSnapshotHelper.java:452)
>     at org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper.access$300(RestoreSnapshotHelper.java:110)
>     at org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper$2.editRegion(RestoreSnapshotHelper.java:393)
>     at org.apache.hadoop.hbase.util.ModifyRegionUtils$2.call(ModifyRegionUtils.java:215)
>     at org.apache.hadoop.hbase.util.ModifyRegionUtils$2.call(ModifyRegionUtils.java:212)
>     at java.util.concurrent.FutureTask.run(FutureTask.java:266)
>     at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511)
>     at java.util.concurrent.FutureTask.run(FutureTask.java:266)
>     at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
>     at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
>     at java.lang.Thread.run(Thread.java:745)
> {code}
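> The two failures above look like a race between mappers restoring the snapshot into the same restore directory: the AlreadyBeingCreatedException shows attempt m_000015 on 10.231.90.26 failing to create an HFileLink whose HDFS lease is still held by attempt m_000016 on 10.231.90.25, while the FileNotFoundException shows a file vanishing mid-archive. A minimal sketch of that lease conflict in isolation, assuming standard HDFS lease semantics and using MiniDFSCluster from the hadoop-hdfs test jars (class and path names here are illustrative, not Phoenix code):
> {code}
> import org.apache.hadoop.conf.Configuration;
> import org.apache.hadoop.fs.FileSystem;
> import org.apache.hadoop.fs.Path;
> import org.apache.hadoop.hdfs.MiniDFSCluster;
>
> // Illustrative sketch only: two independent DFS clients racing to
> // create the same path, mirroring the mapper collision in the log.
> public class LeaseRaceSketch {
>     public static void main(String[] args) throws Exception {
>         Configuration conf = new Configuration();
>         MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
>         try {
>             Path link = new Path("/restore-dir/region/cf/linkfile"); // hypothetical path
>             // Client A creates the file and keeps the output stream open,
>             // so the NameNode considers its write lease active.
>             FileSystem fsA = FileSystem.newInstance(cluster.getURI(), conf);
>             fsA.create(link); // stream intentionally left open
>             // Client B (a separate DFSClient instance) attempts the same
>             // create without overwrite; recoverLeaseInternal should reject
>             // it because the lease is owned by client A.
>             FileSystem fsB = FileSystem.newInstance(cluster.getURI(), conf);
>             fsB.create(link, false); // expected: RemoteException(AlreadyBeingCreatedException)
>         } finally {
>             cluster.shutdown();
>         }
>     }
> }
> {code}
> If that reading is right, restoring once up front or giving each mapper its own restore directory would presumably avoid the collision.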


