[ 
https://issues.apache.org/jira/browse/PHOENIX-5619?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Toshihiro Suzuki updated PHOENIX-5619:
--------------------------------------
    Environment: HDP-3.1.0

> CREATE TABLE AS SELECT for Phoenix table doesn't work correctly in Hive
> -----------------------------------------------------------------------
>
>                 Key: PHOENIX-5619
>                 URL: https://issues.apache.org/jira/browse/PHOENIX-5619
>             Project: Phoenix
>          Issue Type: Bug
>         Environment: HDP-3.1.0
>            Reporter: Toshihiro Suzuki
>            Assignee: Toshihiro Suzuki
>            Priority: Major
>          Time Spent: 10m
>  Remaining Estimate: 0h
>
> The steps to reproduce are as follows:
> 1. Create a table in Phoenix:
> {code:java}
> CREATE TABLE TEST (ID VARCHAR PRIMARY KEY, COL VARCHAR);
> {code}
> 2. Create a table in Hive that's based on the table in Phoenix created in 
> step 1:
> {code:java}
> CREATE EXTERNAL TABLE test (id STRING, col STRING)
> STORED BY 'org.apache.phoenix.hive.PhoenixStorageHandler'
> TBLPROPERTIES (
>   "phoenix.table.name" = "TEST",
>   "phoenix.zookeeper.quorum" = "<quorum string>",
>   "phoenix.zookeeper.znode.parent" = "/hbase-unsecure",
>   "phoenix.zookeeper.client.port" = "2181",
>   "phoenix.rowkeys" = "ID",
>   "phoenix.column.mapping" = "id:ID, col:COL"
> );
> {code}
> 3. Insert data into the Hive table in Hive:
> {code:java}
> INSERT INTO TABLE test VALUES ('id', 'col');
> {code}
> 4. Run CREATE TABLE AS SELECT in Hive:
> {code:java}
> CREATE TABLE test2 AS SELECT * from test;
> {code}
>  
> After step 4, I get the following error:
> {code:java}
> 2019-12-13 08:22:20,963 [DEBUG] [TezChild] |client.RpcRetryingCallerImpl|: 
> Call exception, tries=7, retries=16, started=8159 ms ago, cancelled=false, 
> msg=org.apache.zookeeper.KeeperException$NoNodeException: KeeperErrorCode = 
> NoNode for /hbase/meta-region-server, details=row 'SYSTEM:CATALOG' on table 
> 'hbase:meta' at null, exception=java.io.IOException: 
> org.apache.zookeeper.KeeperException$NoNodeException: KeeperErrorCode = 
> NoNode for /hbase/meta-region-server
>       at 
> org.apache.hadoop.hbase.client.ConnectionImplementation.get(ConnectionImplementation.java:2009)
>       at 
> org.apache.hadoop.hbase.client.ConnectionImplementation.locateMeta(ConnectionImplementation.java:785)
>       at 
> org.apache.hadoop.hbase.client.ConnectionImplementation.locateRegion(ConnectionImplementation.java:752)
>       at 
> org.apache.hadoop.hbase.client.ConnectionImplementation.locateRegion(ConnectionImplementation.java:741)
>       at 
> org.apache.hadoop.hbase.client.ConnectionImplementation.locateRegion(ConnectionImplementation.java:712)
>       at 
> org.apache.hadoop.hbase.client.ConnectionImplementation.getRegionLocation(ConnectionImplementation.java:594)
>       at 
> org.apache.hadoop.hbase.client.HRegionLocator.getRegionLocation(HRegionLocator.java:72)
>       at 
> org.apache.hadoop.hbase.client.RegionServerCallable.prepare(RegionServerCallable.java:223)
>       at 
> org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:105)
>       at org.apache.hadoop.hbase.client.HTable.get(HTable.java:386)
>       at org.apache.hadoop.hbase.client.HTable.get(HTable.java:360)
>       at 
> org.apache.hadoop.hbase.MetaTableAccessor.getTableState(MetaTableAccessor.java:1066)
>       at 
> org.apache.hadoop.hbase.MetaTableAccessor.tableExists(MetaTableAccessor.java:389)
>       at 
> org.apache.hadoop.hbase.client.HBaseAdmin$6.rpcCall(HBaseAdmin.java:441)
>       at 
> org.apache.hadoop.hbase.client.HBaseAdmin$6.rpcCall(HBaseAdmin.java:438)
>       at 
> org.apache.hadoop.hbase.client.RpcRetryingCallable.call(RpcRetryingCallable.java:58)
>       at 
> org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:107)
>       at 
> org.apache.hadoop.hbase.client.HBaseAdmin.executeCallable(HBaseAdmin.java:3080)
>       at 
> org.apache.hadoop.hbase.client.HBaseAdmin.executeCallable(HBaseAdmin.java:3072)
>       at 
> org.apache.hadoop.hbase.client.HBaseAdmin.tableExists(HBaseAdmin.java:438)
>       at 
> org.apache.phoenix.query.ConnectionQueryServicesImpl.ensureTableCreated(ConnectionQueryServicesImpl.java:1106)
>       at 
> org.apache.phoenix.query.ConnectionQueryServicesImpl.createTable(ConnectionQueryServicesImpl.java:1502)
>       at 
> org.apache.phoenix.schema.MetaDataClient.createTableInternal(MetaDataClient.java:2740)
>       at 
> org.apache.phoenix.schema.MetaDataClient.createTable(MetaDataClient.java:1114)
>       at 
> org.apache.phoenix.compile.CreateTableCompiler$1.execute(CreateTableCompiler.java:192)
>       at 
> org.apache.phoenix.jdbc.PhoenixStatement$2.call(PhoenixStatement.java:408)
>       at 
> org.apache.phoenix.jdbc.PhoenixStatement$2.call(PhoenixStatement.java:391)
>       at org.apache.phoenix.call.CallRunner.run(CallRunner.java:53)
>       at 
> org.apache.phoenix.jdbc.PhoenixStatement.executeMutation(PhoenixStatement.java:390)
>       at 
> org.apache.phoenix.jdbc.PhoenixStatement.executeMutation(PhoenixStatement.java:378)
>       at 
> org.apache.phoenix.jdbc.PhoenixStatement.executeUpdate(PhoenixStatement.java:1806)
>       at 
> org.apache.phoenix.query.ConnectionQueryServicesImpl$12.call(ConnectionQueryServicesImpl.java:2570)
>       at 
> org.apache.phoenix.query.ConnectionQueryServicesImpl$12.call(ConnectionQueryServicesImpl.java:2533)
>       at 
> org.apache.phoenix.util.PhoenixContextExecutor.call(PhoenixContextExecutor.java:76)
>       at 
> org.apache.phoenix.query.ConnectionQueryServicesImpl.init(ConnectionQueryServicesImpl.java:2533)
>       at 
> org.apache.phoenix.jdbc.PhoenixDriver.getConnectionQueryServices(PhoenixDriver.java:255)
>       at 
> org.apache.phoenix.jdbc.PhoenixEmbeddedDriver.createConnection(PhoenixEmbeddedDriver.java:150)
>       at org.apache.phoenix.jdbc.PhoenixDriver.connect(PhoenixDriver.java:221)
>       at java.sql.DriverManager.getConnection(DriverManager.java:664)
>       at java.sql.DriverManager.getConnection(DriverManager.java:208)
>       at 
> org.apache.phoenix.hive.util.PhoenixConnectionUtil.getConnection(PhoenixConnectionUtil.java:99)
>       at 
> org.apache.phoenix.hive.util.PhoenixConnectionUtil.getInputConnection(PhoenixConnectionUtil.java:62)
>       at 
> org.apache.phoenix.hive.util.PhoenixUtil.getPrimaryKeyColumnList(PhoenixUtil.java:112)
>       at 
> org.apache.phoenix.hive.mapreduce.PhoenixResultWritable.setConf(PhoenixResultWritable.java:208)
>       at 
> org.apache.phoenix.hive.mapreduce.PhoenixResultWritable.<init>(PhoenixResultWritable.java:69)
>       at 
> org.apache.phoenix.hive.mapreduce.PhoenixResultWritable.<init>(PhoenixResultWritable.java:74)
>       at 
> org.apache.phoenix.hive.PhoenixSerializer.<init>(PhoenixSerializer.java:82)
>       at org.apache.phoenix.hive.PhoenixSerDe.initialize(PhoenixSerDe.java:86)
>       at 
> org.apache.hadoop.hive.serde2.AbstractSerDe.initialize(AbstractSerDe.java:54)
>       at 
> org.apache.hadoop.hive.serde2.SerDeUtils.initializeSerDe(SerDeUtils.java:540)
>       at 
> org.apache.hadoop.hive.ql.plan.PartitionDesc.getDeserializer(PartitionDesc.java:184)
>       at 
> org.apache.hadoop.hive.ql.exec.vector.VectorMapOperator$RowDeserializePartitionContext.init(VectorMapOperator.java:359)
>       at 
> org.apache.hadoop.hive.ql.exec.vector.VectorMapOperator.createAndInitPartitionContext(VectorMapOperator.java:423)
>       at 
> org.apache.hadoop.hive.ql.exec.vector.VectorMapOperator.internalSetChildren(VectorMapOperator.java:570)
>       at 
> org.apache.hadoop.hive.ql.exec.vector.VectorMapOperator.setChildren(VectorMapOperator.java:482)
>       at 
> org.apache.hadoop.hive.ql.exec.tez.MapRecordProcessor.init(MapRecordProcessor.java:293)
>       at 
> org.apache.hadoop.hive.ql.exec.tez.TezProcessor.initializeAndRunProcessor(TezProcessor.java:266)
>       at 
> org.apache.hadoop.hive.ql.exec.tez.TezProcessor.run(TezProcessor.java:250)
>       at 
> org.apache.tez.runtime.LogicalIOProcessorRuntimeTask.run(LogicalIOProcessorRuntimeTask.java:374)
>       at 
> org.apache.tez.runtime.task.TaskRunner2Callable$1.run(TaskRunner2Callable.java:73)
>       at 
> org.apache.tez.runtime.task.TaskRunner2Callable$1.run(TaskRunner2Callable.java:61)
>       at java.security.AccessController.doPrivileged(Native Method)
>       at javax.security.auth.Subject.doAs(Subject.java:422)
>       at 
> org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1730)
>       at 
> org.apache.tez.runtime.task.TaskRunner2Callable.callInternal(TaskRunner2Callable.java:61)
>       at 
> org.apache.tez.runtime.task.TaskRunner2Callable.callInternal(TaskRunner2Callable.java:37)
>       at org.apache.tez.common.CallableWithNdc.call(CallableWithNdc.java:36)
>       at 
> com.google.common.util.concurrent.TrustedListenableFutureTask$TrustedFutureInterruptibleTask.runInterruptibly(TrustedListenableFutureTask.java:108)
>       at 
> com.google.common.util.concurrent.InterruptibleTask.run(InterruptibleTask.java:41)
>       at 
> com.google.common.util.concurrent.TrustedListenableFutureTask.run(TrustedListenableFutureTask.java:77)
>       at 
> java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
>       at 
> java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
>       at java.lang.Thread.run(Thread.java:745)
> Caused by: org.apache.zookeeper.KeeperException$NoNodeException: 
> KeeperErrorCode = NoNode for /hbase/meta-region-server
>       at org.apache.zookeeper.KeeperException.create(KeeperException.java:111)
>       at org.apache.zookeeper.KeeperException.create(KeeperException.java:51)
>       at 
> org.apache.hadoop.hbase.zookeeper.ReadOnlyZKClient$ZKTask$1.exec(ReadOnlyZKClient.java:168)
>       at 
> org.apache.hadoop.hbase.zookeeper.ReadOnlyZKClient.run(ReadOnlyZKClient.java:323)
>       ... 1 more
> {code}
> The correct zookeeper znode parent is "/hbase-unsecure" in this case, but it 
> looks like the default value "/hbase" is used.



--
This message was sent by Atlassian Jira
(v8.3.4#803005)

Reply via email to