[
https://issues.apache.org/jira/browse/PHOENIX-52?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=13905839#comment-13905839
]
Russell Jurney commented on PHOENIX-52:
---------------------------------------
Now I get this error, related to getMaster (see also PHOENIX-56, which has the
same problem):
2014-02-18 22:00:19,594 INFO
org.apache.hadoop.hbase.client.HConnectionManager$HConnectionImplementation:
getMaster attempt 9 of 14 failed; retrying after sleep of 32078
java.io.IOException: Could not set up IO Streams
at
org.apache.hadoop.hbase.ipc.HBaseClient$Connection.setupIOstreams(HBaseClient.java:454)
at
org.apache.hadoop.hbase.ipc.HBaseClient.getConnection(HBaseClient.java:1133)
at org.apache.hadoop.hbase.ipc.HBaseClient.call(HBaseClient.java:980)
at
org.apache.hadoop.hbase.ipc.WritableRpcEngine$Invoker.invoke(WritableRpcEngine.java:86)
at com.sun.proxy.$Proxy14.getProtocolVersion(Unknown Source)
at
org.apache.hadoop.hbase.ipc.WritableRpcEngine.getProxy(WritableRpcEngine.java:138)
at
org.apache.hadoop.hbase.client.HConnectionManager$HConnectionImplementation.getMaster(HConnectionManager.java:816)
at org.apache.hadoop.hbase.client.HBaseAdmin.<init>(HBaseAdmin.java:127)
at
com.salesforce.phoenix.query.ConnectionQueryServicesImpl.ensureTableCreated(ConnectionQueryServicesImpl.java:610)
at
com.salesforce.phoenix.query.ConnectionQueryServicesImpl.createTable(ConnectionQueryServicesImpl.java:1048)
at
com.salesforce.phoenix.schema.MetaDataClient.createTable(MetaDataClient.java:783)
at
com.salesforce.phoenix.schema.MetaDataClient.createTable(MetaDataClient.java:338)
at
com.salesforce.phoenix.compile.CreateTableCompiler$1.execute(CreateTableCompiler.java:83)
at
com.salesforce.phoenix.jdbc.PhoenixStatement$ExecutableCreateTableStatement.executeUpdate(PhoenixStatement.java:357)
at
com.salesforce.phoenix.jdbc.PhoenixStatement.executeUpdate(PhoenixStatement.java:1036)
at
com.salesforce.phoenix.query.ConnectionQueryServicesImpl.init(ConnectionQueryServicesImpl.java:1205)
at
com.salesforce.phoenix.jdbc.PhoenixDriver.getConnectionQueryServices(PhoenixDriver.java:87)
at
com.salesforce.phoenix.jdbc.PhoenixEmbeddedDriver.connect(PhoenixEmbeddedDriver.java:115)
at java.sql.DriverManager.getConnection(DriverManager.java:571)
at java.sql.DriverManager.getConnection(DriverManager.java:187)
at
com.salesforce.phoenix.pig.PhoenixPigConfiguration.getConnection(PhoenixPigConfiguration.java:100)
at
com.salesforce.phoenix.pig.hadoop.PhoenixOutputFormat.getConnection(PhoenixOutputFormat.java:97)
at
com.salesforce.phoenix.pig.hadoop.PhoenixOutputFormat.getRecordWriter(PhoenixOutputFormat.java:75)
at
org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.PigOutputFormat.getRecordWriter(PigOutputFormat.java:84)
at
org.apache.hadoop.mapred.MapTask$NewDirectOutputCollector.<init>(MapTask.java:548)
at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:653)
at org.apache.hadoop.mapred.MapTask.run(MapTask.java:330)
at org.apache.hadoop.mapred.Child$4.run(Child.java:268)
at java.security.AccessController.doPrivileged(Native Method)
at javax.security.auth.Subject.doAs(Subject.java:415)
at
org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1408)
at org.apache.hadoop.mapred.Child.main(Child.java:262)
Caused by: java.lang.NoSuchMethodError:
org.apache.hadoop.net.NetUtils.getInputStream(Ljava/net/Socket;)Ljava/io/InputStream;
at
org.apache.hadoop.hbase.ipc.HBaseClient$Connection.setupIOstreams(HBaseClient.java:437)
... 31 more
> Pig 0.11 in CDH 4.4 can't connect via PhoenixHBaseStorage
> ---------------------------------------------------------
>
> Key: PHOENIX-52
> URL: https://issues.apache.org/jira/browse/PHOENIX-52
> Project: Phoenix
> Issue Type: Bug
> Affects Versions: 2.2.3
> Environment: Cloudera CDH 4.4/Pig 0.11
> Reporter: Russell Jurney
>
> 2014-02-14 10:42:26,610 WARN org.apache.hadoop.conf.Configuration:
> dfs.http.address is deprecated. Instead, use dfs.namenode.http-address
> 2014-02-14 10:42:26,610 WARN org.apache.hadoop.conf.Configuration:
> dfs.name.dir.restore is deprecated. Instead, use dfs.namenode.name.dir.restore
> 2014-02-14 10:42:26,610 WARN org.apache.hadoop.conf.Configuration:
> dfs.https.client.keystore.resource is deprecated. Instead, use
> dfs.client.https.keystore.resource
> 2014-02-14 10:42:26,611 WARN org.apache.hadoop.conf.Configuration:
> dfs.backup.address is deprecated. Instead, use dfs.namenode.backup.address
> 2014-02-14 10:42:26,611 WARN org.apache.hadoop.conf.Configuration:
> dfs.backup.http.address is deprecated. Instead, use
> dfs.namenode.backup.http-address
> 2014-02-14 10:42:26,611 WARN org.apache.hadoop.conf.Configuration:
> dfs.permissions is deprecated. Instead, use dfs.permissions.enabled
> 2014-02-14 10:42:26,611 WARN org.apache.hadoop.conf.Configuration:
> dfs.safemode.extension is deprecated. Instead, use
> dfs.namenode.safemode.extension
> 2014-02-14 10:42:26,611 WARN org.apache.hadoop.conf.Configuration:
> dfs.datanode.max.xcievers is deprecated. Instead, use
> dfs.datanode.max.transfer.threads
> 2014-02-14 10:42:26,611 WARN org.apache.hadoop.conf.Configuration:
> dfs.https.need.client.auth is deprecated. Instead, use
> dfs.client.https.need-auth
> 2014-02-14 10:42:26,611 WARN org.apache.hadoop.conf.Configuration:
> dfs.https.address is deprecated. Instead, use dfs.namenode.https-address
> 2014-02-14 10:42:26,611 WARN org.apache.hadoop.conf.Configuration:
> dfs.replication.interval is deprecated. Instead, use
> dfs.namenode.replication.interval
> 2014-02-14 10:42:26,612 WARN org.apache.hadoop.conf.Configuration:
> fs.checkpoint.edits.dir is deprecated. Instead, use
> dfs.namenode.checkpoint.edits.dir
> 2014-02-14 10:42:26,612 WARN org.apache.hadoop.conf.Configuration:
> dfs.write.packet.size is deprecated. Instead, use dfs.client-write-packet-size
> 2014-02-14 10:42:26,612 WARN org.apache.hadoop.conf.Configuration:
> dfs.permissions.supergroup is deprecated. Instead, use
> dfs.permissions.superusergroup
> 2014-02-14 10:42:26,612 WARN org.apache.hadoop.conf.Configuration:
> topology.script.number.args is deprecated. Instead, use
> net.topology.script.number.args
> 2014-02-14 10:42:26,612 WARN org.apache.hadoop.conf.Configuration:
> dfs.umaskmode is deprecated. Instead, use fs.permissions.umask-mode
> 2014-02-14 10:42:26,612 WARN org.apache.hadoop.conf.Configuration:
> dfs.secondary.http.address is deprecated. Instead, use
> dfs.namenode.secondary.http-address
> 2014-02-14 10:42:26,612 WARN org.apache.hadoop.conf.Configuration:
> fs.checkpoint.period is deprecated. Instead, use
> dfs.namenode.checkpoint.period
> 2014-02-14 10:42:26,612 WARN org.apache.hadoop.conf.Configuration:
> topology.node.switch.mapping.impl is deprecated. Instead, use
> net.topology.node.switch.mapping.impl
> 2014-02-14 10:42:26,613 WARN org.apache.hadoop.conf.Configuration:
> io.bytes.per.checksum is deprecated. Instead, use dfs.bytes-per-checksum
> 2014-02-14 10:42:26,617 INFO org.apache.hadoop.mapred.TaskLogsTruncater:
> Initializing logs' truncater with mapRetainSize=-1 and reduceRetainSize=-1
> 2014-02-14 10:42:26,620 FATAL org.apache.hadoop.mapred.Child: Error running
> child : java.lang.IncompatibleClassChangeError: Found interface
> org.apache.hadoop.mapreduce.TaskAttemptContext, but class was expected
> at
> com.salesforce.phoenix.pig.hadoop.PhoenixOutputFormat.getRecordWriter(PhoenixOutputFormat.java:75)
> at
> org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.PigOutputFormat.getRecordWriter(PigOutputFormat.java:84)
> at
> org.apache.hadoop.mapred.MapTask$NewDirectOutputCollector.<init>(MapTask.java:548)
> at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:653)
> at org.apache.hadoop.mapred.MapTask.run(MapTask.java:330)
> at org.apache.hadoop.mapred.Child$4.run(Child.java:268)
> at java.security.AccessController.doPrivileged(Native Method)
> at javax.security.auth.Subject.doAs(Subject.java:415)
> at
> org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1408)
> at org.apache.hadoop.mapred.Child.main(Child.java:262)
--
This message was sent by Atlassian JIRA
(v6.1.5#6160)