[jira] [Updated] (HIVE-19587) HeartBeat thread uses cancelled delegation token while connecting to meta on KERBEROS cluster

2018-08-09 Thread Oleksiy Sayankin (JIRA)


 [ 
https://issues.apache.org/jira/browse/HIVE-19587?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Oleksiy Sayankin updated HIVE-19587:

Affects Version/s: 3.1.0

> HeartBeat thread uses cancelled delegation token while connecting to meta on 
> KERBEROS cluster
> -
>
> Key: HIVE-19587
> URL: https://issues.apache.org/jira/browse/HIVE-19587
> Project: Hive
>  Issue Type: Bug
>  Components: Transactions
>Affects Versions: 3.0.0, 2.3.2, 3.1.0
>Reporter: Oleksiy Sayankin
>Assignee: Oleksiy Sayankin
>Priority: Blocker
> Attachments: HIVE-19587.1.patch
>
>
> *STEP 1. Create test data*
> {code}
> create table t1 (id int) ROW FORMAT DELIMITED FIELDS TERMINATED BY  ",";
> create table t2 (id int) ROW FORMAT DELIMITED FIELDS TERMINATED BY  ",";
> {code}
> Generate 10 000 000 lines of random data
> {code}
> package com.test.app;
> import java.io.FileNotFoundException;
> import java.io.PrintWriter;
> import java.util.concurrent.ThreadLocalRandom;
> public class App {
>   public static void main(String[] args) throws FileNotFoundException {
> try (PrintWriter out = new PrintWriter("table.data");) {
>   int min = 0;
>   int max = 10_000;
>   int numRows = 10_000_000;
>   for (int i = 0; i <= numRows - 1; i++){
> int randomNum = ThreadLocalRandom.current().nextInt(min, max + 1);
> out.println(randomNum);
>   }
> }
>   }
> }
> {code}
> Upload data to Hive tables
> {code}
> load data local inpath '/home/myuser/table.data' into table t1;
> load data local inpath '/home/myuser/table.data' into table t2;
> {code}
> *STEP 2. Configure transactions in hive-site.xml*
> {code}
> 
>
>   hive.exec.dynamic.partition.mode
>   nonstrict
>
>
>   hive.support.concurrency
>   true
>
>
>   hive.enforce.bucketing
>   true
>
>
>   hive.txn.manager
>   org.apache.hadoop.hive.ql.lockmgr.DbTxnManager
>
>
>   hive.compactor.initiator.on
>   true
>
>
>   hive.compactor.worker.threads
>   1
>
> {code}
> *STEP 3. Configure hive.txn.timeout in hive-site.xml*
> {code}
> 
>
>   hive.txn.timeout
>   10s
>
> {code}
> *STEP 4. Connect via beeline to HS2 with KERBEROS*
> {code}
> !connect 
> jdbc:hive2://node8.cluster:1/default;principal=myuser/node8.cluster@NODE8;ssl=true;sslTrustStore=/opt/myuser/conf/ssl_truststore
> {code}
> {code}
> select count(*) from t1;
> {code}
> *STEP 5. Close connection and reconnect*
> {code}
> !close
> {code}
> {code}
> !connect 
> jdbc:hive2://node8.cluster:1/default;principal=myuser/node8.cluster@NODE8;ssl=true;sslTrustStore=/opt/myuser/conf/ssl_truststore
> {code}
> *STEP 6. Perform long-running query*
> This query lasts about 600s
> {code}
> select count(*) from t1 join t2 on t1.id = t2.id;
> {code}
> *EXPECTED RESULT*
> Query finishes successfully
> *ACTUAL RESULT*
> {code}
> 2018-05-17T13:54:54,921 ERROR [pool-7-thread-10] transport.TSaslTransport: 
> SASL negotiation failure
> javax.security.sasl.SaslException: DIGEST-MD5: IO error acquiring password
> at 
> com.sun.security.sasl.digest.DigestMD5Server.validateClientResponse(DigestMD5Server.java:598)
>  
> at 
> com.sun.security.sasl.digest.DigestMD5Server.evaluateResponse(DigestMD5Server.java:244)
> at 
> org.apache.thrift.transport.TSaslTransport$SaslParticipant.evaluateChallengeOrResponse(TSaslTransport.java:539)
>  
> at 
> org.apache.thrift.transport.TSaslTransport.open(TSaslTransport.java:283)
> at 
> org.apache.thrift.transport.TSaslServerTransport.open(TSaslServerTransport.java:41)
>  
> at 
> org.apache.thrift.transport.TSaslServerTransport$Factory.getTransport(TSaslServerTransport.java:216)
>  
> at 
> org.apache.hadoop.hive.thrift.HadoopThriftAuthBridge$Server$TUGIAssumingTransportFactory$1.run(HadoopThriftAuthBridge.java:663)
>  
> at 
> org.apache.hadoop.hive.thrift.HadoopThriftAuthBridge$Server$TUGIAssumingTransportFactory$1.run(HadoopThriftAuthBridge.java:660)
>  
> at java.security.AccessController.doPrivileged(Native Method) 
> at javax.security.auth.Subject.doAs(Subject.java:360)
> at 
> org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1613)
>  
> at 
> org.apache.hadoop.hive.thrift.HadoopThriftAuthBridge$Server$TUGIAssumingTransportFactory.getTransport(HadoopThriftAuthBridge.java:660)
>  
> at 
> org.apache.thrift.server.TThreadPoolServer$WorkerProcess.run(TThreadPoolServer.java:269)
> at 
> java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
>  
> at 
> java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
>  
> 

[jira] [Updated] (HIVE-19587) HeartBeat thread uses cancelled delegation token while connecting to meta on KERBEROS cluster

2018-05-25 Thread Oleksiy Sayankin (JIRA)

 [ 
https://issues.apache.org/jira/browse/HIVE-19587?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Oleksiy Sayankin updated HIVE-19587:

Affects Version/s: 3.0.0
   2.3.2

> HeartBeat thread uses cancelled delegation token while connecting to meta on 
> KERBEROS cluster
> -
>
> Key: HIVE-19587
> URL: https://issues.apache.org/jira/browse/HIVE-19587
> Project: Hive
>  Issue Type: Bug
>  Components: Transactions
>Affects Versions: 3.0.0, 2.3.2
>Reporter: Oleksiy Sayankin
>Assignee: Oleksiy Sayankin
>Priority: Blocker
> Attachments: HIVE-19587.1.patch
>
>
> *STEP 1. Create test data*
> {code}
> create table t1 (id int) ROW FORMAT DELIMITED FIELDS TERMINATED BY  ",";
> create table t2 (id int) ROW FORMAT DELIMITED FIELDS TERMINATED BY  ",";
> {code}
> Generate 10 000 000 lines of random data
> {code}
> package com.test.app;
> import java.io.FileNotFoundException;
> import java.io.PrintWriter;
> import java.util.concurrent.ThreadLocalRandom;
> public class App {
>   public static void main(String[] args) throws FileNotFoundException {
> try (PrintWriter out = new PrintWriter("table.data");) {
>   int min = 0;
>   int max = 10_000;
>   int numRows = 10_000_000;
>   for (int i = 0; i <= numRows - 1; i++){
> int randomNum = ThreadLocalRandom.current().nextInt(min, max + 1);
> out.println(randomNum);
>   }
> }
>   }
> }
> {code}
> Upload data to Hive tables
> {code}
> load data local inpath '/home/myuser/table.data' into table t1;
> load data local inpath '/home/myuser/table.data' into table t2;
> {code}
> *STEP 2. Configure transactions in hive-site.xml*
> {code}
> 
>
>   hive.exec.dynamic.partition.mode
>   nonstrict
>
>
>   hive.support.concurrency
>   true
>
>
>   hive.enforce.bucketing
>   true
>
>
>   hive.txn.manager
>   org.apache.hadoop.hive.ql.lockmgr.DbTxnManager
>
>
>   hive.compactor.initiator.on
>   true
>
>
>   hive.compactor.worker.threads
>   1
>
> {code}
> *STEP 3. Configure hive.txn.timeout in hive-site.xml*
> {code}
> 
>
>   hive.txn.timeout
>   10s
>
> {code}
> *STEP 4. Connect via beeline to HS2 with KERBEROS*
> {code}
> !connect 
> jdbc:hive2://node8.cluster:1/default;principal=myuser/node8.cluster@NODE8;ssl=true;sslTrustStore=/opt/myuser/conf/ssl_truststore
> {code}
> {code}
> select count(*) from t1;
> {code}
> *STEP 5. Close connection and reconnect*
> {code}
> !close
> {code}
> {code}
> !connect 
> jdbc:hive2://node8.cluster:1/default;principal=myuser/node8.cluster@NODE8;ssl=true;sslTrustStore=/opt/myuser/conf/ssl_truststore
> {code}
> *STEP 6. Perform long-running query*
> This query lasts about 600s
> {code}
> select count(*) from t1 join t2 on t1.id = t2.id;
> {code}
> *EXPECTED RESULT*
> Query finishes successfully
> *ACTUAL RESULT*
> {code}
> 2018-05-17T13:54:54,921 ERROR [pool-7-thread-10] transport.TSaslTransport: 
> SASL negotiation failure
> javax.security.sasl.SaslException: DIGEST-MD5: IO error acquiring password
> at 
> com.sun.security.sasl.digest.DigestMD5Server.validateClientResponse(DigestMD5Server.java:598)
>  
> at 
> com.sun.security.sasl.digest.DigestMD5Server.evaluateResponse(DigestMD5Server.java:244)
> at 
> org.apache.thrift.transport.TSaslTransport$SaslParticipant.evaluateChallengeOrResponse(TSaslTransport.java:539)
>  
> at 
> org.apache.thrift.transport.TSaslTransport.open(TSaslTransport.java:283)
> at 
> org.apache.thrift.transport.TSaslServerTransport.open(TSaslServerTransport.java:41)
>  
> at 
> org.apache.thrift.transport.TSaslServerTransport$Factory.getTransport(TSaslServerTransport.java:216)
>  
> at 
> org.apache.hadoop.hive.thrift.HadoopThriftAuthBridge$Server$TUGIAssumingTransportFactory$1.run(HadoopThriftAuthBridge.java:663)
>  
> at 
> org.apache.hadoop.hive.thrift.HadoopThriftAuthBridge$Server$TUGIAssumingTransportFactory$1.run(HadoopThriftAuthBridge.java:660)
>  
> at java.security.AccessController.doPrivileged(Native Method) 
> at javax.security.auth.Subject.doAs(Subject.java:360)
> at 
> org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1613)
>  
> at 
> org.apache.hadoop.hive.thrift.HadoopThriftAuthBridge$Server$TUGIAssumingTransportFactory.getTransport(HadoopThriftAuthBridge.java:660)
>  
> at 
> org.apache.thrift.server.TThreadPoolServer$WorkerProcess.run(TThreadPoolServer.java:269)
> at 
> java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
>  
> at 
> 

[jira] [Updated] (HIVE-19587) HeartBeat thread uses cancelled delegation token while connecting to meta on KERBEROS cluster

2018-05-23 Thread Oleksiy Sayankin (JIRA)

 [ 
https://issues.apache.org/jira/browse/HIVE-19587?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Oleksiy Sayankin updated HIVE-19587:

Status: Patch Available  (was: In Progress)

> HeartBeat thread uses cancelled delegation token while connecting to meta on 
> KERBEROS cluster
> -
>
> Key: HIVE-19587
> URL: https://issues.apache.org/jira/browse/HIVE-19587
> Project: Hive
>  Issue Type: Bug
>  Components: Transactions
>Reporter: Oleksiy Sayankin
>Assignee: Oleksiy Sayankin
>Priority: Blocker
> Attachments: HIVE-19587.1.patch
>
>
> *STEP 1. Create test data*
> {code}
> create table t1 (id int) ROW FORMAT DELIMITED FIELDS TERMINATED BY  ",";
> create table t2 (id int) ROW FORMAT DELIMITED FIELDS TERMINATED BY  ",";
> {code}
> Generate 10 000 000 lines of random data
> {code}
> package com.test.app;
> import java.io.FileNotFoundException;
> import java.io.PrintWriter;
> import java.util.concurrent.ThreadLocalRandom;
> public class App {
>   public static void main(String[] args) throws FileNotFoundException {
> try (PrintWriter out = new PrintWriter("table.data");) {
>   int min = 0;
>   int max = 10_000;
>   int numRows = 10_000_000;
>   for (int i = 0; i <= numRows - 1; i++){
> int randomNum = ThreadLocalRandom.current().nextInt(min, max + 1);
> out.println(randomNum);
>   }
> }
>   }
> }
> {code}
> Upload data to Hive tables
> {code}
> load data local inpath '/home/myuser/table.data' into table t1;
> load data local inpath '/home/myuser/table.data' into table t2;
> {code}
> *STEP 2. Configure transactions in hive-site.xml*
> {code}
> 
>
>   hive.exec.dynamic.partition.mode
>   nonstrict
>
>
>   hive.support.concurrency
>   true
>
>
>   hive.enforce.bucketing
>   true
>
>
>   hive.txn.manager
>   org.apache.hadoop.hive.ql.lockmgr.DbTxnManager
>
>
>   hive.compactor.initiator.on
>   true
>
>
>   hive.compactor.worker.threads
>   1
>
> {code}
> *STEP 3. Configure hive.txn.timeout in hive-site.xml*
> {code}
> 
>
>   hive.txn.timeout
>   10s
>
> {code}
> *STEP 4. Connect via beeline to HS2 with KERBEROS*
> {code}
> !connect 
> jdbc:hive2://node8.cluster:1/default;principal=myuser/node8.cluster@NODE8;ssl=true;sslTrustStore=/opt/myuser/conf/ssl_truststore
> {code}
> {code}
> select count(*) from t1;
> {code}
> *STEP 5. Close connection and reconnect*
> {code}
> !close
> {code}
> {code}
> !connect 
> jdbc:hive2://node8.cluster:1/default;principal=myuser/node8.cluster@NODE8;ssl=true;sslTrustStore=/opt/myuser/conf/ssl_truststore
> {code}
> *STEP 6. Perform long-running query*
> This query lasts about 600s
> {code}
> select count(*) from t1 join t2 on t1.id = t2.id;
> {code}
> *EXPECTED RESULT*
> Query finishes successfully
> *ACTUAL RESULT*
> {code}
> 2018-05-17T13:54:54,921 ERROR [pool-7-thread-10] transport.TSaslTransport: 
> SASL negotiation failure
> javax.security.sasl.SaslException: DIGEST-MD5: IO error acquiring password
> at 
> com.sun.security.sasl.digest.DigestMD5Server.validateClientResponse(DigestMD5Server.java:598)
>  
> at 
> com.sun.security.sasl.digest.DigestMD5Server.evaluateResponse(DigestMD5Server.java:244)
> at 
> org.apache.thrift.transport.TSaslTransport$SaslParticipant.evaluateChallengeOrResponse(TSaslTransport.java:539)
>  
> at 
> org.apache.thrift.transport.TSaslTransport.open(TSaslTransport.java:283)
> at 
> org.apache.thrift.transport.TSaslServerTransport.open(TSaslServerTransport.java:41)
>  
> at 
> org.apache.thrift.transport.TSaslServerTransport$Factory.getTransport(TSaslServerTransport.java:216)
>  
> at 
> org.apache.hadoop.hive.thrift.HadoopThriftAuthBridge$Server$TUGIAssumingTransportFactory$1.run(HadoopThriftAuthBridge.java:663)
>  
> at 
> org.apache.hadoop.hive.thrift.HadoopThriftAuthBridge$Server$TUGIAssumingTransportFactory$1.run(HadoopThriftAuthBridge.java:660)
>  
> at java.security.AccessController.doPrivileged(Native Method) 
> at javax.security.auth.Subject.doAs(Subject.java:360)
> at 
> org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1613)
>  
> at 
> org.apache.hadoop.hive.thrift.HadoopThriftAuthBridge$Server$TUGIAssumingTransportFactory.getTransport(HadoopThriftAuthBridge.java:660)
>  
> at 
> org.apache.thrift.server.TThreadPoolServer$WorkerProcess.run(TThreadPoolServer.java:269)
> at 
> java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
>  
> at 
> java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
>  
> at 

[jira] [Updated] (HIVE-19587) HeartBeat thread uses cancelled delegation token while connecting to meta on KERBEROS cluster

2018-05-23 Thread Oleksiy Sayankin (JIRA)

 [ 
https://issues.apache.org/jira/browse/HIVE-19587?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Oleksiy Sayankin updated HIVE-19587:

Attachment: HIVE-19587.1.patch

> HeartBeat thread uses cancelled delegation token while connecting to meta on 
> KERBEROS cluster
> -
>
> Key: HIVE-19587
> URL: https://issues.apache.org/jira/browse/HIVE-19587
> Project: Hive
>  Issue Type: Bug
>  Components: Transactions
>Reporter: Oleksiy Sayankin
>Assignee: Oleksiy Sayankin
>Priority: Blocker
> Attachments: HIVE-19587.1.patch
>
>
> *STEP 1. Create test data*
> {code}
> create table t1 (id int) ROW FORMAT DELIMITED FIELDS TERMINATED BY  ",";
> create table t2 (id int) ROW FORMAT DELIMITED FIELDS TERMINATED BY  ",";
> {code}
> Generate 10 000 000 lines of random data
> {code}
> package com.test.app;
> import java.io.FileNotFoundException;
> import java.io.PrintWriter;
> import java.util.concurrent.ThreadLocalRandom;
> public class App {
>   public static void main(String[] args) throws FileNotFoundException {
> try (PrintWriter out = new PrintWriter("table.data");) {
>   int min = 0;
>   int max = 10_000;
>   int numRows = 10_000_000;
>   for (int i = 0; i <= numRows - 1; i++){
> int randomNum = ThreadLocalRandom.current().nextInt(min, max + 1);
> out.println(randomNum);
>   }
> }
>   }
> }
> {code}
> Upload data to Hive tables
> {code}
> load data local inpath '/home/myuser/table.data' into table t1;
> load data local inpath '/home/myuser/table.data' into table t2;
> {code}
> *STEP 2. Configure transactions in hive-site.xml*
> {code}
> 
>
>   hive.exec.dynamic.partition.mode
>   nonstrict
>
>
>   hive.support.concurrency
>   true
>
>
>   hive.enforce.bucketing
>   true
>
>
>   hive.txn.manager
>   org.apache.hadoop.hive.ql.lockmgr.DbTxnManager
>
>
>   hive.compactor.initiator.on
>   true
>
>
>   hive.compactor.worker.threads
>   1
>
> {code}
> *STEP 3. Configure hive.txn.timeout in hive-site.xml*
> {code}
> 
>
>   hive.txn.timeout
>   10s
>
> {code}
> *STEP 4. Connect via beeline to HS2 with KERBEROS*
> {code}
> !connect 
> jdbc:hive2://node8.cluster:1/default;principal=myuser/node8.cluster@NODE8;ssl=true;sslTrustStore=/opt/myuser/conf/ssl_truststore
> {code}
> {code}
> select count(*) from t1;
> {code}
> *STEP 5. Close connection and reconnect*
> {code}
> !close
> {code}
> {code}
> !connect 
> jdbc:hive2://node8.cluster:1/default;principal=myuser/node8.cluster@NODE8;ssl=true;sslTrustStore=/opt/myuser/conf/ssl_truststore
> {code}
> *STEP 6. Perform long-running query*
> This query lasts about 600s
> {code}
> select count(*) from t1 join t2 on t1.id = t2.id;
> {code}
> *EXPECTED RESULT*
> Query finishes successfully
> *ACTUAL RESULT*
> {code}
> 2018-05-17T13:54:54,921 ERROR [pool-7-thread-10] transport.TSaslTransport: 
> SASL negotiation failure
> javax.security.sasl.SaslException: DIGEST-MD5: IO error acquiring password
> at 
> com.sun.security.sasl.digest.DigestMD5Server.validateClientResponse(DigestMD5Server.java:598)
>  
> at 
> com.sun.security.sasl.digest.DigestMD5Server.evaluateResponse(DigestMD5Server.java:244)
> at 
> org.apache.thrift.transport.TSaslTransport$SaslParticipant.evaluateChallengeOrResponse(TSaslTransport.java:539)
>  
> at 
> org.apache.thrift.transport.TSaslTransport.open(TSaslTransport.java:283)
> at 
> org.apache.thrift.transport.TSaslServerTransport.open(TSaslServerTransport.java:41)
>  
> at 
> org.apache.thrift.transport.TSaslServerTransport$Factory.getTransport(TSaslServerTransport.java:216)
>  
> at 
> org.apache.hadoop.hive.thrift.HadoopThriftAuthBridge$Server$TUGIAssumingTransportFactory$1.run(HadoopThriftAuthBridge.java:663)
>  
> at 
> org.apache.hadoop.hive.thrift.HadoopThriftAuthBridge$Server$TUGIAssumingTransportFactory$1.run(HadoopThriftAuthBridge.java:660)
>  
> at java.security.AccessController.doPrivileged(Native Method) 
> at javax.security.auth.Subject.doAs(Subject.java:360)
> at 
> org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1613)
>  
> at 
> org.apache.hadoop.hive.thrift.HadoopThriftAuthBridge$Server$TUGIAssumingTransportFactory.getTransport(HadoopThriftAuthBridge.java:660)
>  
> at 
> org.apache.thrift.server.TThreadPoolServer$WorkerProcess.run(TThreadPoolServer.java:269)
> at 
> java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
>  
> at 
> java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
>  
> at 

[jira] [Updated] (HIVE-19587) HeartBeat thread uses cancelled delegation token while connecting to meta on KERBEROS cluster

2018-05-17 Thread Oleksiy Sayankin (JIRA)

 [ 
https://issues.apache.org/jira/browse/HIVE-19587?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Oleksiy Sayankin updated HIVE-19587:

Description: 
*STEP 1. Create test data*

{code}
create table t1 (id int) ROW FORMAT DELIMITED FIELDS TERMINATED BY  ",";
create table t2 (id int) ROW FORMAT DELIMITED FIELDS TERMINATED BY  ",";
{code}

Generate 10 000 000 lines of random data

{code}
package com.test.app;

import java.io.FileNotFoundException;
import java.io.PrintWriter;
import java.util.concurrent.ThreadLocalRandom;

public class App {
  public static void main(String[] args) throws FileNotFoundException {
try (PrintWriter out = new PrintWriter("table.data");) {
  int min = 0;
  int max = 10_000;
  int numRows = 10_000_000;
  for (int i = 0; i <= numRows - 1; i++){
int randomNum = ThreadLocalRandom.current().nextInt(min, max + 1);
out.println(randomNum);
  }
}
  }
}
{code}

Upload data to Hive tables

{code}
load data local inpath '/home/myuser/table.data' into table t1;
load data local inpath '/home/myuser/table.data' into table t2;
{code}

*STEP 2. Configure transactions in hive-site.xml*

{code}


   
  hive.exec.dynamic.partition.mode
  nonstrict
   
   
  hive.support.concurrency
  true
   
   
  hive.enforce.bucketing
  true
   
   
  hive.txn.manager
  org.apache.hadoop.hive.ql.lockmgr.DbTxnManager
   
   
  hive.compactor.initiator.on
  true
   
   
  hive.compactor.worker.threads
  1
   
{code}

*STEP 3. Configure hive.txn.timeout in hive-site.xml*

{code}

   
  hive.txn.timeout
  10s
   
{code}

*STEP 4. Connect via beeline to HS2 with KERBEROS*

{code}
!connect 
jdbc:hive2://node8.cluster:1/default;principal=myuser/node8.cluster@NODE8;ssl=true;sslTrustStore=/opt/myuser/conf/ssl_truststore
{code}

{code}
select count(*) from t1;
{code}

*STEP 5. Close connection and reconnect*

{code}
!close
{code}

{code}
!connect 
jdbc:hive2://node8.cluster:1/default;principal=myuser/node8.cluster@NODE8;ssl=true;sslTrustStore=/opt/myuser/conf/ssl_truststore
{code}

*STEP 6. Perform long-running query*

This query lasts about 600s

{code}
select count(*) from t1 join t2 on t1.id = t2.id;
{code}


*EXPECTED RESULT*

Query finishes successfully

*ACTUAL RESULT*

{code}
2018-05-17T13:54:54,921 ERROR [pool-7-thread-10] transport.TSaslTransport: SASL 
negotiation failure
javax.security.sasl.SaslException: DIGEST-MD5: IO error acquiring password
at 
com.sun.security.sasl.digest.DigestMD5Server.validateClientResponse(DigestMD5Server.java:598)
 
at 
com.sun.security.sasl.digest.DigestMD5Server.evaluateResponse(DigestMD5Server.java:244)
at 
org.apache.thrift.transport.TSaslTransport$SaslParticipant.evaluateChallengeOrResponse(TSaslTransport.java:539)
 
at 
org.apache.thrift.transport.TSaslTransport.open(TSaslTransport.java:283)
at 
org.apache.thrift.transport.TSaslServerTransport.open(TSaslServerTransport.java:41)
 
at 
org.apache.thrift.transport.TSaslServerTransport$Factory.getTransport(TSaslServerTransport.java:216)
 
at 
org.apache.hadoop.hive.thrift.HadoopThriftAuthBridge$Server$TUGIAssumingTransportFactory$1.run(HadoopThriftAuthBridge.java:663)
 
at 
org.apache.hadoop.hive.thrift.HadoopThriftAuthBridge$Server$TUGIAssumingTransportFactory$1.run(HadoopThriftAuthBridge.java:660)
 
at java.security.AccessController.doPrivileged(Native Method) 
at javax.security.auth.Subject.doAs(Subject.java:360)
at 
org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1613)
 
at 
org.apache.hadoop.hive.thrift.HadoopThriftAuthBridge$Server$TUGIAssumingTransportFactory.getTransport(HadoopThriftAuthBridge.java:660)
 
at 
org.apache.thrift.server.TThreadPoolServer$WorkerProcess.run(TThreadPoolServer.java:269)
at 
java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) 
at 
java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) 
at java.lang.Thread.run(Thread.java:748)
Caused by: org.apache.hadoop.security.token.SecretManager$InvalidToken: token 
expired or does not exist: owner=myuser, renewer=myuser, realUser=, 
issueDate=1526565229297, maxDate=1527170029297, sequenceNumber=1, masterKeyId=1
at 
org.apache.hadoop.hive.thrift.TokenStoreDelegationTokenSecretManager.retrievePassword(TokenStoreDelegationTokenSecretManager.java:104)
 
at 
org.apache.hadoop.hive.thrift.TokenStoreDelegationTokenSecretManager.retrievePassword(TokenStoreDelegationTokenSecretManager.java:56)
at 
org.apache.hadoop.hive.thrift.HadoopThriftAuthBridge$Server$SaslDigestCallbackHandler.getPassword(HadoopThriftAuthBridge.java:472)
at 
org.apache.hadoop.hive.thrift.HadoopThriftAuthBridge$Server$SaslDigestCallbackHandler.handle(HadoopThriftAuthBridge.java:503)
 
   

[jira] [Updated] (HIVE-19587) HeartBeat thread uses cancelled delegation token while connecting to meta on KERBEROS cluster

2018-05-17 Thread Eugene Koifman (JIRA)

 [ 
https://issues.apache.org/jira/browse/HIVE-19587?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Eugene Koifman updated HIVE-19587:
--
Component/s: Transactions

> HeartBeat thread uses cancelled delegation token while connecting to meta on 
> KERBEROS cluster
> -
>
> Key: HIVE-19587
> URL: https://issues.apache.org/jira/browse/HIVE-19587
> Project: Hive
>  Issue Type: Bug
>  Components: Transactions
>Reporter: Oleksiy Sayankin
>Assignee: Oleksiy Sayankin
>Priority: Blocker
>
> *STEP 1. Create test data*
> {code}
> create table t1 (id int) ROW FORMAT DELIMITED FIELDS TERMINATED BY  ",";
> create table t2 (id int) ROW FORMAT DELIMITED FIELDS TERMINATED BY  ",";
> {code}
> Generate 10 000 000 lines of random data
> {code}
> package com.test.app;
> import java.io.FileNotFoundException;
> import java.io.PrintWriter;
> import java.util.concurrent.ThreadLocalRandom;
> public class App {
>   public static void main(String[] args) throws FileNotFoundException {
> try (PrintWriter out = new PrintWriter("table.data");) {
>   int min = 0;
>   int max = 10_000;
>   int numRows = 10_000_000;
>   for (int i = 0; i <= numRows - 1; i++){
> int randomNum = ThreadLocalRandom.current().nextInt(min, max + 1);
> out.println(randomNum);
>   }
> }
>   }
> }
> {code}
> Upload data to Hive tables
> {code}
> load data local inpath '/home/myuser/table.data' into table t1;
> load data local inpath '/home/myuser/table.data' into table t2;
> {code}
> *STEP 2. Configure transactions in hive-site.xml*
> {code}
> 
>
>   hive.exec.dynamic.partition.mode
>   nonstrict
>
>
>   hive.support.concurrency
>   true
>
>
>   hive.enforce.bucketing
>   true
>
>
>   hive.txn.manager
>   org.apache.hadoop.hive.ql.lockmgr.DbTxnManager
>
>
>   hive.compactor.initiator.on
>   true
>
>
>   hive.compactor.worker.threads
>   1
>
> {code}
> *STEP 3. Configure hive.txn.timeout in hive-site.xml*
> {code}
> 
>
>   hive.txn.timeout
>   10s
>
> {code}
> *STEP 4. Connect via beeline to HS2 with KERBEROS*
> {code}
> !connect 
> jdbc:hive2://node8.cluster:1/default;principal=myuser/node8.cluster@NODE8;ssl=true;sslTrustStore=/opt/myuser/conf/ssl_truststore
> {code}
> {code}
> select count(*) from t1;
> {code}
> *STEP 5. Close connection and reconnect*
> {code}
> !close
> {code}
> {code}
> !connect 
> jdbc:hive2://node8.cluster:1/default;principal=myuser/node8.cluster@NODE8;ssl=true;sslTrustStore=/opt/myuser/conf/ssl_truststore
> {code}
> *STEP 6. Perform long-running query*
> This query lasts about 600s
> {code}
> select count(*) from t1 join t2 on t1.id = t2.id;
> {code}
> *EXPECTED RESULT*
> Query finishes successfully
> *ACTUAL RESULT*
> {code}
> 2018-05-17T13:54:54,921 ERROR [pool-7-thread-10] transport.TSaslTransport: 
> SASL negotiation failure
> javax.security.sasl.SaslException: DIGEST-MD5: IO error acquiring password
> at 
> com.sun.security.sasl.digest.DigestMD5Server.validateClientResponse(DigestMD5Server.java:598)
>  
> at 
> com.sun.security.sasl.digest.DigestMD5Server.evaluateResponse(DigestMD5Server.java:244)
> at 
> org.apache.thrift.transport.TSaslTransport$SaslParticipant.evaluateChallengeOrResponse(TSaslTransport.java:539)
>  
> at 
> org.apache.thrift.transport.TSaslTransport.open(TSaslTransport.java:283)
> at 
> org.apache.thrift.transport.TSaslServerTransport.open(TSaslServerTransport.java:41)
>  
> at 
> org.apache.thrift.transport.TSaslServerTransport$Factory.getTransport(TSaslServerTransport.java:216)
>  
> at 
> org.apache.hadoop.hive.thrift.HadoopThriftAuthBridge$Server$TUGIAssumingTransportFactory$1.run(HadoopThriftAuthBridge.java:663)
>  
> at 
> org.apache.hadoop.hive.thrift.HadoopThriftAuthBridge$Server$TUGIAssumingTransportFactory$1.run(HadoopThriftAuthBridge.java:660)
>  
> at java.security.AccessController.doPrivileged(Native Method) 
> at javax.security.auth.Subject.doAs(Subject.java:360)
> at 
> org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1613)
>  
> at 
> org.apache.hadoop.hive.thrift.HadoopThriftAuthBridge$Server$TUGIAssumingTransportFactory.getTransport(HadoopThriftAuthBridge.java:660)
>  
> at 
> org.apache.thrift.server.TThreadPoolServer$WorkerProcess.run(TThreadPoolServer.java:269)
> at 
> java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
>  
> at 
> java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
>  
> at java.lang.Thread.run(Thread.java:748)
> Caused by: