hive git commit: HIVE-18552: Split hive.strict.checks.large.query into two configs (Sahil Takiar, reviewed by Vihang Karajgaonkar)

2018-02-02 Thread stakiar
Repository: hive
Updated Branches:
  refs/heads/master 47f45705f -> 4a33ec8fc


HIVE-18552: Split hive.strict.checks.large.query into two configs (Sahil 
Takiar, reviewed by Vihang Karajgaonkar)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/4a33ec8f
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/4a33ec8f
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/4a33ec8f

Branch: refs/heads/master
Commit: 4a33ec8fcae5f7d18105ef62e33150db6e853af5
Parents: 47f4570
Author: Sahil Takiar 
Authored: Fri Feb 2 20:34:32 2018 -0600
Committer: Sahil Takiar 
Committed: Fri Feb 2 20:34:32 2018 -0600

--
 .../org/apache/hadoop/hive/conf/HiveConf.java   | 21 +++-
 .../clientnegative/alter_view_failure6_2.q  |  2 +-
 .../queries/clientnegative/input_part0_neg_2.q  |  2 +-
 .../queries/clientnegative/strict_orderby_2.q   |  8 
 .../queries/clientnegative/strict_pruning_2.q   |  4 +---
 .../clientnegative/alter_view_failure6.q.out|  4 ++--
 .../clientnegative/alter_view_failure6_2.q.out  |  4 ++--
 .../clientnegative/compare_double_bigint.q.out  |  2 +-
 .../compare_double_bigint_2.q.out   |  2 +-
 .../clientnegative/compare_string_bigint.q.out  |  2 +-
 .../compare_string_bigint_2.q.out   |  2 +-
 ql/src/test/results/clientnegative/input4.q.out |  2 +-
 .../test/results/clientnegative/input4_2.q.out  |  2 +-
 .../clientnegative/input_part0_neg.q.out|  2 +-
 .../clientnegative/input_part0_neg_2.q.out  |  2 +-
 .../results/clientnegative/strict_join.q.out|  2 +-
 .../results/clientnegative/strict_join_2.q.out  |  2 +-
 .../results/clientnegative/strict_orderby.q.out |  2 +-
 .../clientnegative/strict_orderby_2.q.out   |  1 +
 .../results/clientnegative/strict_pruning.q.out |  2 +-
 .../clientnegative/strict_pruning_2.q.out   |  2 +-
 21 files changed, 41 insertions(+), 31 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/4a33ec8f/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
--
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java 
b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index 4f2e6d3..99e8457 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -1096,11 +1096,14 @@ public class HiveConf extends Configuration {
 "hive.txn.valid.txns,hive.script.operator.env.blacklist",
 "Comma separated list of keys from the configuration file not to convert to environment " +
 "variables when invoking the script operator"),
-HIVE_STRICT_CHECKS_LARGE_QUERY("hive.strict.checks.large.query", false,
+HIVE_STRICT_CHECKS_ORDERBY_NO_LIMIT("hive.strict.checks.orderby.no.limit", false,
 "Enabling strict large query checks disallows the following:\n" +
 "  Orderby without limit.\n" +
+"Note that this check currently does not consider data size, only the query pattern."),
+HIVE_STRICT_CHECKS_NO_PARTITION_FILTER("hive.strict.checks.no.partition.filter", false,
+"Enabling strict large query checks disallows the following:\n" +
 "  No partition being picked up for a query against partitioned table.\n" +
-"Note that these checks currently do not consider data size, only the query pattern."),
+"Note that this check currently does not consider data size, only the query pattern."),
 HIVE_STRICT_CHECKS_TYPE_SAFETY("hive.strict.checks.type.safety", true,
 "Enabling strict type safety checks disallows the following:\n" +
 "  Comparing bigints and strings.\n" +
@@ -4971,10 +4974,10 @@ public class HiveConf extends Configuration {
   public static class StrictChecks {

 private static final String NO_LIMIT_MSG = makeMessage(
-"Order by-s without limit", ConfVars.HIVE_STRICT_CHECKS_LARGE_QUERY);
+"Order by-s without limit", ConfVars.HIVE_STRICT_CHECKS_ORDERBY_NO_LIMIT);
 public static final String NO_PARTITIONLESS_MSG = makeMessage(
 "Queries against partitioned tables without a partition filter",
-ConfVars.HIVE_STRICT_CHECKS_LARGE_QUERY);
+ConfVars.HIVE_STRICT_CHECKS_NO_PARTITION_FILTER);
 private static final String NO_COMPARES_MSG = makeMessage(
 "Unsafe compares between different types", ConfVars.HIVE_STRICT_CHECKS_TYPE_SAFETY);
 private static final String NO_CARTESIAN_MSG = makeMessage(
@@ -4984,17 +4987,17 @@ public class HiveConf extends Configuration {

 private static String makeMessage(String what, ConfVars setting) {
   return what + " are disabled for safety reasons. If you know what you are doing, please set "
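
For illustration only, not part of the commit: after this split the two checks are
independent ConfVars and can be toggled separately. A minimal Java sketch, using the
enum names that appear in the diff above:

import org.apache.hadoop.hive.conf.HiveConf;

public class StrictChecksDemo {
  public static void main(String[] args) {
    HiveConf conf = new HiveConf();
    // Reject ORDER BY without LIMIT, but still allow partition-filter-less scans.
    conf.setBoolVar(HiveConf.ConfVars.HIVE_STRICT_CHECKS_ORDERBY_NO_LIMIT, true);
    conf.setBoolVar(HiveConf.ConfVars.HIVE_STRICT_CHECKS_NO_PARTITION_FILTER, false);
    System.out.println("orderby.no.limit: "
        + conf.getBoolVar(HiveConf.ConfVars.HIVE_STRICT_CHECKS_ORDERBY_NO_LIMIT));
    System.out.println("no.partition.filter: "
        + conf.getBoolVar(HiveConf.ConfVars.HIVE_STRICT_CHECKS_NO_PARTITION_FILTER));
  }
}

Before the split, a single hive.strict.checks.large.query flag gated both checks; now each
can be enabled on its own.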

hive git commit: HIVE-18599: Transactions: Fix CTAS on Micromanaged tables (Steve Yeom, reviewed by Gopal V)

2018-02-02 Thread gopalv
Repository: hive
Updated Branches:
  refs/heads/master f9efd84f8 -> 47f45705f


HIVE-18599: Transactions: Fix CTAS on Micromanaged tables (Steve Yeom, reviewed 
by Gopal V)

Signed-off-by: Gopal V 


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/47f45705
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/47f45705
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/47f45705

Branch: refs/heads/master
Commit: 47f45705f997658b3ce5df4276915a6c0be56d8a
Parents: f9efd84
Author: Steve Yeom 
Authored: Fri Feb 2 16:48:02 2018 -0800
Committer: Gopal V 
Committed: Fri Feb 2 16:50:23 2018 -0800

--
 .../test/resources/testconfiguration.properties |  1 +
 .../hadoop/hive/ql/parse/SemanticAnalyzer.java  | 13 ++-
 ql/src/test/queries/clientpositive/mm_cttas.q   | 20 +
 .../results/clientpositive/llap/mm_cttas.q.out  | 89 
 4 files changed, 120 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/47f45705/itests/src/test/resources/testconfiguration.properties
--
diff --git a/itests/src/test/resources/testconfiguration.properties 
b/itests/src/test/resources/testconfiguration.properties
index 70d0749..fed9394 100644
--- a/itests/src/test/resources/testconfiguration.properties
+++ b/itests/src/test/resources/testconfiguration.properties
@@ -71,6 +71,7 @@ minillap.shared.query.files=insert_into1.q,\
   mapreduce1.q,\
   mapreduce2.q,\
   mm_all.q,\
+  mm_cttas.q,\
   orc_merge1.q,\
   orc_merge10.q,\
   orc_merge2.q,\

http://git-wip-us.apache.org/repos/asf/hive/blob/47f45705/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java 
b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
index dbf9363..c2e2499 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
@@ -2053,8 +2053,15 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
 }
   }
   try {
-fname = ctx.getExtTmpPathRelTo(
-FileUtils.makeQualified(location, conf)).toString();
+CreateTableDesc tblDesc = qb.getTableDesc();
+if (tblDesc != null
+&& tblDesc.isTemporary()
+&& AcidUtils.isInsertOnlyTable(tblDesc.getTblProps(), true)) {
+  fname = FileUtils.makeQualified(location, conf).toString();
+} else {
+  fname = ctx.getExtTmpPathRelTo(
+  FileUtils.makeQualified(location, conf)).toString();
+}
   } catch (Exception e) {
 throw new SemanticException(generateErrorMessage(ast,
 "Error creating temporary folder on: " + location.toString()), e);
@@ -6842,7 +6849,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
 field_schemas = new ArrayList<FieldSchema>();
 destTableIsTemporary = tblDesc.isTemporary();
 destTableIsMaterialization = tblDesc.isMaterialization();
-if (!destTableIsTemporary && AcidUtils.isInsertOnlyTable(tblDesc.getTblProps(), true)) {
+if (AcidUtils.isInsertOnlyTable(tblDesc.getTblProps(), true)) {
   isMmTable = isMmCtas = true;
   txnId = SessionState.get().getTxnMgr().getCurrentTxnId();
   tblDesc.setInitialMmWriteId(txnId);
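
The new branch above takes the direct-to-target path only for temporary, insert-only
(micromanaged) tables. As a rough, self-contained sketch of the predicate behind
AcidUtils.isInsertOnlyTable -- the property names are the standard ACID ones, but treat
the details as an assumption rather than the exact implementation:

import java.util.Map;

public final class InsertOnlyCheck {
  // Sketch of the insert-only ("micromanaged") table test, modeled on
  // AcidUtils.isInsertOnlyTable; a table is MM when it is transactional
  // and its transactional_properties are insert_only.
  public static boolean isInsertOnlyTable(Map<String, String> tblProps) {
    boolean transactional = "true".equalsIgnoreCase(tblProps.get("transactional"));
    String txnProps = tblProps.get("transactional_properties");
    return transactional && "insert_only".equalsIgnoreCase(txnProps);
  }
}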

http://git-wip-us.apache.org/repos/asf/hive/blob/47f45705/ql/src/test/queries/clientpositive/mm_cttas.q
--
diff --git a/ql/src/test/queries/clientpositive/mm_cttas.q 
b/ql/src/test/queries/clientpositive/mm_cttas.q
new file mode 100644
index 000..b099d2f
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/mm_cttas.q
@@ -0,0 +1,20 @@
+
+set hive.support.concurrency=true;
+set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+
+drop table intermediate;
+create table intermediate(key int) partitioned by (p int) stored as orc;
+insert into table intermediate partition(p='455') select distinct key from src where key >= 0 order by key desc limit 2;
+insert into table intermediate partition(p='456') select distinct key from src where key is not null order by key asc limit 2;
+insert into table intermediate partition(p='457') select distinct key from src where key >= 100 order by key asc limit 2;
+
+drop table cttas1_mm;
+
+create temporary table cttas1_mm tblproperties ("transactional"="true", 

hive git commit: HIVE-18589 java.io.IOException: Not enough history available (Eugene Koifman reviewed by Gopal V)

2018-02-02 Thread ekoifman
Repository: hive
Updated Branches:
  refs/heads/master 1223031b9 -> f9efd84f8


HIVE-18589 java.io.IOException: Not enough history available (Eugene Koifman 
reviewed by Gopal V)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/f9efd84f
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/f9efd84f
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/f9efd84f

Branch: refs/heads/master
Commit: f9efd84f8bf542fa0e435bc1466151c07de2
Parents: 1223031
Author: Eugene Koifman 
Authored: Fri Feb 2 15:40:51 2018 -0800
Committer: Eugene Koifman 
Committed: Fri Feb 2 15:40:51 2018 -0800

--
 .../org/apache/hadoop/hive/ql/io/AcidUtils.java |  34 +++---
 .../hive/ql/txn/compactor/CompactorMR.java  |   9 ++
 .../apache/hadoop/hive/ql/TestTxnCommands2.java |  90 +++
 .../apache/hadoop/hive/ql/io/TestAcidUtils.java | 109 ---
 .../hive/ql/txn/compactor/TestWorker.java   |  11 +-
 5 files changed, 79 insertions(+), 174 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/f9efd84f/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java 
b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
index eb75308..430e0fc 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
@@ -1067,13 +1067,21 @@ public class AcidUtils {
 * snapshot for this reader.
 * Note that such base is NOT obsolete.  Obsolete files are those that are "covered" by other
 * files within the snapshot.
+   * A base produced by Insert Overwrite is different.  Logically it's a delta file but one that
+   * causes anything written previously to be ignored (hence the overwrite).  In this case, base_x
+   * is visible if txnid:x is committed for the current reader.
 */
-  private static boolean isValidBase(long baseTxnId, ValidTxnList txnList) {
+  private static boolean isValidBase(long baseTxnId, ValidTxnList txnList, Path baseDir,
+  FileSystem fs) throws IOException {
 if(baseTxnId == Long.MIN_VALUE) {
   //such base is created by 1st compaction in case of non-acid to acid table conversion
   //By definition there are no open txns with id < 1.
   return true;
 }
+if(!MetaDataFile.isCompacted(baseDir, fs)) {
+  //this is the IOW case
+  return txnList.isTxnValid(baseTxnId);
+}
 return txnList.isValidBase(baseTxnId);
   }
   private static void getChildState(FileStatus child, HdfsFileStatusWithId childWithId,
@@ -1091,12 +1099,12 @@ public class AcidUtils {
 bestBase.oldestBaseTxnId = txn;
   }
   if (bestBase.status == null) {
-if(isValidBase(txn, txnList)) {
+if(isValidBase(txn, txnList, p, fs)) {
   bestBase.status = child;
   bestBase.txn = txn;
 }
   } else if (bestBase.txn < txn) {
-if(isValidBase(txn, txnList)) {
+if(isValidBase(txn, txnList, p, fs)) {
   obsolete.add(bestBase.status);
   bestBase.status = child;
   bestBase.txn = txn;
@@ -1484,6 +1492,8 @@
   }

   /**
+   * General facility to place a metadata file into a dir created by acid/compactor write.
+   *
 * Load Data commands against Acid tables write {@link AcidBaseFileType#ORIGINAL_BASE} type files
 * into delta_x_x/ (or base_x in case there is Overwrite clause).  {@link MetaDataFile} is a
 * small JSON file in this directory that indicates that these files don't have Acid metadata
@@ -1499,17 +1509,14 @@
   String DATA_FORMAT = "dataFormat";
 }
 private interface Value {
-  //plain ORC file
-  String RAW = "raw";
-  //result of acid write, i.e. decorated with ROW__ID info
-  String NATIVE = "native";
+  //written by Major compaction
+  String COMPACTED = "compacted";
 }

 /**
  * @param baseOrDeltaDir delta or base dir, must exist
  */
-public static void createMetaFile(Path baseOrDeltaDir, FileSystem fs, boolean isRawFormat)
-  throws IOException {
+public static void createCompactorMarker(Path baseOrDeltaDir, FileSystem fs) throws IOException {
   /**
* create _meta_data json file in baseOrDeltaDir
* write thisFileVersion, dataFormat
*/
@@ -1519,7 +1526,7 @@
   Path formatFile = new Path(baseOrDeltaDir, METADATA_FILE);
   Map<String, String> metaData = new HashMap<>();
   metaData.put(Field.VERSION, CURRENT_VERSION);
-  metaData.put(Field.DATA_FORMAT, isRawFormat ? Value.RAW : Value.NATIVE);
+  metaData.put(Field.DATA_FORMAT, Value.COMPACTED);
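
To make the base-visibility rule above concrete, a small self-contained sketch with a
stand-in for ValidTxnList. Per the new javadoc, an Insert Overwrite base_x is visible
once txnid:x commits, while a compacted base still goes through the stricter isValidBase
test. This illustrates the logic; it is not the commit's code:

public final class BaseVisibility {
  // Minimal stand-in for org.apache.hadoop.hive.common.ValidTxnList.
  interface TxnList {
    boolean isTxnValid(long txnId);   // txn committed in this reader's snapshot
    boolean isValidBase(long txnId);  // all txns <= txnId resolved (compaction guarantee)
  }

  static boolean isValidBase(long baseTxnId, TxnList txns, boolean isCompacted) {
    if (baseTxnId == Long.MIN_VALUE) {
      // base from the 1st compaction of a converted non-ACID table; always visible
      return true;
    }
    if (!isCompacted) {
      // Insert Overwrite base: visible as soon as txnid:x is committed
      return txns.isTxnValid(baseTxnId);
    }
    return txns.isValidBase(baseTxnId);
  }
}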

hive git commit: HIVE-18516 : load data should rename files consistent with insert statements for ACID Tables (Deepak Jaiswal, reviewed by Eugene Koifman and Jason Dere)

2018-02-02 Thread djaiswal
Repository: hive
Updated Branches:
  refs/heads/master c2e335fc0 -> 1223031b9


HIVE-18516 : load data should rename files consistent with insert statements 
for ACID Tables (Deepak Jaiswal, reviewed by Eugene Koifman and Jason Dere)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/1223031b
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/1223031b
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/1223031b

Branch: refs/heads/master
Commit: 1223031b9afdb7e22f9da09f6c8bb47037e447d0
Parents: c2e335f
Author: djaiswal 
Authored: Fri Feb 2 13:25:53 2018 -0800
Committer: djaiswal 
Committed: Fri Feb 2 13:27:08 2018 -0800

--
 .../test/resources/testconfiguration.properties |  1 +
 .../apache/hadoop/hive/ql/metadata/Hive.java| 75 +---
 .../hive/ql/parse/LoadSemanticAnalyzer.java |  8 ---
 .../apache/hadoop/hive/ql/TestTxnLoadData.java  | 12 ++--
 .../hive/ql/metadata/TestHiveCopyFiles.java | 12 ++--
 .../clientnegative/load_data_into_acid.q| 20 --
 .../test/queries/clientpositive/smb_mapjoin_7.q |  4 +-
 .../clientnegative/load_data_into_acid.q.out| 33 -
 .../clientpositive/beeline/smb_mapjoin_7.q.out  |  8 +--
 .../results/clientpositive/smb_mapjoin_7.q.out  |  8 +--
 .../clientpositive/spark/smb_mapjoin_7.q.out|  8 +--
 11 files changed, 75 insertions(+), 114 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/1223031b/itests/src/test/resources/testconfiguration.properties
--
diff --git a/itests/src/test/resources/testconfiguration.properties 
b/itests/src/test/resources/testconfiguration.properties
index d86ff58..70d0749 100644
--- a/itests/src/test/resources/testconfiguration.properties
+++ b/itests/src/test/resources/testconfiguration.properties
@@ -570,6 +570,7 @@ minillaplocal.query.files=\
   list_bucket_dml_10.q,\
   llap_partitioned.q,\
   llap_vector_nohybridgrace.q,\
+  load_data_acid_rename.q,\
   load_dyn_part5.q,\
   lvj_mapjoin.q,\
   materialized_view_create_rewrite_dummy.q,\

http://git-wip-us.apache.org/repos/asf/hive/blob/1223031b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java 
b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
index 3b97dac..b1e05df 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
@@ -31,18 +31,8 @@ import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.PrintStream;
 import java.nio.ByteBuffer;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.LinkedHashMap;
-import java.util.LinkedHashSet;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
+import java.util.*;
 import java.util.Map.Entry;
-import java.util.Set;
 import java.util.concurrent.Callable;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ExecutionException;
@@ -1881,11 +1871,12 @@ public class Hive {
   // base_x.  (there is Insert Overwrite and Load Data Overwrite)
   boolean isAutoPurge = "true".equalsIgnoreCase(tbl.getProperty("auto.purge"));
   replaceFiles(tbl.getPath(), loadPath, destPath, oldPartPath, getConf(),
-  isSrcLocal, isAutoPurge, newFiles, filter, isMmTableWrite?true:false, !tbl.isTemporary());
+  isSrcLocal, isAutoPurge, newFiles, filter, isMmTableWrite, !tbl.isTemporary());
 } else {
   FileSystem fs = tbl.getDataLocation().getFileSystem(conf);
   copyFiles(conf, loadPath, destPath, fs, isSrcLocal, isAcidIUDoperation,
-(loadFileType == LoadFileType.OVERWRITE_EXISTING), newFiles);
+(loadFileType == LoadFileType.OVERWRITE_EXISTING), newFiles, tbl.getNumBuckets() > 0,
+  isFullAcidTable);
 }
   }
   perfLogger.PerfLogEnd("MoveTask", "FileMoves");
@@ -2432,7 +2423,8 @@ private void constructOneLBLocationMap(FileStatus fSta,
 try {
   FileSystem fs = tbl.getDataLocation().getFileSystem(sessionConf);
   copyFiles(sessionConf, loadPath, destPath, fs, isSrcLocal, isAcidIUDoperation,
-loadFileType == LoadFileType.OVERWRITE_EXISTING, newFiles);
+loadFileType == LoadFileType.OVERWRITE_EXISTING, newFiles,
+  tbl.getNumBuckets() > 0 ? true : false, isFullAcidTable);
 } catch (IOException e) {
   throw new HiveException("addFiles: filesystem error in check phase", e);
 }
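
For background, files written by insert statements follow the 000000_0-style naming
(zero-padded task/bucket id plus a copy suffix), and this change renames LOAD DATA input
files to match so downstream ACID logic sees a consistent layout. A toy sketch of that
naming convention -- the helper below is hypothetical, not the commit's actual code:

public final class BucketFileNames {
  // Sketch only: insert writes produce files named like 000000_0, 000001_0, ...
  static String toBucketFileName(int bucketId) {
    return String.format("%06d_0", bucketId);
  }

  public static void main(String[] args) {
    for (int i = 0; i < 3; i++) {
      System.out.println(toBucketFileName(i)); // 000000_0, 000001_0, 000002_0
    }
  }
}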

hive git commit: HIVE-18606 CTAS on empty table throws NPE from org.apache.hadoop.hive.ql.exec.MoveTask (Eugene Koifman, reviewed by Sergey Shelukhin)

2018-02-02 Thread ekoifman
Repository: hive
Updated Branches:
  refs/heads/master 464a3f61a -> c2e335fc0


HIVE-18606 CTAS on empty table throws NPE from 
org.apache.hadoop.hive.ql.exec.MoveTask (Eugene Koifman, reviewed by Sergey 
Shelukhin)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/c2e335fc
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/c2e335fc
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/c2e335fc

Branch: refs/heads/master
Commit: c2e335fc0b4a8144d8d93ff10e9191432ae6547e
Parents: 464a3f6
Author: Eugene Koifman 
Authored: Fri Feb 2 12:14:36 2018 -0800
Committer: Eugene Koifman 
Committed: Fri Feb 2 12:14:36 2018 -0800

--
 ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java |  9 +++--
 .../test/org/apache/hadoop/hive/ql/TestTxnNoBuckets.java | 11 +++
 2 files changed, 18 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/c2e335fc/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java 
b/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
index 114d455..4e804ba 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
@@ -294,8 +294,13 @@ public class MoveTask extends Task<MoveWork> implements Serializable {
 //'sourcePath' result of 'select ...' part of CTAS statement
 assert lfd.getIsDfsDir();
 FileSystem srcFs = sourcePath.getFileSystem(conf);
-List<Path> newFiles = new ArrayList<>();
-Hive.moveAcidFiles(srcFs, srcFs.globStatus(sourcePath), targetPath, newFiles);
+FileStatus[] srcs = srcFs.globStatus(sourcePath);
+if(srcs != null) {
+  List<Path> newFiles = new ArrayList<>();
+  Hive.moveAcidFiles(srcFs, srcs, targetPath, newFiles);
+} else {
+  LOG.debug("No files found to move from " + sourcePath + " to " + targetPath);
+}
   }
   else {
 moveFile(sourcePath, targetPath, lfd.getIsDfsDir());
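
The root cause here is that Hadoop's FileSystem.globStatus returns null, not an empty
array, when nothing matches -- which is exactly what a CTAS over an empty source produces.
A minimal sketch of the defensive pattern:

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class GlobSafely {
  // globStatus returns null (not an empty array) when nothing matches,
  // so dereferencing the result unchecked is an NPE waiting to happen.
  static List<Path> listMatches(Configuration conf, Path pattern) throws IOException {
    FileSystem fs = pattern.getFileSystem(conf);
    FileStatus[] matches = fs.globStatus(pattern);
    List<Path> paths = new ArrayList<>();
    if (matches != null) {
      for (FileStatus stat : matches) {
        paths.add(stat.getPath());
      }
    }
    return paths;
  }
}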

http://git-wip-us.apache.org/repos/asf/hive/blob/c2e335fc/ql/src/test/org/apache/hadoop/hive/ql/TestTxnNoBuckets.java
--
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnNoBuckets.java 
b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnNoBuckets.java
index bd63f5b..3c6b6be 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnNoBuckets.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnNoBuckets.java
@@ -178,6 +178,7 @@ public class TestTxnNoBuckets extends TxnCommandsBaseForTests {
 */
   @Test
   public void testCTAS() throws Exception {
+runStatementOnDriver("drop table if exists myctas");
 int[][] values = {{1,2},{3,4}};
 runStatementOnDriver("insert into " + Table.NONACIDORCTBL + makeValuesClause(values));
 runStatementOnDriver("create table myctas stored as ORC TBLPROPERTIES ('transactional" +
@@ -221,6 +222,16 @@ public class TestTxnNoBuckets extends TxnCommandsBaseForTests {
 };
 checkExpected(rs, expected4, "Unexpected row count after ctas from union distinct query");
   }
+  @Test
+  public void testCtasEmpty() throws Exception {
+MetastoreConf.setBoolVar(hiveConf, MetastoreConf.ConfVars.CREATE_TABLES_AS_ACID, true);
+runStatementOnDriver("drop table if exists myctas");
+runStatementOnDriver("create table myctas stored as ORC as" +
+" select a, b from " + Table.NONACIDORCTBL);
+List<String> rs = runStatementOnDriver("select ROW__ID, a, b, INPUT__FILE__NAME" +
+" from myctas order by ROW__ID");
+  }
+
   /**
* Insert into unbucketed acid table from union all query
* Union All is flattened so nested subdirs are created and acid move drops them since



hive git commit: HIVE-18518 : Upgrade druid version to 0.11.0 (Nishant Bangarwa via Ashutosh Chauhan)

2018-02-02 Thread hashutosh
Repository: hive
Updated Branches:
  refs/heads/master 466f51034 -> 464a3f61a


HIVE-18518 : Upgrade druid version to 0.11.0 (Nishant Bangarwa via Ashutosh 
Chauhan)

Signed-off-by: Ashutosh Chauhan 


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/464a3f61
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/464a3f61
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/464a3f61

Branch: refs/heads/master
Commit: 464a3f61a0c4a1c4e44a1ce427f604295534e969
Parents: 466f510
Author: Nishant Bangarwa 
Authored: Tue Jan 23 08:27:00 2018 -0800
Committer: Ashutosh Chauhan 
Committed: Fri Feb 2 10:53:17 2018 -0800

--
 druid-handler/pom.xml   |  2 +-
 .../hive/druid/DruidStorageHandlerUtils.java|  2 +-
 .../serde/HiveDruidSerializationModule.java |  3 ++
 .../hive/druid/TestDruidStorageHandler.java | 45 ++--
 .../TestHiveDruidQueryBasedInputFormat.java | 16 +++
 .../hive/ql/io/TestDruidRecordWriter.java   |  2 +-
 itests/qtest-druid/pom.xml  |  4 +-
 pom.xml |  4 +-
 8 files changed, 41 insertions(+), 37 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/464a3f61/druid-handler/pom.xml
--
diff --git a/druid-handler/pom.xml b/druid-handler/pom.xml
index 2a62b90..670d82b 100644
--- a/druid-handler/pom.xml
+++ b/druid-handler/pom.xml
@@ -29,7 +29,7 @@
 
   
 ..
-0.27.10
+1.3.2
 16.0.1
   
 

http://git-wip-us.apache.org/repos/asf/hive/blob/464a3f61/druid-handler/src/java/org/apache/hadoop/hive/druid/DruidStorageHandlerUtils.java
--
diff --git 
a/druid-handler/src/java/org/apache/hadoop/hive/druid/DruidStorageHandlerUtils.java
 
b/druid-handler/src/java/org/apache/hadoop/hive/druid/DruidStorageHandlerUtils.java
index 9de0097..2f956b1 100644
--- 
a/druid-handler/src/java/org/apache/hadoop/hive/druid/DruidStorageHandlerUtils.java
+++ 
b/druid-handler/src/java/org/apache/hadoop/hive/druid/DruidStorageHandlerUtils.java
@@ -24,12 +24,12 @@ import com.fasterxml.jackson.dataformat.smile.SmileFactory;
 import com.google.common.base.Throwables;
 import com.google.common.collect.Interner;
 import com.google.common.collect.Interners;
+import com.metamx.common.JodaUtils;
 import com.metamx.emitter.EmittingLogger;
 import com.metamx.emitter.core.NoopEmitter;
 import com.metamx.emitter.service.ServiceEmitter;
 import com.metamx.http.client.HttpClient;
 import com.metamx.http.client.response.InputStreamResponseHandler;
-import io.druid.common.utils.JodaUtils;
 import io.druid.jackson.DefaultObjectMapper;
 import io.druid.math.expr.ExprMacroTable;
 import io.druid.metadata.MetadataStorageTablesConfig;

http://git-wip-us.apache.org/repos/asf/hive/blob/464a3f61/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/HiveDruidSerializationModule.java
--
diff --git 
a/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/HiveDruidSerializationModule.java
 
b/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/HiveDruidSerializationModule.java
index f72fd0d..8a110ae 100644
--- 
a/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/HiveDruidSerializationModule.java
+++ 
b/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/HiveDruidSerializationModule.java
@@ -18,10 +18,13 @@
 package org.apache.hadoop.hive.druid.serde;
 
 import io.druid.java.util.common.granularity.PeriodGranularity;
+import io.druid.query.spec.LegacySegmentSpec;
 
 import com.fasterxml.jackson.core.util.VersionUtil;
 import com.fasterxml.jackson.databind.module.SimpleModule;
 
+import org.joda.time.Interval;
+
 /**
  * This class is used to define/override any serde behavior for classes from druid.
  * Currently it is used to override the default behavior when serializing PeriodGranularity to include user timezone.

http://git-wip-us.apache.org/repos/asf/hive/blob/464a3f61/druid-handler/src/test/org/apache/hadoop/hive/druid/TestDruidStorageHandler.java
--
diff --git 
a/druid-handler/src/test/org/apache/hadoop/hive/druid/TestDruidStorageHandler.java
 
b/druid-handler/src/test/org/apache/hadoop/hive/druid/TestDruidStorageHandler.java
index 6f7fc78..6a496c2 100644
--- 
a/druid-handler/src/test/org/apache/hadoop/hive/druid/TestDruidStorageHandler.java
+++ 
b/druid-handler/src/test/org/apache/hadoop/hive/druid/TestDruidStorageHandler.java
@@ -45,6 +45,7 @@ import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.Iterables;
 import 

hive git commit: HIVE-18449 : Add configurable policy for choosing the HMS URI from hive.metastore.uris (Janaki Lahorani, reviewed by Vihang Karajgaonkar)

2018-02-02 Thread vihangk1
Repository: hive
Updated Branches:
  refs/heads/master fdd8fabdc -> 466f51034


HIVE-18449 : Add configurable policy for choosing the HMS URI from 
hive.metastore.uris (Janaki Lahorani, reviewed by Vihang Karajgaonkar)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/466f5103
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/466f5103
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/466f5103

Branch: refs/heads/master
Commit: 466f51034e18d0fdf8527f1eec5d421cebb41db4
Parents: fdd8fab
Author: Vihang Karajgaonkar 
Authored: Fri Feb 2 10:50:47 2018 -0800
Committer: Vihang Karajgaonkar 
Committed: Fri Feb 2 10:50:47 2018 -0800

--
 .../org/apache/hadoop/hive/conf/HiveConf.java |  8 +++-
 .../hive/metastore/HiveMetaStoreClient.java   | 18 +++---
 .../hadoop/hive/metastore/conf/MetastoreConf.java |  6 ++
 3 files changed, 24 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/466f5103/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
--
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java 
b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index b7d3e99..4f2e6d3 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -216,6 +216,7 @@ public class HiveConf extends Configuration {
   HiveConf.ConfVars.METASTOREWAREHOUSE,
   HiveConf.ConfVars.REPLDIR,
   HiveConf.ConfVars.METASTOREURIS,
+  HiveConf.ConfVars.METASTORESELECTION,
   HiveConf.ConfVars.METASTORE_SERVER_PORT,
   HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES,
   HiveConf.ConfVars.METASTORETHRIFTFAILURERETRIES,
@@ -632,7 +633,12 @@ public class HiveConf extends Configuration {
 "location of default database for the warehouse"),
 METASTOREURIS("hive.metastore.uris", "",
 "Thrift URI for the remote metastore. Used by metastore client to connect to remote metastore."),
-
+METASTORESELECTION("hive.metastore.uri.selection", "RANDOM",
+new StringSet("SEQUENTIAL", "RANDOM"),
+"Determines the selection mechanism used by metastore client to connect to remote " +
+"metastore.  SEQUENTIAL implies that the first valid metastore from the URIs specified " +
+"as part of hive.metastore.uris will be picked.  RANDOM implies that the metastore " +
+"will be picked randomly"),
 METASTORE_CAPABILITY_CHECK("hive.metastore.client.capability.check", true,
 "Whether to check client capabilities for potentially breaking API usage."),
 METASTORE_FASTPATH("hive.metastore.fastpath", false,

http://git-wip-us.apache.org/repos/asf/hive/blob/466f5103/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
--
diff --git 
a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
 
b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
index 3a468b1..a3cb17b 100644
--- 
a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
+++ 
b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
@@ -195,9 +195,11 @@ public class HiveMetaStoreClient implements IMetaStoreClient, AutoCloseable {

 }
 // make metastore URIS random
-List<URI> uriList = Arrays.asList(metastoreUris);
-Collections.shuffle(uriList);
-metastoreUris = (URI[]) uriList.toArray();
+if (MetastoreConf.getVar(conf, ConfVars.THRIFT_URI_SELECTION).equalsIgnoreCase("RANDOM")) {
+  List<URI> uriList = Arrays.asList(metastoreUris);
+  Collections.shuffle(uriList);
+  metastoreUris = (URI[]) uriList.toArray();
+}
   } catch (IllegalArgumentException e) {
 throw (e);
   } catch (Exception e) {
@@ -322,10 +324,12 @@ public class HiveMetaStoreClient implements IMetaStoreClient, AutoCloseable {
   " at the client level.");
 } else {
   close();
-  // Swap the first element of the metastoreUris[] with a random element from the rest
-  // of the array. Rationale being that this method will generally be called when the default
-  // connection has died and the default connection is likely to be the first array element.
-  promoteRandomMetaStoreURI();
+  if (MetastoreConf.getVar(conf, ConfVars.THRIFT_URI_SELECTION).equalsIgnoreCase("RANDOM")) {
+// Swap the first element of the metastoreUris[] with a random element from the rest
+
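
A condensed sketch of the selection policy this change introduces (illustrative, not the
committed code): RANDOM shuffles the configured URIs once at client construction,
SEQUENTIAL keeps them in the configured order. The typed uriList.toArray(new URI[0]) form
is used here because it stays a URI[] for any List implementation:

import java.net.URI;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;

public final class UriSelection {
  static URI[] orderUris(URI[] metastoreUris, String selection) {
    if ("RANDOM".equalsIgnoreCase(selection)) {
      // shuffle a copy so the caller's array order is untouched
      List<URI> uriList = Arrays.asList(metastoreUris.clone());
      Collections.shuffle(uriList);
      return uriList.toArray(new URI[0]);
    }
    return metastoreUris; // SEQUENTIAL: first valid URI wins
  }
}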

hive git commit: HIVE-18447: JDBC: Provide a way for JDBC users to pass cookie info via connection string (Vaibhav Gumashta reviewed by Thejas Nair)

2018-02-02 Thread vgumashta
Repository: hive
Updated Branches:
  refs/heads/master 39f1e82ad -> fdd8fabdc


HIVE-18447: JDBC: Provide a way for JDBC users to pass cookie info via 
connection string (Vaibhav Gumashta reviewed by Thejas Nair)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/fdd8fabd
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/fdd8fabd
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/fdd8fabd

Branch: refs/heads/master
Commit: fdd8fabdcc5f8eb6ce749f55ec4637a0b96b4423
Parents: 39f1e82
Author: Vaibhav Gumashta 
Authored: Fri Feb 2 10:22:18 2018 -0800
Committer: Vaibhav Gumashta 
Committed: Fri Feb 2 10:22:18 2018 -0800

--
 .../TestThriftHttpCLIServiceFeatures.java   | 70 +++-
 .../org/apache/hive/jdbc/HiveConnection.java| 23 ---
 .../hive/jdbc/HttpBasicAuthInterceptor.java | 13 ++--
 .../jdbc/HttpKerberosRequestInterceptor.java|  8 +--
 .../hive/jdbc/HttpRequestInterceptorBase.java   | 20 +-
 .../hive/jdbc/HttpTokenAuthInterceptor.java |  6 +-
 jdbc/src/java/org/apache/hive/jdbc/Utils.java   |  8 ++-
 7 files changed, 105 insertions(+), 43 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/fdd8fabd/itests/hive-unit/src/test/java/org/apache/hive/service/cli/thrift/TestThriftHttpCLIServiceFeatures.java
--
diff --git 
a/itests/hive-unit/src/test/java/org/apache/hive/service/cli/thrift/TestThriftHttpCLIServiceFeatures.java
 
b/itests/hive-unit/src/test/java/org/apache/hive/service/cli/thrift/TestThriftHttpCLIServiceFeatures.java
index 93b10fb..9012867 100644
--- 
a/itests/hive-unit/src/test/java/org/apache/hive/service/cli/thrift/TestThriftHttpCLIServiceFeatures.java
+++ 
b/itests/hive-unit/src/test/java/org/apache/hive/service/cli/thrift/TestThriftHttpCLIServiceFeatures.java
@@ -90,18 +90,19 @@ public class TestThriftHttpCLIServiceFeatures  {
 */
   public class HttpBasicAuthInterceptorWithLogging extends HttpBasicAuthInterceptor {

-   ArrayList<String> requestHeaders;
+ArrayList<String> requestHeaders;
+String cookieHeader;

-   public HttpBasicAuthInterceptorWithLogging(String username,
-  String password, CookieStore cookieStore, String cn, boolean isSSL,
-  Map<String, String> additionalHeaders) {
-  super(username, password, cookieStore, cn, isSSL, additionalHeaders);
+public HttpBasicAuthInterceptorWithLogging(String username, String password,
+CookieStore cookieStore, String cn, boolean isSSL, Map<String, String> additionalHeaders,
+Map<String, String> customCookies) {
+  super(username, password, cookieStore, cn, isSSL, additionalHeaders, customCookies);
   requestHeaders = new ArrayList<String>();
 }

 @Override
 public void process(HttpRequest httpRequest, HttpContext httpContext)
-  throws HttpException, IOException {
+throws HttpException, IOException {
   super.process(httpRequest, httpContext);

   String currHeaders = "";
@@ -110,11 +111,21 @@ public class TestThriftHttpCLIServiceFeatures  {
 currHeaders += h.getName() + ":" + h.getValue() + " ";
   }
   requestHeaders.add(currHeaders);
+
+  Header[] headers = httpRequest.getHeaders("Cookie");
+  cookieHeader = "";
+  for (Header h : headers) {
+cookieHeader = cookieHeader + h.getName() + ":" + h.getValue();
+  }
 }

-public ArrayList<String>  getRequestHeaders() {
+public ArrayList<String> getRequestHeaders() {
   return requestHeaders;
 }
+
+public String getCookieHeader() {
+  return cookieHeader;
+}
   }


@@ -130,7 +141,7 @@ public class TestThriftHttpCLIServiceFeatures  {
 assertNotNull(ThriftCLIServiceTest.hiveServer2);
 assertNotNull(ThriftCLIServiceTest.hiveConf);
 HiveConf hiveConf = ThriftCLIServiceTest.hiveConf;
-
+
 hiveConf.setBoolVar(ConfVars.HIVE_SERVER2_ENABLE_DOAS, false);
 hiveConf.setVar(ConfVars.HIVE_SERVER2_THRIFT_BIND_HOST, ThriftCLIServiceTest.host);
 hiveConf.setIntVar(ConfVars.HIVE_SERVER2_THRIFT_HTTP_PORT, ThriftCLIServiceTest.port);
@@ -219,7 +230,7 @@ public class TestThriftHttpCLIServiceFeatures  {
 String httpUrl = getHttpUrl();
 httpClient.addRequestInterceptor(
 new HttpBasicAuthInterceptor(ThriftCLIServiceTest.USERNAME, ThriftCLIServiceTest.PASSWORD,
-null, null, false, null));
+null, null, false, null, null));
 return new THttpClient(httpUrl, httpClient);
   }

@@ -243,7 +254,7 @@ public class TestThriftHttpCLIServiceFeatures  {
 additionalHeaders.put("key2", "value2");
 HttpBasicAuthInterceptorWithLogging authInt =
   new HttpBasicAuthInterceptorWithLogging(ThriftCLIServiceTest.USERNAME,
hive git commit: HIVE-18590 : Assertion error on transitive join inference in the presence of NOT NULL constraint (Jesus Camacho Rodriguez via Ashutosh Chauhan)

2018-02-02 Thread hashutosh
Repository: hive
Updated Branches:
  refs/heads/master 32b899448 -> 39f1e82ad


HIVE-18590 : Assertion error on transitive join inference in the presence of 
NOT NULL constraint (Jesus Camacho Rodriguez via Ashutosh Chauhan)

Signed-off-by: Ashutosh Chauhan 


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/39f1e82a
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/39f1e82a
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/39f1e82a

Branch: refs/heads/master
Commit: 39f1e82ad2c974357dfc0d38ed776e3e25a2c4db
Parents: 32b8994
Author: Jesus Camacho Rodriguez 
Authored: Fri Feb 2 00:07:16 2018 -0800
Committer: Ashutosh Chauhan 
Committed: Fri Feb 2 00:07:16 2018 -0800

--
 .../HiveJoinPushTransitivePredicatesRule.java   |  23 ++-
 .../nullability_transitive_inference.q  |  40 
 .../nullability_transitive_inference.q.out  | 181 +++
 3 files changed, 240 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/39f1e82a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveJoinPushTransitivePredicatesRule.java
--
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveJoinPushTransitivePredicatesRule.java
 
b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveJoinPushTransitivePredicatesRule.java
index 3cd00f4..48b7765 100644
--- 
a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveJoinPushTransitivePredicatesRule.java
+++ 
b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveJoinPushTransitivePredicatesRule.java
@@ -34,6 +34,7 @@ import org.apache.calcite.rex.RexBuilder;
 import org.apache.calcite.rex.RexCall;
 import org.apache.calcite.rex.RexInputRef;
 import org.apache.calcite.rex.RexNode;
+import org.apache.calcite.rex.RexShuttle;
 import org.apache.calcite.rex.RexUtil;
 import org.apache.calcite.rex.RexVisitorImpl;
 import org.apache.calcite.sql.type.SqlTypeName;
@@ -103,13 +104,15 @@ public class HiveJoinPushTransitivePredicatesRule extends RelOptRule {

 if (!newLeftPredicate.isAlwaysTrue()) {
   RelNode curr = lChild;
-  lChild = filterFactory.createFilter(lChild, newLeftPredicate);
+  lChild = filterFactory.createFilter(
+  lChild, newLeftPredicate.accept(new RexReplacer(lChild)));
   call.getPlanner().onCopy(curr, lChild);
 }

 if (!newRightPredicate.isAlwaysTrue()) {
   RelNode curr = rChild;
-  rChild = filterFactory.createFilter(rChild, newRightPredicate);
+  rChild = filterFactory.createFilter(
+  rChild, newRightPredicate.accept(new RexReplacer(rChild)));
   call.getPlanner().onCopy(curr, rChild);
 }

@@ -161,7 +164,7 @@ public class HiveJoinPushTransitivePredicatesRule extends RelOptRule {
 return typeSafeRex;
   }

-  private static class InputRefValidator  extends RexVisitorImpl<Void> {
+  private static class InputRefValidator extends RexVisitorImpl<Void> {

 private final List<RelDataType> types;
 protected InputRefValidator(List<RelDataType> types) {
@@ -201,5 +204,17 @@ public class HiveJoinPushTransitivePredicatesRule extends RelOptRule {
   return false;
 }
   }
-}

+  /* Changes the type of the input references to adjust nullability */
+  private static class RexReplacer extends RexShuttle {
+private final RelNode input;
+
+RexReplacer(RelNode input) {
+  this.input = input;
+}
+
+@Override public RexNode visitInputRef(RexInputRef inputRef) {
+  return RexInputRef.of(inputRef.getIndex(), input.getRowType());
+}
+  }
+}
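
Why the shuttle is needed: a predicate inferred on one side of the join can carry
input-ref types whose nullability no longer matches the child it is pushed to (e.g. a
NOT NULL column), tripping Calcite's consistency assertion. The fix re-derives each
RexInputRef from the child's row type. A conceptual, self-contained sketch of the same
realignment, using toy types instead of Calcite's classes:

import java.util.Arrays;
import java.util.List;

public final class NullabilityRealign {
  // Toy stand-in for a typed column reference (Calcite's RexInputRef).
  static final class ColRef {
    final int index;
    final boolean nullable;
    ColRef(int index, boolean nullable) {
      this.index = index;
      this.nullable = nullable;
    }
  }

  // Rebuild a reference so its nullability matches the child row type,
  // mirroring RexInputRef.of(inputRef.getIndex(), input.getRowType()).
  static ColRef realign(ColRef ref, List<Boolean> childNullability) {
    return new ColRef(ref.index, childNullability.get(ref.index));
  }

  public static void main(String[] args) {
    List<Boolean> childRow = Arrays.asList(false); // column 0 is NOT NULL
    ColRef stale = new ColRef(0, true);            // predicate still thinks it's nullable
    ColRef fixed = realign(stale, childRow);
    System.out.println("nullable after realign: " + fixed.nullable); // false
  }
}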

http://git-wip-us.apache.org/repos/asf/hive/blob/39f1e82a/ql/src/test/queries/clientpositive/nullability_transitive_inference.q
--
diff --git 
a/ql/src/test/queries/clientpositive/nullability_transitive_inference.q 
b/ql/src/test/queries/clientpositive/nullability_transitive_inference.q
new file mode 100644
index 000..ff0ce3a
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/nullability_transitive_inference.q
@@ -0,0 +1,40 @@
+-- SORT_QUERY_RESULTS
+
+set hive.support.concurrency=true;
+set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+set hive.strict.checks.cartesian.product=false;
+set hive.stats.fetch.column.stats=true;
+set hive.materializedview.rewriting=true;
+
+create table emps (
+  empid int,
+  deptno int,
+  name varchar(256),
+  salary float,
+  commission int)
+stored as orc TBLPROPERTIES ('transactional'='true');
+insert into emps values (100, 10, 'Bill', 1, 1000), (200, 20, 'Eric', 8000, 500),
+  (150, 10, 'Sebastian', 7000, null), (110, 10, 'Theodore', 1, 250), (110, 10, 'Bill', 1,