[ https://issues.apache.org/jira/browse/DRILL-6834?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=16699200#comment-16699200 ]

ASF GitHub Bot commented on DRILL-6834:
---------------------------------------

asfgit closed pull request #1549: DRILL-6834: Introduce option to disable result set on CTAS, create vi…
URL: https://github.com/apache/drill/pull/1549

This is a PR merged from a forked repository. As GitHub hides the original diff on merge, it is displayed below for the sake of provenance:

diff --git a/contrib/native/client/src/protobuf/UserBitShared.pb.cc b/contrib/native/client/src/protobuf/UserBitShared.pb.cc
index 8bb6e07daab..ee81ee2060e 100644
--- a/contrib/native/client/src/protobuf/UserBitShared.pb.cc
+++ b/contrib/native/client/src/protobuf/UserBitShared.pb.cc
@@ -213,10 +213,11 @@ void protobuf_AssignDesc_UserBitShared_2eproto() {
       ::google::protobuf::MessageFactory::generated_factory(),
       sizeof(ParsingError));
   RecordBatchDef_descriptor_ = file->message_type(6);
-  static const int RecordBatchDef_offsets_[3] = {
+  static const int RecordBatchDef_offsets_[4] = {
    GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(RecordBatchDef, record_count_),
    GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(RecordBatchDef, field_),
    GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(RecordBatchDef, carries_two_byte_selection_vector_),
+    GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(RecordBatchDef, affected_rows_count_),
   };
   RecordBatchDef_reflection_ =
     new ::google::protobuf::internal::GeneratedMessageReflection(
@@ -302,10 +303,11 @@ void protobuf_AssignDesc_UserBitShared_2eproto() {
       sizeof(QueryResult));
   QueryResult_QueryState_descriptor_ = QueryResult_descriptor_->enum_type(0);
   QueryData_descriptor_ = file->message_type(11);
-  static const int QueryData_offsets_[3] = {
+  static const int QueryData_offsets_[4] = {
     GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(QueryData, query_id_),
     GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(QueryData, row_count_),
     GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(QueryData, def_),
+    GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(QueryData, affected_rows_count_),
   };
   QueryData_reflection_ =
     new ::google::protobuf::internal::GeneratedMessageReflection(
@@ -673,117 +675,119 @@ void protobuf_AddDesc_UserBitShared_2eproto() {
     "\013method_name\030\004 \001(\t\022\030\n\020is_native_method\030\005"
     " \001(\010\"\\\n\014ParsingError\022\024\n\014start_column\030\002 \001"
     "(\005\022\021\n\tstart_row\030\003 
\001(\005\022\022\n\nend_column\030\004 \001("
-    "\005\022\017\n\007end_row\030\005 
\001(\005\"~\n\016RecordBatchDef\022\024\n\014"
-    "record_count\030\001 \001(\005\022+\n\005field\030\002 
\003(\0132\034.exec"
-    ".shared.SerializedField\022)\n!carries_two_b"
-    "yte_selection_vector\030\003 \001(\010\"\205\001\n\010NamePart\022"
-    "(\n\004type\030\001 \001(\0162\032.exec.shared.NamePart.Typ"
-    "e\022\014\n\004name\030\002 \001(\t\022$\n\005child\030\003 
\001(\0132\025.exec.sh"
-    "ared.NamePart\"\033\n\004Type\022\010\n\004NAME\020\000\022\t\n\005ARRAY"
-    "\020\001\"\324\001\n\017SerializedField\022%\n\nmajor_type\030\001 \001"
-    "(\0132\021.common.MajorType\022(\n\tname_part\030\002 \001(\013"
-    "2\025.exec.shared.NamePart\022+\n\005child\030\003 \003(\0132\034"
-    ".exec.shared.SerializedField\022\023\n\013value_co"
-    "unt\030\004 \001(\005\022\027\n\017var_byte_length\030\005 
\001(\005\022\025\n\rbu"
-    "ffer_length\030\007 \001(\005\"7\n\nNodeStatus\022\017\n\007node_"
-    "id\030\001 \001(\005\022\030\n\020memory_footprint\030\002 
\001(\003\"\263\002\n\013Q"
-    "ueryResult\0228\n\013query_state\030\001 \001(\0162#.exec.s"
-    "hared.QueryResult.QueryState\022&\n\010query_id"
-    "\030\002 \001(\0132\024.exec.shared.QueryId\022(\n\005error\030\003 "
-    "\003(\0132\031.exec.shared.DrillPBError\"\227\001\n\nQuery"
-    "State\022\014\n\010STARTING\020\000\022\013\n\007RUNNING\020\001\022\r\n\tCOMP"
-    "LETED\020\002\022\014\n\010CANCELED\020\003\022\n\n\006FAILED\020\004\022\032\n\026CAN"
-    "CELLATION_REQUESTED\020\005\022\014\n\010ENQUEUED\020\006\022\r\n\tP"
-    "REPARING\020\007\022\014\n\010PLANNING\020\010\"p\n\tQueryData\022&\n"
-    "\010query_id\030\001 \001(\0132\024.exec.shared.QueryId\022\021\n"
-    "\trow_count\030\002 \001(\005\022(\n\003def\030\003 
\001(\0132\033.exec.sha"
-    "red.RecordBatchDef\"\330\001\n\tQueryInfo\022\r\n\005quer"
-    "y\030\001 \001(\t\022\r\n\005start\030\002 
\001(\003\0222\n\005state\030\003 \001(\0162#."
-    "exec.shared.QueryResult.QueryState\022\017\n\004us"
-    "er\030\004 \001(\t:\001-\022\'\n\007foreman\030\005 
\001(\0132\026.exec.Dril"
-    "lbitEndpoint\022\024\n\014options_json\030\006 \001(\t\022\022\n\nto"
-    "tal_cost\030\007 \001(\001\022\025\n\nqueue_name\030\010 
\001(\t:\001-\"\263\004"
-    "\n\014QueryProfile\022 \n\002id\030\001 \001(\0132\024.exec.shared"
-    ".QueryId\022$\n\004type\030\002 \001(\0162\026.exec.shared.Que"
-    "ryType\022\r\n\005start\030\003 \001(\003\022\013\n\003end\030\004 
\001(\003\022\r\n\005qu"
-    "ery\030\005 \001(\t\022\014\n\004plan\030\006 
\001(\t\022\'\n\007foreman\030\007 \001(\013"
-    "2\026.exec.DrillbitEndpoint\0222\n\005state\030\010 \001(\0162"
-    "#.exec.shared.QueryResult.QueryState\022\027\n\017"
-    "total_fragments\030\t \001(\005\022\032\n\022finished_fragme"
-    "nts\030\n \001(\005\022;\n\020fragment_profile\030\013 \003(\0132!.ex"
-    "ec.shared.MajorFragmentProfile\022\017\n\004user\030\014"
-    " \001(\t:\001-\022\r\n\005error\030\r 
\001(\t\022\024\n\014verboseError\030\016"
-    " \001(\t\022\020\n\010error_id\030\017 
\001(\t\022\022\n\nerror_node\030\020 \001"
-    "(\t\022\024\n\014options_json\030\021 
\001(\t\022\017\n\007planEnd\030\022 \001("
-    "\003\022\024\n\014queueWaitEnd\030\023 
\001(\003\022\022\n\ntotal_cost\030\024 "
-    "\001(\001\022\025\n\nqueue_name\030\025 
\001(\t:\001-\022\017\n\007queryId\030\026 "
-    "\001(\t\"t\n\024MajorFragmentProfile\022\031\n\021major_fra"
-    "gment_id\030\001 \001(\005\022A\n\026minor_fragment_profile"
-    "\030\002 \003(\0132!.exec.shared.MinorFragmentProfil"
-    "e\"\350\002\n\024MinorFragmentProfile\022)\n\005state\030\001 \001("
-    "\0162\032.exec.shared.FragmentState\022(\n\005error\030\002"
-    " \001(\0132\031.exec.shared.DrillPBError\022\031\n\021minor"
-    "_fragment_id\030\003 \001(\005\0226\n\020operator_profile\030\004"
-    " \003(\0132\034.exec.shared.OperatorProfile\022\022\n\nst"
-    "art_time\030\005 \001(\003\022\020\n\010end_time\030\006 
\001(\003\022\023\n\013memo"
-    "ry_used\030\007 \001(\003\022\027\n\017max_memory_used\030\010 
\001(\003\022("
-    "\n\010endpoint\030\t \001(\0132\026.exec.DrillbitEndpoint"
-    "\022\023\n\013last_update\030\n 
\001(\003\022\025\n\rlast_progress\030\013"
-    " \001(\003\"\377\001\n\017OperatorProfile\0221\n\rinput_profil"
-    "e\030\001 \003(\0132\032.exec.shared.StreamProfile\022\023\n\013o"
-    "perator_id\030\003 \001(\005\022\025\n\roperator_type\030\004 
\001(\005\022"
-    "\023\n\013setup_nanos\030\005 \001(\003\022\025\n\rprocess_nanos\030\006 "
-    "\001(\003\022#\n\033peak_local_memory_allocated\030\007 \001(\003"
-    "\022(\n\006metric\030\010 \003(\0132\030.exec.shared.MetricVal"
-    "ue\022\022\n\nwait_nanos\030\t \001(\003\"B\n\rStreamProfile\022"
-    "\017\n\007records\030\001 \001(\003\022\017\n\007batches\030\002 
\001(\003\022\017\n\007sch"
-    "emas\030\003 \001(\003\"J\n\013MetricValue\022\021\n\tmetric_id\030\001"
-    " \001(\005\022\022\n\nlong_value\030\002 
\001(\003\022\024\n\014double_value"
-    "\030\003 \001(\001\")\n\010Registry\022\035\n\003jar\030\001 
\003(\0132\020.exec.s"
-    "hared.Jar\"/\n\003Jar\022\014\n\004name\030\001 
\001(\t\022\032\n\022functi"
-    "on_signature\030\002 \003(\t\"W\n\013SaslMessage\022\021\n\tmec"
-    "hanism\030\001 \001(\t\022\014\n\004data\030\002 
\001(\014\022\'\n\006status\030\003 \001"
-    "(\0162\027.exec.shared.SaslStatus*5\n\nRpcChanne"
-    "l\022\017\n\013BIT_CONTROL\020\000\022\014\n\010BIT_DATA\020\001\022\010\n\004USER"
-    "\020\002*V\n\tQueryType\022\007\n\003SQL\020\001\022\013\n\007LOGICAL\020\002\022\014\n"
-    "\010PHYSICAL\020\003\022\r\n\tEXECUTION\020\004\022\026\n\022PREPARED_S"
-    "TATEMENT\020\005*\207\001\n\rFragmentState\022\013\n\007SENDING\020"
-    "\000\022\027\n\023AWAITING_ALLOCATION\020\001\022\013\n\007RUNNING\020\002\022"
-    "\014\n\010FINISHED\020\003\022\r\n\tCANCELLED\020\004\022\n\n\006FAILED\020\005"
-    "\022\032\n\026CANCELLATION_REQUESTED\020\006*\222\t\n\020CoreOpe"
-    "ratorType\022\021\n\rSINGLE_SENDER\020\000\022\024\n\020BROADCAS"
-    "T_SENDER\020\001\022\n\n\006FILTER\020\002\022\022\n\016HASH_AGGREGATE"
-    "\020\003\022\r\n\tHASH_JOIN\020\004\022\016\n\nMERGE_JOIN\020\005\022\031\n\025HAS"
-    "H_PARTITION_SENDER\020\006\022\t\n\005LIMIT\020\007\022\024\n\020MERGI"
-    "NG_RECEIVER\020\010\022\034\n\030ORDERED_PARTITION_SENDE"
-    "R\020\t\022\013\n\007PROJECT\020\n\022\026\n\022UNORDERED_RECEIVER\020\013"
-    "\022\032\n\026RANGE_PARTITION_SENDER\020\014\022\n\n\006SCREEN\020\r"
-    "\022\034\n\030SELECTION_VECTOR_REMOVER\020\016\022\027\n\023STREAM"
-    "ING_AGGREGATE\020\017\022\016\n\nTOP_N_SORT\020\020\022\021\n\rEXTER"
-    "NAL_SORT\020\021\022\t\n\005TRACE\020\022\022\t\n\005UNION\020\023\022\014\n\010OLD_"
-    "SORT\020\024\022\032\n\026PARQUET_ROW_GROUP_SCAN\020\025\022\021\n\rHI"
-    "VE_SUB_SCAN\020\026\022\025\n\021SYSTEM_TABLE_SCAN\020\027\022\021\n\r"
-    "MOCK_SUB_SCAN\020\030\022\022\n\016PARQUET_WRITER\020\031\022\023\n\017D"
-    "IRECT_SUB_SCAN\020\032\022\017\n\013TEXT_WRITER\020\033\022\021\n\rTEX"
-    "T_SUB_SCAN\020\034\022\021\n\rJSON_SUB_SCAN\020\035\022\030\n\024INFO_"
-    "SCHEMA_SUB_SCAN\020\036\022\023\n\017COMPLEX_TO_JSON\020\037\022\025"
-    "\n\021PRODUCER_CONSUMER\020 \022\022\n\016HBASE_SUB_SCAN\020"
-    "!\022\n\n\006WINDOW\020\"\022\024\n\020NESTED_LOOP_JOIN\020#\022\021\n\rA"
-    "VRO_SUB_SCAN\020$\022\021\n\rPCAP_SUB_SCAN\020%\022\022\n\016KAF"
-    "KA_SUB_SCAN\020&\022\021\n\rKUDU_SUB_SCAN\020\'\022\013\n\007FLAT"
-    "TEN\020(\022\020\n\014LATERAL_JOIN\020)\022\n\n\006UNNEST\020*\022,\n(H"
-    "IVE_DRILL_NATIVE_PARQUET_ROW_GROUP_SCAN\020"
-    "+\022\r\n\tJDBC_SCAN\020,\022\022\n\016REGEX_SUB_SCAN\020-\022\023\n\017"
-    "MAPRDB_SUB_SCAN\020.\022\022\n\016MONGO_SUB_SCAN\020/\022\017\n"
-    "\013KUDU_WRITER\0200\022\026\n\022OPEN_TSDB_SUB_SCAN\0201\022\017"
-    "\n\013JSON_WRITER\0202\022\026\n\022HTPPD_LOG_SUB_SCAN\0203\022"
-    "\022\n\016IMAGE_SUB_SCAN\0204\022\025\n\021SEQUENCE_SUB_SCAN"
-    "\0205\022\023\n\017PARTITION_LIMIT\0206\022\023\n\017PCAPNG_SUB_SC"
-    "AN\0207\022\022\n\016RUNTIME_FILTER\0208\022\017\n\013ROWKEY_JOIN\020"
-    "9*g\n\nSaslStatus\022\020\n\014SASL_UNKNOWN\020\000\022\016\n\nSAS"
-    "L_START\020\001\022\024\n\020SASL_IN_PROGRESS\020\002\022\020\n\014SASL_"
-    "SUCCESS\020\003\022\017\n\013SASL_FAILED\020\004B.\n\033org.apache"
-    ".drill.exec.protoB\rUserBitSharedH\001", 5474);
+    "\005\022\017\n\007end_row\030\005 
\001(\005\"\233\001\n\016RecordBatchDef\022\024\n"
+    "\014record_count\030\001 \001(\005\022+\n\005field\030\002 
\003(\0132\034.exe"
+    "c.shared.SerializedField\022)\n!carries_two_"
+    "byte_selection_vector\030\003 \001(\010\022\033\n\023affected_"
+    "rows_count\030\004 
\001(\005\"\205\001\n\010NamePart\022(\n\004type\030\001 "
+    "\001(\0162\032.exec.shared.NamePart.Type\022\014\n\004name\030"
+    "\002 \001(\t\022$\n\005child\030\003 \001(\0132\025.exec.shared.NameP"
+    "art\"\033\n\004Type\022\010\n\004NAME\020\000\022\t\n\005ARRAY\020\001\"\324\001\n\017Ser"
+    "ializedField\022%\n\nmajor_type\030\001 \001(\0132\021.commo"
+    "n.MajorType\022(\n\tname_part\030\002 \001(\0132\025.exec.sh"
+    "ared.NamePart\022+\n\005child\030\003 \003(\0132\034.exec.shar"
+    "ed.SerializedField\022\023\n\013value_count\030\004 \001(\005\022"
+    "\027\n\017var_byte_length\030\005 \001(\005\022\025\n\rbuffer_lengt"
+    "h\030\007 \001(\005\"7\n\nNodeStatus\022\017\n\007node_id\030\001 
\001(\005\022\030"
+    "\n\020memory_footprint\030\002 \001(\003\"\263\002\n\013QueryResult"
+    "\0228\n\013query_state\030\001 \001(\0162#.exec.shared.Quer"
+    "yResult.QueryState\022&\n\010query_id\030\002 \001(\0132\024.e"
+    "xec.shared.QueryId\022(\n\005error\030\003 \003(\0132\031.exec"
+    ".shared.DrillPBError\"\227\001\n\nQueryState\022\014\n\010S"
+    "TARTING\020\000\022\013\n\007RUNNING\020\001\022\r\n\tCOMPLETED\020\002\022\014\n"
+    "\010CANCELED\020\003\022\n\n\006FAILED\020\004\022\032\n\026CANCELLATION_"
+    "REQUESTED\020\005\022\014\n\010ENQUEUED\020\006\022\r\n\tPREPARING\020\007"
+    "\022\014\n\010PLANNING\020\010\"\215\001\n\tQueryData\022&\n\010query_id"
+    "\030\001 \001(\0132\024.exec.shared.QueryId\022\021\n\trow_coun"
+    "t\030\002 \001(\005\022(\n\003def\030\003 
\001(\0132\033.exec.shared.Recor"
+    "dBatchDef\022\033\n\023affected_rows_count\030\004 \001(\005\"\330"
+    "\001\n\tQueryInfo\022\r\n\005query\030\001 
\001(\t\022\r\n\005start\030\002 \001"
+    "(\003\0222\n\005state\030\003 \001(\0162#.exec.shared.QueryRes"
+    "ult.QueryState\022\017\n\004user\030\004 \001(\t:\001-\022\'\n\007forem"
+    "an\030\005 \001(\0132\026.exec.DrillbitEndpoint\022\024\n\014opti"
+    "ons_json\030\006 \001(\t\022\022\n\ntotal_cost\030\007 
\001(\001\022\025\n\nqu"
+    "eue_name\030\010 \001(\t:\001-\"\263\004\n\014QueryProfile\022 \n\002id"
+    "\030\001 \001(\0132\024.exec.shared.QueryId\022$\n\004type\030\002 \001"
+    "(\0162\026.exec.shared.QueryType\022\r\n\005start\030\003 \001("
+    "\003\022\013\n\003end\030\004 \001(\003\022\r\n\005query\030\005 
\001(\t\022\014\n\004plan\030\006 "
+    "\001(\t\022\'\n\007foreman\030\007 \001(\0132\026.exec.DrillbitEndp"
+    "oint\0222\n\005state\030\010 \001(\0162#.exec.shared.QueryR"
+    "esult.QueryState\022\027\n\017total_fragments\030\t \001("
+    "\005\022\032\n\022finished_fragments\030\n \001(\005\022;\n\020fragmen"
+    "t_profile\030\013 \003(\0132!.exec.shared.MajorFragm"
+    "entProfile\022\017\n\004user\030\014 \001(\t:\001-\022\r\n\005error\030\r 
\001"
+    "(\t\022\024\n\014verboseError\030\016 
\001(\t\022\020\n\010error_id\030\017 \001"
+    "(\t\022\022\n\nerror_node\030\020 
\001(\t\022\024\n\014options_json\030\021"
+    " \001(\t\022\017\n\007planEnd\030\022 
\001(\003\022\024\n\014queueWaitEnd\030\023 "
+    "\001(\003\022\022\n\ntotal_cost\030\024 
\001(\001\022\025\n\nqueue_name\030\025 "
+    "\001(\t:\001-\022\017\n\007queryId\030\026 \001(\t\"t\n\024MajorFragment"
+    "Profile\022\031\n\021major_fragment_id\030\001 \001(\005\022A\n\026mi"
+    "nor_fragment_profile\030\002 \003(\0132!.exec.shared"
+    ".MinorFragmentProfile\"\350\002\n\024MinorFragmentP"
+    "rofile\022)\n\005state\030\001 \001(\0162\032.exec.shared.Frag"
+    "mentState\022(\n\005error\030\002 \001(\0132\031.exec.shared.D"
+    "rillPBError\022\031\n\021minor_fragment_id\030\003 \001(\005\0226"
+    "\n\020operator_profile\030\004 \003(\0132\034.exec.shared.O"
+    "peratorProfile\022\022\n\nstart_time\030\005 \001(\003\022\020\n\010en"
+    "d_time\030\006 \001(\003\022\023\n\013memory_used\030\007 
\001(\003\022\027\n\017max"
+    "_memory_used\030\010 \001(\003\022(\n\010endpoint\030\t \001(\0132\026.e"
+    "xec.DrillbitEndpoint\022\023\n\013last_update\030\n \001("
+    "\003\022\025\n\rlast_progress\030\013 
\001(\003\"\377\001\n\017OperatorPro"
+    "file\0221\n\rinput_profile\030\001 \003(\0132\032.exec.share"
+    "d.StreamProfile\022\023\n\013operator_id\030\003 \001(\005\022\025\n\r"
+    "operator_type\030\004 \001(\005\022\023\n\013setup_nanos\030\005 
\001(\003"
+    "\022\025\n\rprocess_nanos\030\006 \001(\003\022#\n\033peak_local_me"
+    "mory_allocated\030\007 \001(\003\022(\n\006metric\030\010 
\003(\0132\030.e"
+    "xec.shared.MetricValue\022\022\n\nwait_nanos\030\t \001"
+    "(\003\"B\n\rStreamProfile\022\017\n\007records\030\001 
\001(\003\022\017\n\007"
+    "batches\030\002 \001(\003\022\017\n\007schemas\030\003 
\001(\003\"J\n\013Metric"
+    "Value\022\021\n\tmetric_id\030\001 
\001(\005\022\022\n\nlong_value\030\002"
+    " \001(\003\022\024\n\014double_value\030\003 
\001(\001\")\n\010Registry\022\035"
+    "\n\003jar\030\001 
\003(\0132\020.exec.shared.Jar\"/\n\003Jar\022\014\n\004"
+    "name\030\001 \001(\t\022\032\n\022function_signature\030\002 \003(\t\"W"
+    "\n\013SaslMessage\022\021\n\tmechanism\030\001 \001(\t\022\014\n\004data"
+    "\030\002 \001(\014\022\'\n\006status\030\003 
\001(\0162\027.exec.shared.Sas"
+    "lStatus*5\n\nRpcChannel\022\017\n\013BIT_CONTROL\020\000\022\014"
+    "\n\010BIT_DATA\020\001\022\010\n\004USER\020\002*V\n\tQueryType\022\007\n\003S"
+    "QL\020\001\022\013\n\007LOGICAL\020\002\022\014\n\010PHYSICAL\020\003\022\r\n\tEXECU"
+    "TION\020\004\022\026\n\022PREPARED_STATEMENT\020\005*\207\001\n\rFragm"
+    "entState\022\013\n\007SENDING\020\000\022\027\n\023AWAITING_ALLOCA"
+    "TION\020\001\022\013\n\007RUNNING\020\002\022\014\n\010FINISHED\020\003\022\r\n\tCAN"
+    "CELLED\020\004\022\n\n\006FAILED\020\005\022\032\n\026CANCELLATION_REQ"
+    "UESTED\020\006*\222\t\n\020CoreOperatorType\022\021\n\rSINGLE_"
+    "SENDER\020\000\022\024\n\020BROADCAST_SENDER\020\001\022\n\n\006FILTER"
+    "\020\002\022\022\n\016HASH_AGGREGATE\020\003\022\r\n\tHASH_JOIN\020\004\022\016\n"
+    "\nMERGE_JOIN\020\005\022\031\n\025HASH_PARTITION_SENDER\020\006"
+    "\022\t\n\005LIMIT\020\007\022\024\n\020MERGING_RECEIVER\020\010\022\034\n\030ORD"
+    "ERED_PARTITION_SENDER\020\t\022\013\n\007PROJECT\020\n\022\026\n\022"
+    "UNORDERED_RECEIVER\020\013\022\032\n\026RANGE_PARTITION_"
+    "SENDER\020\014\022\n\n\006SCREEN\020\r\022\034\n\030SELECTION_VECTOR"
+    "_REMOVER\020\016\022\027\n\023STREAMING_AGGREGATE\020\017\022\016\n\nT"
+    "OP_N_SORT\020\020\022\021\n\rEXTERNAL_SORT\020\021\022\t\n\005TRACE\020"
+    "\022\022\t\n\005UNION\020\023\022\014\n\010OLD_SORT\020\024\022\032\n\026PARQUET_RO"
+    "W_GROUP_SCAN\020\025\022\021\n\rHIVE_SUB_SCAN\020\026\022\025\n\021SYS"
+    "TEM_TABLE_SCAN\020\027\022\021\n\rMOCK_SUB_SCAN\020\030\022\022\n\016P"
+    "ARQUET_WRITER\020\031\022\023\n\017DIRECT_SUB_SCAN\020\032\022\017\n\013"
+    "TEXT_WRITER\020\033\022\021\n\rTEXT_SUB_SCAN\020\034\022\021\n\rJSON"
+    "_SUB_SCAN\020\035\022\030\n\024INFO_SCHEMA_SUB_SCAN\020\036\022\023\n"
+    "\017COMPLEX_TO_JSON\020\037\022\025\n\021PRODUCER_CONSUMER\020"
+    " 
\022\022\n\016HBASE_SUB_SCAN\020!\022\n\n\006WINDOW\020\"\022\024\n\020NES"
+    "TED_LOOP_JOIN\020#\022\021\n\rAVRO_SUB_SCAN\020$\022\021\n\rPC"
+    "AP_SUB_SCAN\020%\022\022\n\016KAFKA_SUB_SCAN\020&\022\021\n\rKUD"
+    "U_SUB_SCAN\020\'\022\013\n\007FLATTEN\020(\022\020\n\014LATERAL_JOI"
+    "N\020)\022\n\n\006UNNEST\020*\022,\n(HIVE_DRILL_NATIVE_PAR"
+    "QUET_ROW_GROUP_SCAN\020+\022\r\n\tJDBC_SCAN\020,\022\022\n\016"
+    "REGEX_SUB_SCAN\020-\022\023\n\017MAPRDB_SUB_SCAN\020.\022\022\n"
+    "\016MONGO_SUB_SCAN\020/\022\017\n\013KUDU_WRITER\0200\022\026\n\022OP"
+    "EN_TSDB_SUB_SCAN\0201\022\017\n\013JSON_WRITER\0202\022\026\n\022H"
+    "TPPD_LOG_SUB_SCAN\0203\022\022\n\016IMAGE_SUB_SCAN\0204\022"
+    "\025\n\021SEQUENCE_SUB_SCAN\0205\022\023\n\017PARTITION_LIMI"
+    "T\0206\022\023\n\017PCAPNG_SUB_SCAN\0207\022\022\n\016RUNTIME_FILT"
+    "ER\0208\022\017\n\013ROWKEY_JOIN\0209*g\n\nSaslStatus\022\020\n\014S"
+    "ASL_UNKNOWN\020\000\022\016\n\nSASL_START\020\001\022\024\n\020SASL_IN"
+    "_PROGRESS\020\002\022\020\n\014SASL_SUCCESS\020\003\022\017\n\013SASL_FA"
+    "ILED\020\004B.\n\033org.apache.drill.exec.protoB\rU"
+    "serBitSharedH\001", 5534);
   ::google::protobuf::MessageFactory::InternalRegisterGeneratedFile(
     "UserBitShared.proto", &protobuf_RegisterTypes);
   UserCredentials::default_instance_ = new UserCredentials();
@@ -3076,6 +3080,7 @@ ::google::protobuf::Metadata ParsingError::GetMetadata() const {
 const int RecordBatchDef::kRecordCountFieldNumber;
 const int RecordBatchDef::kFieldFieldNumber;
 const int RecordBatchDef::kCarriesTwoByteSelectionVectorFieldNumber;
+const int RecordBatchDef::kAffectedRowsCountFieldNumber;
 #endif  // !_MSC_VER
 
 RecordBatchDef::RecordBatchDef()
@@ -3096,6 +3101,7 @@ void RecordBatchDef::SharedCtor() {
   _cached_size_ = 0;
   record_count_ = 0;
   carries_two_byte_selection_vector_ = false;
+  affected_rows_count_ = 0;
   ::memset(_has_bits_, 0, sizeof(_has_bits_));
 }
 
@@ -3133,6 +3139,7 @@ void RecordBatchDef::Clear() {
   if (_has_bits_[0 / 32] & (0xffu << (0 % 32))) {
     record_count_ = 0;
     carries_two_byte_selection_vector_ = false;
+    affected_rows_count_ = 0;
   }
   field_.Clear();
   ::memset(_has_bits_, 0, sizeof(_has_bits_));
@@ -3187,6 +3194,22 @@ bool RecordBatchDef::MergePartialFromCodedStream(
         } else {
           goto handle_uninterpreted;
         }
+        if (input->ExpectTag(32)) goto parse_affected_rows_count;
+        break;
+      }
+
+      // optional int32 affected_rows_count = 4;
+      case 4: {
+        if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
+            ::google::protobuf::internal::WireFormatLite::WIRETYPE_VARINT) {
+         parse_affected_rows_count:
+          DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive<
+                   ::google::protobuf::int32, ::google::protobuf::internal::WireFormatLite::TYPE_INT32>(
+                 input, &affected_rows_count_)));
+          set_has_affected_rows_count();
+        } else {
+          goto handle_uninterpreted;
+        }
         if (input->ExpectAtEnd()) return true;
         break;
       }
@@ -3225,6 +3248,11 @@ void RecordBatchDef::SerializeWithCachedSizes(
    ::google::protobuf::internal::WireFormatLite::WriteBool(3, this->carries_two_byte_selection_vector(), output);
   }
 
+  // optional int32 affected_rows_count = 4;
+  if (has_affected_rows_count()) {
+    ::google::protobuf::internal::WireFormatLite::WriteInt32(4, this->affected_rows_count(), output);
+  }
+
   if (!unknown_fields().empty()) {
     ::google::protobuf::internal::WireFormat::SerializeUnknownFields(
         unknown_fields(), output);
@@ -3250,6 +3278,11 @@ ::google::protobuf::uint8* RecordBatchDef::SerializeWithCachedSizesToArray(
    target = ::google::protobuf::internal::WireFormatLite::WriteBoolToArray(3, this->carries_two_byte_selection_vector(), target);
   }
 
+  // optional int32 affected_rows_count = 4;
+  if (has_affected_rows_count()) {
+    target = ::google::protobuf::internal::WireFormatLite::WriteInt32ToArray(4, this->affected_rows_count(), target);
+  }
+
   if (!unknown_fields().empty()) {
    target = ::google::protobuf::internal::WireFormat::SerializeUnknownFieldsToArray(
         unknown_fields(), target);
@@ -3273,6 +3306,13 @@ int RecordBatchDef::ByteSize() const {
       total_size += 1 + 1;
     }
 
+    // optional int32 affected_rows_count = 4;
+    if (has_affected_rows_count()) {
+      total_size += 1 +
+        ::google::protobuf::internal::WireFormatLite::Int32Size(
+          this->affected_rows_count());
+    }
+
   }
   // repeated .exec.shared.SerializedField field = 2;
   total_size += 1 * this->field_size();
@@ -3315,6 +3355,9 @@ void RecordBatchDef::MergeFrom(const RecordBatchDef& from) {
    if (from.has_carries_two_byte_selection_vector()) {
      set_carries_two_byte_selection_vector(from.carries_two_byte_selection_vector());
     }
+    if (from.has_affected_rows_count()) {
+      set_affected_rows_count(from.affected_rows_count());
+    }
   }
   mutable_unknown_fields()->MergeFrom(from.unknown_fields());
 }
@@ -3341,6 +3384,7 @@ void RecordBatchDef::Swap(RecordBatchDef* other) {
     std::swap(record_count_, other->record_count_);
     field_.Swap(&other->field_);
    std::swap(carries_two_byte_selection_vector_, other->carries_two_byte_selection_vector_);
+    std::swap(affected_rows_count_, other->affected_rows_count_);
     std::swap(_has_bits_[0], other->_has_bits_[0]);
     _unknown_fields_.Swap(&other->_unknown_fields_);
     std::swap(_cached_size_, other->_cached_size_);
@@ -4699,6 +4743,7 @@ ::google::protobuf::Metadata QueryResult::GetMetadata() const {
 const int QueryData::kQueryIdFieldNumber;
 const int QueryData::kRowCountFieldNumber;
 const int QueryData::kDefFieldNumber;
+const int QueryData::kAffectedRowsCountFieldNumber;
 #endif  // !_MSC_VER
 
 QueryData::QueryData()
@@ -4722,6 +4767,7 @@ void QueryData::SharedCtor() {
   query_id_ = NULL;
   row_count_ = 0;
   def_ = NULL;
+  affected_rows_count_ = 0;
   ::memset(_has_bits_, 0, sizeof(_has_bits_));
 }
 
@@ -4766,6 +4812,7 @@ void QueryData::Clear() {
     if (has_def()) {
       if (def_ != NULL) def_->::exec::shared::RecordBatchDef::Clear();
     }
+    affected_rows_count_ = 0;
   }
   ::memset(_has_bits_, 0, sizeof(_has_bits_));
   mutable_unknown_fields()->Clear();
@@ -4816,6 +4863,22 @@ bool QueryData::MergePartialFromCodedStream(
         } else {
           goto handle_uninterpreted;
         }
+        if (input->ExpectTag(32)) goto parse_affected_rows_count;
+        break;
+      }
+
+      // optional int32 affected_rows_count = 4;
+      case 4: {
+        if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
+            ::google::protobuf::internal::WireFormatLite::WIRETYPE_VARINT) {
+         parse_affected_rows_count:
+          DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive<
+                   ::google::protobuf::int32, ::google::protobuf::internal::WireFormatLite::TYPE_INT32>(
+                 input, &affected_rows_count_)));
+          set_has_affected_rows_count();
+        } else {
+          goto handle_uninterpreted;
+        }
         if (input->ExpectAtEnd()) return true;
         break;
       }
@@ -4855,6 +4918,11 @@ void QueryData::SerializeWithCachedSizes(
       3, this->def(), output);
   }
 
+  // optional int32 affected_rows_count = 4;
+  if (has_affected_rows_count()) {
+    ::google::protobuf::internal::WireFormatLite::WriteInt32(4, this->affected_rows_count(), output);
+  }
+
   if (!unknown_fields().empty()) {
     ::google::protobuf::internal::WireFormat::SerializeUnknownFields(
         unknown_fields(), output);
@@ -4882,6 +4950,11 @@ ::google::protobuf::uint8* QueryData::SerializeWithCachedSizesToArray(
         3, this->def(), target);
   }
 
+  // optional int32 affected_rows_count = 4;
+  if (has_affected_rows_count()) {
+    target = ::google::protobuf::internal::WireFormatLite::WriteInt32ToArray(4, this->affected_rows_count(), target);
+  }
+
   if (!unknown_fields().empty()) {
    target = ::google::protobuf::internal::WireFormat::SerializeUnknownFieldsToArray(
         unknown_fields(), target);
@@ -4914,6 +4987,13 @@ int QueryData::ByteSize() const {
           this->def());
     }
 
+    // optional int32 affected_rows_count = 4;
+    if (has_affected_rows_count()) {
+      total_size += 1 +
+        ::google::protobuf::internal::WireFormatLite::Int32Size(
+          this->affected_rows_count());
+    }
+
   }
   if (!unknown_fields().empty()) {
     total_size +=
@@ -4950,6 +5030,9 @@ void QueryData::MergeFrom(const QueryData& from) {
     if (from.has_def()) {
       mutable_def()->::exec::shared::RecordBatchDef::MergeFrom(from.def());
     }
+    if (from.has_affected_rows_count()) {
+      set_affected_rows_count(from.affected_rows_count());
+    }
   }
   mutable_unknown_fields()->MergeFrom(from.unknown_fields());
 }
@@ -4976,6 +5059,7 @@ void QueryData::Swap(QueryData* other) {
     std::swap(query_id_, other->query_id_);
     std::swap(row_count_, other->row_count_);
     std::swap(def_, other->def_);
+    std::swap(affected_rows_count_, other->affected_rows_count_);
     std::swap(_has_bits_[0], other->_has_bits_[0]);
     _unknown_fields_.Swap(&other->_unknown_fields_);
     std::swap(_cached_size_, other->_cached_size_);
diff --git a/contrib/native/client/src/protobuf/UserBitShared.pb.h b/contrib/native/client/src/protobuf/UserBitShared.pb.h
index ab3063de14d..3fa19118cd1 100644
--- a/contrib/native/client/src/protobuf/UserBitShared.pb.h
+++ b/contrib/native/client/src/protobuf/UserBitShared.pb.h
@@ -1122,21 +1122,31 @@ class RecordBatchDef : public ::google::protobuf::Message {
   inline bool carries_two_byte_selection_vector() const;
   inline void set_carries_two_byte_selection_vector(bool value);
 
+  // optional int32 affected_rows_count = 4;
+  inline bool has_affected_rows_count() const;
+  inline void clear_affected_rows_count();
+  static const int kAffectedRowsCountFieldNumber = 4;
+  inline ::google::protobuf::int32 affected_rows_count() const;
+  inline void set_affected_rows_count(::google::protobuf::int32 value);
+
   // @@protoc_insertion_point(class_scope:exec.shared.RecordBatchDef)
  private:
   inline void set_has_record_count();
   inline void clear_has_record_count();
   inline void set_has_carries_two_byte_selection_vector();
   inline void clear_has_carries_two_byte_selection_vector();
+  inline void set_has_affected_rows_count();
+  inline void clear_has_affected_rows_count();
 
   ::google::protobuf::UnknownFieldSet _unknown_fields_;
 
  ::google::protobuf::RepeatedPtrField< ::exec::shared::SerializedField > field_;
   ::google::protobuf::int32 record_count_;
   bool carries_two_byte_selection_vector_;
+  ::google::protobuf::int32 affected_rows_count_;
 
   mutable int _cached_size_;
-  ::google::protobuf::uint32 _has_bits_[(3 + 31) / 32];
+  ::google::protobuf::uint32 _has_bits_[(4 + 31) / 32];
 
   friend void  protobuf_AddDesc_UserBitShared_2eproto();
   friend void protobuf_AssignDesc_UserBitShared_2eproto();
@@ -1728,6 +1738,13 @@ class QueryData : public ::google::protobuf::Message {
   inline ::exec::shared::RecordBatchDef* release_def();
   inline void set_allocated_def(::exec::shared::RecordBatchDef* def);
 
+  // optional int32 affected_rows_count = 4;
+  inline bool has_affected_rows_count() const;
+  inline void clear_affected_rows_count();
+  static const int kAffectedRowsCountFieldNumber = 4;
+  inline ::google::protobuf::int32 affected_rows_count() const;
+  inline void set_affected_rows_count(::google::protobuf::int32 value);
+
   // @@protoc_insertion_point(class_scope:exec.shared.QueryData)
  private:
   inline void set_has_query_id();
@@ -1736,15 +1753,18 @@ class QueryData : public ::google::protobuf::Message {
   inline void clear_has_row_count();
   inline void set_has_def();
   inline void clear_has_def();
+  inline void set_has_affected_rows_count();
+  inline void clear_has_affected_rows_count();
 
   ::google::protobuf::UnknownFieldSet _unknown_fields_;
 
   ::exec::shared::QueryId* query_id_;
   ::exec::shared::RecordBatchDef* def_;
   ::google::protobuf::int32 row_count_;
+  ::google::protobuf::int32 affected_rows_count_;
 
   mutable int _cached_size_;
-  ::google::protobuf::uint32 _has_bits_[(3 + 31) / 32];
+  ::google::protobuf::uint32 _has_bits_[(4 + 31) / 32];
 
   friend void  protobuf_AddDesc_UserBitShared_2eproto();
   friend void protobuf_AssignDesc_UserBitShared_2eproto();
@@ -4250,6 +4270,28 @@ inline void RecordBatchDef::set_carries_two_byte_selection_vector(bool value) {
   carries_two_byte_selection_vector_ = value;
 }
 
+// optional int32 affected_rows_count = 4;
+inline bool RecordBatchDef::has_affected_rows_count() const {
+  return (_has_bits_[0] & 0x00000008u) != 0;
+}
+inline void RecordBatchDef::set_has_affected_rows_count() {
+  _has_bits_[0] |= 0x00000008u;
+}
+inline void RecordBatchDef::clear_has_affected_rows_count() {
+  _has_bits_[0] &= ~0x00000008u;
+}
+inline void RecordBatchDef::clear_affected_rows_count() {
+  affected_rows_count_ = 0;
+  clear_has_affected_rows_count();
+}
+inline ::google::protobuf::int32 RecordBatchDef::affected_rows_count() const {
+  return affected_rows_count_;
+}
+inline void RecordBatchDef::set_affected_rows_count(::google::protobuf::int32 value) {
+  set_has_affected_rows_count();
+  affected_rows_count_ = value;
+}
+
 // -------------------------------------------------------------------
 
 // NamePart
@@ -4796,6 +4838,28 @@ inline void QueryData::set_allocated_def(::exec::shared::RecordBatchDef* def) {
   }
 }
 
+// optional int32 affected_rows_count = 4;
+inline bool QueryData::has_affected_rows_count() const {
+  return (_has_bits_[0] & 0x00000008u) != 0;
+}
+inline void QueryData::set_has_affected_rows_count() {
+  _has_bits_[0] |= 0x00000008u;
+}
+inline void QueryData::clear_has_affected_rows_count() {
+  _has_bits_[0] &= ~0x00000008u;
+}
+inline void QueryData::clear_affected_rows_count() {
+  affected_rows_count_ = 0;
+  clear_has_affected_rows_count();
+}
+inline ::google::protobuf::int32 QueryData::affected_rows_count() const {
+  return affected_rows_count_;
+}
+inline void QueryData::set_affected_rows_count(::google::protobuf::int32 value) {
+  set_has_affected_rows_count();
+  affected_rows_count_ = value;
+}
+
 // -------------------------------------------------------------------
 
 // QueryInfo
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/ExecConstants.java b/exec/java-exec/src/main/java/org/apache/drill/exec/ExecConstants.java
index 95168bc9632..76cbd3d9118 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/ExecConstants.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/ExecConstants.java
@@ -875,5 +875,10 @@ public static String bootDefaultFor(String name) {
 
  public static final String LIST_FILES_RECURSIVELY = "storage.list_files_recursively";
  public static final BooleanValidator LIST_FILES_RECURSIVELY_VALIDATOR = new BooleanValidator(LIST_FILES_RECURSIVELY,
-      new OptionDescription("Enables recursive files listing when querying the `INFORMATION_SCHEMA.FILES` table or executing the SHOW FILES command. Default is false. (Drill 1.15+"));
+      new OptionDescription("Enables recursive files listing when querying the `INFORMATION_SCHEMA.FILES` table or executing the SHOW FILES command. Default is false. (Drill 1.15+)"));
+
+  public static final String RETURN_RESULT_SET_FOR_DDL = "exec.return_result_set_for_ddl";
+  public static final BooleanValidator RETURN_RESULT_SET_FOR_DDL_VALIDATOR = new BooleanValidator(RETURN_RESULT_SET_FOR_DDL,
+      new OptionDescription("Controls whether to return result set for CREATE TABLE/VIEW, DROP TABLE/VIEW, SET, USE etc. queries. " +
+          "If set to false affected rows count will be returned instead and result set will be null. Default is true. (Drill 1.15+)"));
 }
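
As a usage note on the option registered above: like any Drill option it can be set at system or session scope. A minimal sketch, assuming a standard Drill JDBC connection URL (illustrative, not part of this PR):

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class ToggleReturnResultSet {
  public static void main(String[] args) throws Exception {
    // Assumed JDBC URL; adjust to your drillbit.
    try (Connection conn = DriverManager.getConnection("jdbc:drill:drillbit=localhost");
         Statement stmt = conn.createStatement()) {
      stmt.execute("ALTER SYSTEM SET `exec.return_result_set_for_ddl` = true");   // cluster-wide default
      stmt.execute("ALTER SESSION SET `exec.return_result_set_for_ddl` = false"); // this session only
    }
  }
}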
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/materialize/VectorRecordMaterializer.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/materialize/VectorRecordMaterializer.java
index 2d8c2310011..7cdf9b3c613 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/materialize/VectorRecordMaterializer.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/materialize/VectorRecordMaterializer.java
@@ -17,6 +17,7 @@
  */
 package org.apache.drill.exec.physical.impl.materialize;
 
+import org.apache.drill.exec.ExecConstants;
 import org.apache.drill.exec.memory.BufferAllocator;
 import org.apache.drill.exec.ops.FragmentContext;
 import org.apache.drill.exec.ops.OperatorContext;
@@ -25,13 +26,15 @@
 import org.apache.drill.exec.record.BatchSchema;
 import org.apache.drill.exec.record.RecordBatch;
 import org.apache.drill.exec.record.WritableBatch;
+import org.apache.drill.exec.server.options.OptionManager;
 
-public class VectorRecordMaterializer implements RecordMaterializer{
+public class VectorRecordMaterializer implements RecordMaterializer {
  static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(VectorRecordMaterializer.class);
 
   private QueryId queryId;
   private RecordBatch batch;
   private BufferAllocator allocator;
+  private OptionManager options;
 
  public VectorRecordMaterializer(FragmentContext context, OperatorContext oContext, RecordBatch batch) {
     this.queryId = context.getHandle().getQueryId();
@@ -39,21 +42,19 @@ public VectorRecordMaterializer(FragmentContext context, OperatorContext oContex
     this.allocator = oContext.getAllocator();
     BatchSchema schema = batch.getSchema();
     assert schema != null : "Schema must be defined.";
-
-//    for (MaterializedField f : batch.getSchema()) {
-//      logger.debug("New Field: {}", f);
-//    }
+    options = context.getOptions();
   }
 
   public QueryWritableBatch convertNext() {
-    //batch.getWritableBatch().getDef().getRecordCount()
     WritableBatch w = batch.getWritableBatch().transfer(allocator);
-
-    QueryData header = QueryData.newBuilder() //
-        .setQueryId(queryId) //
-        .setRowCount(batch.getRecordCount()) //
-        .setDef(w.getDef()).build();
-    QueryWritableBatch batch = new QueryWritableBatch(header, w.getBuffers());
-    return batch;
+    QueryData.Builder builder = QueryData.newBuilder()
+        .setQueryId(queryId)
+        .setRowCount(batch.getRecordCount())
+        .setDef(w.getDef());
+    if (!options.getBoolean(ExecConstants.RETURN_RESULT_SET_FOR_DDL)) {
+      int count = w.getDef().getAffectedRowsCount();
+      builder.setAffectedRowsCount(count == -1 ? 0 : count);
+    }
+    return new QueryWritableBatch(builder.build(), w.getBuffers());
   }
 }
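
The materializer change above is the server half of the feature: when the option is disabled, the affected-rows count is copied from the RecordBatchDef into the QueryData header. A hedged client-side sketch using the generated accessors that appear in this diff (the org.apache.drill.exec.proto package is the one named at the end of the descriptor above):

import org.apache.drill.exec.proto.UserBitShared.QueryData;

public class AffectedRowsExample {
  public static void main(String[] args) {
    // Build a header the way the materializer now does for DDL queries.
    QueryData header = QueryData.newBuilder().setAffectedRowsCount(3).build();
    // hasAffectedRowsCount() distinguishes "field absent" from an explicit 0.
    int affected = header.hasAffectedRowsCount() ? header.getAffectedRowsCount() : 0;
    System.out.println("affected rows = " + affected);
  }
}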
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/DrillSqlWorker.java b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/DrillSqlWorker.java
index 41faea96c13..7a4fcdf5b07 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/DrillSqlWorker.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/DrillSqlWorker.java
@@ -20,10 +20,12 @@
 import java.io.IOException;
 
 import org.apache.calcite.sql.SqlDescribeSchema;
+import org.apache.calcite.sql.SqlKind;
 import org.apache.calcite.sql.SqlNode;
 import org.apache.calcite.tools.RelConversionException;
 import org.apache.calcite.tools.ValidationException;
 import org.apache.drill.common.exceptions.UserException;
+import org.apache.drill.exec.ExecConstants;
 import org.apache.drill.exec.ops.QueryContext;
 import org.apache.drill.exec.physical.PhysicalPlan;
 import org.apache.drill.exec.planner.sql.handlers.AbstractSqlHandler;
@@ -35,7 +37,6 @@
 import org.apache.drill.exec.planner.sql.handlers.SqlHandlerConfig;
 import org.apache.drill.exec.planner.sql.parser.DrillSqlCall;
 import org.apache.drill.exec.planner.sql.parser.DrillSqlDescribeTable;
-import org.apache.drill.exec.planner.sql.parser.SqlCreateTable;
 import org.apache.drill.exec.testing.ControlsInjector;
 import org.apache.drill.exec.testing.ControlsInjectorFactory;
 import org.apache.drill.exec.util.Pointer;
@@ -110,7 +111,7 @@ private static PhysicalPlan getQueryPlan(QueryContext context, String sql, Point
     final AbstractSqlHandler handler;
     final SqlHandlerConfig config = new SqlHandlerConfig(context, parser);
 
-    switch(sqlNode.getKind()){
+    switch(sqlNode.getKind()) {
     case EXPLAIN:
       handler = new ExplainHandler(config, textPlan);
       break;
@@ -127,14 +128,16 @@ private static PhysicalPlan getQueryPlan(QueryContext context, String sql, Point
         handler = new DescribeSchemaHandler(config);
         break;
       }
+    case CREATE_TABLE:
+      handler = ((DrillSqlCall) sqlNode).getSqlHandler(config, textPlan);
+      break;
+    case DROP_TABLE:
+    case CREATE_VIEW:
+    case DROP_VIEW:
+    case OTHER_DDL:
     case OTHER:
-      if(sqlNode instanceof SqlCreateTable) {
-        handler = ((DrillSqlCall)sqlNode).getSqlHandler(config, textPlan);
-        break;
-      }
-
       if (sqlNode instanceof DrillSqlCall) {
-        handler = ((DrillSqlCall)sqlNode).getSqlHandler(config);
+        handler = ((DrillSqlCall) sqlNode).getSqlHandler(config);
         break;
       }
       // fallthrough
@@ -142,6 +145,12 @@ private static PhysicalPlan getQueryPlan(QueryContext context, String sql, Point
       handler = new DefaultSqlHandler(config, textPlan);
     }
 
+    boolean returnResultSet = context.getOptions().getBoolean(ExecConstants.RETURN_RESULT_SET_FOR_DDL);
+    // Determine whether result set should be returned for the query based on `exec.return_result_set_for_ddl`
+    // and sql node kind. Overrides the option on a query level.
+    context.getOptions().setLocalOption(ExecConstants.RETURN_RESULT_SET_FOR_DDL,
+        returnResultSet || !SqlKind.DDL.contains(sqlNode.getKind()));
+
     try {
       return handler.getPlan(sqlNode);
     } catch(ValidationException e) {
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/parser/SqlCreateFunction.java b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/parser/SqlCreateFunction.java
index eeab237f48c..88618879223 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/parser/SqlCreateFunction.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/parser/SqlCreateFunction.java
@@ -36,7 +36,7 @@
 
   private final SqlNode jar;
 
-  public static final SqlSpecialOperator OPERATOR = new SqlSpecialOperator("CREATE_FUNCTION", SqlKind.OTHER) {
+  public static final SqlSpecialOperator OPERATOR = new SqlSpecialOperator("CREATE_FUNCTION", SqlKind.OTHER_DDL) {
     @Override
    public SqlCall createCall(SqlLiteral functionQualifier, SqlParserPos pos, SqlNode... operands) {
       return new SqlCreateFunction(pos, operands[0]);
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/parser/SqlCreateTable.java b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/parser/SqlCreateTable.java
index 48b2e1b2704..11e33aa524d 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/parser/SqlCreateTable.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/parser/SqlCreateTable.java
@@ -41,7 +41,7 @@
 import org.apache.drill.exec.util.Pointer;
 
 public class SqlCreateTable extends DrillSqlCall {
-  public static final SqlSpecialOperator OPERATOR = new SqlSpecialOperator("CREATE_TABLE", SqlKind.OTHER) {
+  public static final SqlSpecialOperator OPERATOR = new SqlSpecialOperator("CREATE_TABLE", SqlKind.CREATE_TABLE) {
     @Override
    public SqlCall createCall(SqlLiteral functionQualifier, SqlParserPos pos, SqlNode... operands) {
      Preconditions.checkArgument(operands.length == 6, "SqlCreateTable.createCall() has to get 6 operands!");
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/parser/SqlCreateView.java b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/parser/SqlCreateView.java
index 9e8bd8ab4a2..f61aeaa397b 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/parser/SqlCreateView.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/parser/SqlCreateView.java
@@ -37,7 +37,7 @@
 import java.util.List;
 
 public class SqlCreateView extends DrillSqlCall {
-  public static final SqlSpecialOperator OPERATOR = new SqlSpecialOperator("CREATE_VIEW", SqlKind.OTHER) {
+  public static final SqlSpecialOperator OPERATOR = new SqlSpecialOperator("CREATE_VIEW", SqlKind.CREATE_VIEW) {
     @Override
    public SqlCall createCall(SqlLiteral functionQualifier, SqlParserPos pos, SqlNode... operands) {
      return new SqlCreateView(pos, (SqlIdentifier) operands[0], (SqlNodeList) operands[1], operands[2], (SqlLiteral) operands[3]);
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/parser/SqlDropFunction.java b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/parser/SqlDropFunction.java
index b7fef1b9d5c..94839f30c90 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/parser/SqlDropFunction.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/parser/SqlDropFunction.java
@@ -36,7 +36,7 @@
 
   private final SqlNode jar;
 
-  public static final SqlSpecialOperator OPERATOR = new SqlSpecialOperator("DROP_FUNCTION", SqlKind.OTHER) {
+  public static final SqlSpecialOperator OPERATOR = new SqlSpecialOperator("DROP_FUNCTION", SqlKind.OTHER_DDL) {
     @Override
    public SqlCall createCall(SqlLiteral functionQualifier, SqlParserPos pos, SqlNode... operands) {
       return new SqlDropFunction(pos, operands[0]);
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/parser/SqlDropTable.java b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/parser/SqlDropTable.java
index cbdb415771a..dbc67886fa1 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/parser/SqlDropTable.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/parser/SqlDropTable.java
@@ -35,7 +35,7 @@
 import org.apache.drill.shaded.guava.com.google.common.collect.ImmutableList;
 
 public class SqlDropTable extends DrillSqlCall {
-  public static final SqlSpecialOperator OPERATOR = new SqlSpecialOperator("DROP_TABLE", SqlKind.OTHER) {
+  public static final SqlSpecialOperator OPERATOR = new SqlSpecialOperator("DROP_TABLE", SqlKind.DROP_TABLE) {
     @Override
    public SqlCall createCall(SqlLiteral functionQualifier, SqlParserPos pos, SqlNode... operands) {
      return new SqlDropTable(pos, (SqlIdentifier) operands[0], (SqlLiteral) operands[1]);
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/parser/SqlDropView.java b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/parser/SqlDropView.java
index d137cb93c46..bfd3474262a 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/parser/SqlDropView.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/parser/SqlDropView.java
@@ -35,7 +35,7 @@
 import org.apache.drill.shaded.guava.com.google.common.collect.ImmutableList;
 
 public class SqlDropView extends DrillSqlCall {
-  public static final SqlSpecialOperator OPERATOR = new SqlSpecialOperator("DROP_VIEW", SqlKind.OTHER) {
+  public static final SqlSpecialOperator OPERATOR = new SqlSpecialOperator("DROP_VIEW", SqlKind.DROP_VIEW) {
     @Override
    public SqlCall createCall(SqlLiteral functionQualifier, SqlParserPos pos, SqlNode... operands) {
      return new SqlDropView(pos, (SqlIdentifier) operands[0], (SqlLiteral) operands[1]);
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/parser/SqlRefreshMetadata.java b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/parser/SqlRefreshMetadata.java
index 72fc397bbc0..84d95f4e04e 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/parser/SqlRefreshMetadata.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/parser/SqlRefreshMetadata.java
@@ -40,7 +40,7 @@
  * REFRESH TABLE METADATA tblname
  */
 public class SqlRefreshMetadata extends DrillSqlCall {
-  public static final SqlSpecialOperator OPERATOR = new SqlSpecialOperator("REFRESH_TABLE_METADATA", SqlKind.OTHER) {
+  public static final SqlSpecialOperator OPERATOR = new SqlSpecialOperator("REFRESH_TABLE_METADATA", SqlKind.OTHER_DDL) {
     @Override
    public SqlCall createCall(SqlLiteral functionQualifier, SqlParserPos pos, SqlNode... operands) {
       return new SqlRefreshMetadata(pos, (SqlIdentifier) operands[0]);
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/parser/SqlUseSchema.java b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/parser/SqlUseSchema.java
index df2bc1d7098..8c05dc48e26 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/parser/SqlUseSchema.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/parser/SqlUseSchema.java
@@ -38,8 +38,7 @@
  */
 public class SqlUseSchema extends DrillSqlCall {
 
-  public static final SqlSpecialOperator OPERATOR =
-      new SqlSpecialOperator("USE_SCHEMA", SqlKind.OTHER){
+  public static final SqlSpecialOperator OPERATOR = new SqlSpecialOperator("USE_SCHEMA", SqlKind.OTHER_DDL) {
     @Override
    public SqlCall createCall(SqlLiteral functionQualifier, SqlParserPos pos, SqlNode... operands) {
       return new SqlUseSchema(pos, (SqlIdentifier) operands[0]);
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/server/options/OptionList.java b/exec/java-exec/src/main/java/org/apache/drill/exec/server/options/OptionList.java
index e2161aa3e4c..bef69907083 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/server/options/OptionList.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/server/options/OptionList.java
@@ -18,10 +18,18 @@
 package org.apache.drill.exec.server.options;
 
 import java.util.ArrayList;
+import java.util.Collection;
 
 @SuppressWarnings("serial")
 public class OptionList extends ArrayList<OptionValue>{
 
+  public OptionList() {
+  }
+
+  public OptionList(Collection<OptionValue> options) {
+    super(options);
+  }
+
   public void merge(OptionList list){
     this.addAll(list);
   }
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/server/options/QueryOptionManager.java b/exec/java-exec/src/main/java/org/apache/drill/exec/server/options/QueryOptionManager.java
index 1c7d2988676..124172671fd 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/server/options/QueryOptionManager.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/server/options/QueryOptionManager.java
@@ -19,6 +19,9 @@
 
 import org.apache.drill.common.map.CaseInsensitiveMap;
 
+import java.util.HashMap;
+import java.util.Map;
+
 /**
 * {@link OptionManager} that holds options within {@link org.apache.drill.exec.ops.QueryContext}.
  */
@@ -31,9 +34,14 @@ public QueryOptionManager(OptionManager sessionOptions) {
 
   @Override
   public OptionList getOptionList() {
-    OptionList list = super.getOptionList();
-    list.merge(fallback.getOptionList());
-    return list;
+    Map<String, OptionValue> optionMap = new HashMap<>();
+    for (OptionValue option : fallback.getOptionList()) {
+      optionMap.put(option.name, option);
+    }
+    for (OptionValue option : super.getOptionList()) {
+      optionMap.put(option.name, option);
+    }
+    return new OptionList(optionMap.values());
   }
 
   @Override
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/server/options/SystemOptionManager.java b/exec/java-exec/src/main/java/org/apache/drill/exec/server/options/SystemOptionManager.java
index 7897c3b0096..37934c8e9cf 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/server/options/SystemOptionManager.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/server/options/SystemOptionManager.java
@@ -267,7 +267,8 @@
      new OptionDefinition(ExecConstants.OUTPUT_BATCH_SIZE_AVAIL_MEM_FACTOR_VALIDATOR, new OptionMetaData(OptionValue.AccessibleScopes.SYSTEM, true, false)),
      new OptionDefinition(ExecConstants.FRAG_RUNNER_RPC_TIMEOUT_VALIDATOR, new OptionMetaData(OptionValue.AccessibleScopes.SYSTEM, true, true)),
       new OptionDefinition(ExecConstants.LIST_FILES_RECURSIVELY_VALIDATOR),
-      new OptionDefinition(ExecConstants.QUERY_ROWKEYJOIN_BATCHSIZE)
+      new OptionDefinition(ExecConstants.QUERY_ROWKEYJOIN_BATCHSIZE),
+      new OptionDefinition(ExecConstants.RETURN_RESULT_SET_FOR_DDL_VALIDATOR)
     };
 
     CaseInsensitiveMap<OptionDefinition> map = Arrays.stream(definitions)
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/sys/ExtendedOptionIterator.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/sys/ExtendedOptionIterator.java
index a8c3c84283a..f6c7c08dc66 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/sys/ExtendedOptionIterator.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/sys/ExtendedOptionIterator.java
@@ -35,7 +35,7 @@
 
 import org.apache.drill.shaded.guava.com.google.common.collect.Lists;
 
-/*
+/**
 * Extends the original Option iterator. The idea is to hide the implementation details and present the
 * user with the rows which have values set at the top level of hierarchy and exclude the values set
 * at lower levels. This is done by examining the scope and the precedence order of scope is session - system - default.
@@ -55,22 +55,16 @@
  *  only the value set at SESSION level.
  */
 public class ExtendedOptionIterator implements Iterator<Object> {
-  //private static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(ExtendedOptionIterator.class);
 
   private final OptionManager fragmentOptions;
   private final Iterator<OptionValue> mergedOptions;
   private Map<OptionValue.Kind, String> typeMapping;
-  private Map<OptionScope, Integer> preference;
   private static final int SHORT_DESCRIP_MAX_SIZE = 110;
 
   public ExtendedOptionIterator(FragmentContext context, boolean internal) {
     fragmentOptions = context.getOptions();
-    preference = new HashMap<OptionScope, Integer>();
-    preference.put(OptionScope.SESSION, 0);
-    preference.put(OptionScope.SYSTEM, 1);
-    preference.put(OptionScope.BOOT, 2);
 
-    typeMapping = new HashMap<Kind, String>();
+    typeMapping = new HashMap<>();
     typeMapping.put(Kind.STRING, "VARCHAR");
     typeMapping.put(Kind.DOUBLE, "FLOAT");
     typeMapping.put(Kind.LONG, "BIGINT");
@@ -92,9 +86,14 @@ public ExtendedOptionIterator(FragmentContext context, boolean internal) {
     HashMap<String, OptionValue> optionsmap = new HashMap<>();
 
     for (OptionValue option : optionslist) {
+      if (option.scope == OptionScope.QUERY) {
+        // Option set on query level should be ignored here as its value should not be shown to user
+        continue;
+      }
+
       if (optionsmap.containsKey(option.getName())) {
 
-        if (preference.get(option.scope) < preference.get(optionsmap.get(option.getName()).scope)) {
+        if (option.scope.compareTo(optionsmap.get(option.getName()).scope) > 0) {
           optionsmap.put(option.getName(), option);
         }
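
The rewritten comparison above replaces the removed preference map with enum ordering: java.lang.Enum.compareTo() orders constants by declaration position, so the trick assumes OptionValue.OptionScope declares its constants in ascending precedence (BOOT, SYSTEM, SESSION, QUERY). A minimal self-contained sketch of the idea:

public class ScopePrecedence {
  // Stand-in for OptionValue.OptionScope, assumed declared lowest to highest precedence.
  enum Scope { BOOT, SYSTEM, SESSION, QUERY }

  public static void main(String[] args) {
    Scope existing = Scope.SYSTEM;
    Scope candidate = Scope.SESSION;
    // Replaces the removed HashMap<OptionScope, Integer> ranking.
    if (candidate.compareTo(existing) > 0) {
      System.out.println(candidate + " overrides " + existing);
    }
  }
}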
 
diff --git a/exec/java-exec/src/main/resources/drill-module.conf b/exec/java-exec/src/main/resources/drill-module.conf
index 632b4ed5057..8aa3233a904 100644
--- a/exec/java-exec/src/main/resources/drill-module.conf
+++ b/exec/java-exec/src/main/resources/drill-module.conf
@@ -644,5 +644,5 @@ drill.exec.options: {
     planner.index.prefer_intersect_plans: false,
     planner.index.max_indexes_to_intersect: 5,
     exec.query.rowkeyjoin_batchsize: 128,
-
+    exec.return_result_set_for_ddl: true,
 }
diff --git a/exec/jdbc/src/main/java/org/apache/drill/jdbc/DrillStatement.java b/exec/jdbc/src/main/java/org/apache/drill/jdbc/DrillStatement.java
index 1bddacb4390..f93ef3be080 100644
--- a/exec/jdbc/src/main/java/org/apache/drill/jdbc/DrillStatement.java
+++ b/exec/jdbc/src/main/java/org/apache/drill/jdbc/DrillStatement.java
@@ -17,6 +17,8 @@
  */
 package org.apache.drill.jdbc;
 
+import org.apache.calcite.avatica.AvaticaResultSet;
+
 import java.sql.SQLException;
 import java.sql.Statement;
 
@@ -60,4 +62,7 @@ void setQueryTimeout( int seconds )
   @Override
   boolean isClosed();
 
+  void setResultSet(AvaticaResultSet resultSet);
+
+  void setUpdateCount(int value);
 }
diff --git a/exec/jdbc/src/main/java/org/apache/drill/jdbc/impl/DrillCursor.java b/exec/jdbc/src/main/java/org/apache/drill/jdbc/impl/DrillCursor.java
index 20b065225ca..888a90993f8 100644
--- a/exec/jdbc/src/main/java/org/apache/drill/jdbc/impl/DrillCursor.java
+++ b/exec/jdbc/src/main/java/org/apache/drill/jdbc/impl/DrillCursor.java
@@ -29,6 +29,7 @@
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
 
+import org.apache.drill.jdbc.DrillStatement;
 import org.apache.drill.shaded.guava.com.google.common.base.Stopwatch;
 import org.apache.calcite.avatica.AvaticaStatement;
 import org.apache.calcite.avatica.ColumnMetaData;
@@ -325,7 +326,7 @@ void close() {
    * <p>
    *   (Relates to {@link #loadInitialSchema()}'s calling
    *   {@link #nextRowInternally()} one "extra" time (extra relative to number
-   *   of {@link ResultSet#next()} calls) at the beginning to get first batch
+   *   of {@link java.sql.ResultSet#next()} calls) at the beginning to get first batch
    *   and schema before {@code Statement.execute...(...)} even returns.)
    * </p>
    */
@@ -450,7 +451,7 @@ private void updateColumns() {
    * <p>
    *   Is to be called (once) from {@link #loadInitialSchema} for
    *   {@link DrillResultSetImpl#execute()}, and then (repeatedly) from
-   *   {@link #next()} for {@link AvaticaResultSet#next()}.
+   *   {@link #next()} for {@link org.apache.calcite.avatica.AvaticaResultSet#next()}.
    * </p>
    *
    * @return  whether cursor is positioned at a row (false when after end of
@@ -499,6 +500,13 @@ private boolean nextRowInternally() throws SQLException {
 
           currentRecordNumber = 0;
 
+          if (qrb.getHeader().hasAffectedRowsCount()) {
+            int updateCount = qrb.getHeader().getAffectedRowsCount();
+            int currentUpdateCount = statement.getUpdateCount() == -1 ? 0 : statement.getUpdateCount();
+            ((DrillStatement) statement).setUpdateCount(updateCount + currentUpdateCount);
+            ((DrillStatement) statement).setResultSet(null);
+          }
+
           final boolean schemaChanged;
           try {
             schemaChanged = currentBatchHolder.load(qrb.getHeader().getDef(),
@@ -549,7 +557,7 @@ private boolean nextRowInternally() throws SQLException {
    * Advances to first batch to load schema data into result set metadata.
    * <p>
    *   To be called once from {@link DrillResultSetImpl#execute()} before
-   *   {@link #next()} is called from {@link AvaticaResultSet#next()}.
+   *   {@link #next()} is called from {@link org.apache.calcite.avatica.AvaticaResultSet#next()}.
    * <p>
    */
   void loadInitialSchema() throws SQLException {
diff --git a/exec/jdbc/src/main/java/org/apache/drill/jdbc/impl/DrillStatementImpl.java b/exec/jdbc/src/main/java/org/apache/drill/jdbc/impl/DrillStatementImpl.java
index 5386ec68aa2..7255c759d9b 100644
--- a/exec/jdbc/src/main/java/org/apache/drill/jdbc/impl/DrillStatementImpl.java
+++ b/exec/jdbc/src/main/java/org/apache/drill/jdbc/impl/DrillStatementImpl.java
@@ -21,6 +21,7 @@
 import java.sql.SQLException;
 import java.sql.SQLFeatureNotSupportedException;
 
+import org.apache.calcite.avatica.AvaticaResultSet;
 import org.apache.calcite.avatica.AvaticaStatement;
 import org.apache.calcite.avatica.Meta.StatementHandle;
 import org.apache.drill.common.exceptions.DrillRuntimeException;
@@ -259,4 +260,14 @@ public void setPoolable(boolean poolable) throws SQLException {
       throw new SQLFeatureNotSupportedException(e.getMessage(), e);
     }
   }
+
+  @Override
+  public void setResultSet(AvaticaResultSet resultSet) {
+    openResultSet = resultSet;
+  }
+
+  @Override
+  public void setUpdateCount(int value) {
+    updateCount = value;
+  }
 }
diff --git a/exec/jdbc/src/test/java/org/apache/drill/jdbc/test/TestJdbcQuery.java b/exec/jdbc/src/test/java/org/apache/drill/jdbc/test/TestJdbcQuery.java
index 24a5661f2ed..ab854aa208f 100644
--- a/exec/jdbc/src/test/java/org/apache/drill/jdbc/test/TestJdbcQuery.java
+++ b/exec/jdbc/src/test/java/org/apache/drill/jdbc/test/TestJdbcQuery.java
@@ -18,6 +18,7 @@
 package org.apache.drill.jdbc.test;
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
 
 import java.nio.file.Paths;
 import java.sql.Connection;
@@ -26,6 +27,7 @@
 import java.sql.Types;
 
 import org.apache.drill.categories.JdbcTest;
+import org.apache.drill.exec.ExecConstants;
 import org.apache.drill.jdbc.JdbcTestBase;
 import org.junit.BeforeClass;
 import org.junit.Ignore;
@@ -403,4 +405,110 @@ public void testConvertFromInEmptyInputSql() throws Exception {
         .sql("SELECT CONVERT_FROM(columns[1], 'JSON') as col1 from cp.`empty.csv`")
         .returns("");
   }
+
+  @Test
+  public void testResultSetIsNotReturnedSet() throws Exception {
+    try (Connection conn = connect();
+         Statement s = conn.createStatement()) {
+
+      s.execute(String.format("SET `%s` = false", 
ExecConstants.RETURN_RESULT_SET_FOR_DDL));
+
+      // Set any option
+      s.execute(String.format("SET `%s` = 'json'", 
ExecConstants.OUTPUT_FORMAT_OPTION));
+      assertNull("No result", s.getResultSet());
+    }
+  }
+
+  @Test
+  public void testResultSetIsNotReturnedCTAS() throws Exception {
+    String tableName = "dfs.tmp.`ctas`";
+
+    try (Connection conn = connect();
+         Statement s = conn.createStatement()) {
+      s.execute(String.format("SET `%s` = false", 
ExecConstants.RETURN_RESULT_SET_FOR_DDL));
+
+      s.execute(String.format("CREATE TABLE %s AS SELECT * FROM 
cp.`employee.json`", tableName));
+      assertNull("No result", s.getResultSet());
+    } finally {
+      execute("DROP TABLE IF EXISTS %s", tableName);
+    }
+  }
+
+  @Test
+  public void testResultSetIsNotReturnedCreateView() throws Exception {
+    String viewName = "dfs.tmp.`cv`";
+
+    try (Connection conn = connect();
+         Statement s = conn.createStatement()) {
+      s.execute(String.format("SET `%s` = false", 
ExecConstants.RETURN_RESULT_SET_FOR_DDL));
+
+      s.execute(String.format("CREATE VIEW %s AS SELECT * FROM 
cp.`employee.json`", viewName));
+      assertNull("No result", s.getResultSet());
+    } finally {
+      execute("DROP VIEW IF EXISTS %s", viewName);
+    }
+  }
+
+  @Test
+  public void testResultSetIsNotReturnedDropTable() throws Exception {
+    String tableName = "dfs.tmp.`dt`";
+
+    try (Connection conn = connect();
+         Statement s = conn.createStatement()) {
+      s.execute(String.format("SET `%s` = false", 
ExecConstants.RETURN_RESULT_SET_FOR_DDL));
+
+      s.execute(String.format("CREATE TABLE %s AS SELECT * FROM 
cp.`employee.json`", tableName));
+
+      s.execute(String.format("DROP TABLE %s", tableName));
+      assertNull("No result", s.getResultSet());
+    }
+  }
+
+  @Test
+  public void testResultSetIsNotReturnedDropView() throws Exception {
+    String viewName = "dfs.tmp.`dv`";
+
+    try (Connection conn = connect();
+         Statement stmt = conn.createStatement()) {
+      stmt.execute(String.format("SET `%s` = false", 
ExecConstants.RETURN_RESULT_SET_FOR_DDL));
+
+      stmt.execute(String.format("CREATE VIEW %s AS SELECT * FROM 
cp.`employee.json`", viewName));
+
+      stmt.execute(String.format("DROP VIEW %s", viewName));
+      assertNull("No result", stmt.getResultSet());
+    }
+  }
+
+  @Test
+  public void testResultSetIsNotReturnedUse() throws Exception {
+    try (Connection conn = connect();
+         Statement s = conn.createStatement()) {
+      s.execute(String.format("SET `%s` = false", 
ExecConstants.RETURN_RESULT_SET_FOR_DDL));
+
+      s.execute("USE dfs.tmp");
+      assertNull("No result", s.getResultSet());
+    }
+  }
+
+  @Test
+  public void testResultSetIsNotReturnedRefreshMetadata() throws Exception {
+    String tableName = "dfs.tmp.`rm`";
+
+    try (Connection conn = connect();
+         Statement s = conn.createStatement()) {
+      s.execute(String.format("SET `%s` = false", 
ExecConstants.RETURN_RESULT_SET_FOR_DDL));
+
+      s.execute(String.format("CREATE TABLE %s AS SELECT * FROM 
cp.`employee.json`", tableName));
+
+      s.execute(String.format("REFRESH TABLE METADATA %s", tableName));
+      assertNull("No result", s.getResultSet());
+    }
+  }
+
+  private static void execute(String sql, Object... params) throws Exception {
+    try (Connection conn = connect();
+         Statement s = conn.createStatement()) {
+      s.execute(String.format(sql, params));
+    }
+  }
 }
diff --git a/protocol/src/main/java/org/apache/drill/exec/proto/SchemaUserBitShared.java b/protocol/src/main/java/org/apache/drill/exec/proto/SchemaUserBitShared.java
index 7b845a49342..ba647626d38 100644
--- a/protocol/src/main/java/org/apache/drill/exec/proto/SchemaUserBitShared.java
+++ b/protocol/src/main/java/org/apache/drill/exec/proto/SchemaUserBitShared.java
@@ -830,6 +830,8 @@ public void writeTo(com.dyuproject.protostuff.Output output, org.apache.drill.ex
 
                 if(message.hasCarriesTwoByteSelectionVector())
                    output.writeBool(3, message.getCarriesTwoByteSelectionVector(), false);
+                if(message.hasAffectedRowsCount())
+                    output.writeInt32(4, message.getAffectedRowsCount(), false);
             }
            public boolean isInitialized(org.apache.drill.exec.proto.UserBitShared.RecordBatchDef message)
             {
@@ -879,6 +881,9 @@ public void mergeFrom(com.dyuproject.protostuff.Input input, org.apache.drill.ex
                        case 3:
                            builder.setCarriesTwoByteSelectionVector(input.readBool());
                             break;
+                        case 4:
+                            builder.setAffectedRowsCount(input.readInt32());
+                            break;
                         default:
                             input.handleUnknownField(number, this);
                     }
@@ -922,6 +927,7 @@ public void writeTo(com.dyuproject.protostuff.Output output, org.apache.drill.ex
                 case 1: return "recordCount";
                 case 2: return "field";
                 case 3: return "carriesTwoByteSelectionVector";
+                case 4: return "affectedRowsCount";
                 default: return null;
             }
         }
@@ -936,6 +942,7 @@ public static int getFieldNumber(java.lang.String name)
             fieldMap.put("recordCount", 1);
             fieldMap.put("field", 2);
             fieldMap.put("carriesTwoByteSelectionVector", 3);
+            fieldMap.put("affectedRowsCount", 4);
         }
     }
 
@@ -1484,6 +1491,8 @@ public void writeTo(com.dyuproject.protostuff.Output output, org.apache.drill.ex
                 if(message.hasDef())
                    output.writeObject(3, message.getDef(), org.apache.drill.exec.proto.SchemaUserBitShared.RecordBatchDef.WRITE, false);
 
+                if(message.hasAffectedRowsCount())
+                    output.writeInt32(4, message.getAffectedRowsCount(), false);
             }
            public boolean isInitialized(org.apache.drill.exec.proto.UserBitShared.QueryData message)
             {
@@ -1533,6 +1542,9 @@ public void mergeFrom(com.dyuproject.protostuff.Input input, org.apache.drill.ex
                        case 3:
                            builder.setDef(input.mergeObject(org.apache.drill.exec.proto.UserBitShared.RecordBatchDef.newBuilder(), org.apache.drill.exec.proto.SchemaUserBitShared.RecordBatchDef.MERGE));
 
+                            break;
+                        case 4:
+                            builder.setAffectedRowsCount(input.readInt32());
                             break;
                         default:
                             input.handleUnknownField(number, this);
@@ -1577,6 +1589,7 @@ public void writeTo(com.dyuproject.protostuff.Output output, org.apache.drill.ex
                 case 1: return "queryId";
                 case 2: return "rowCount";
                 case 3: return "def";
+                case 4: return "affectedRowsCount";
                 default: return null;
             }
         }
@@ -1591,6 +1604,7 @@ public static int getFieldNumber(java.lang.String name)
             fieldMap.put("queryId", 1);
             fieldMap.put("rowCount", 2);
             fieldMap.put("def", 3);
+            fieldMap.put("affectedRowsCount", 4);
         }
     }
 
diff --git a/protocol/src/main/java/org/apache/drill/exec/proto/UserBitShared.java b/protocol/src/main/java/org/apache/drill/exec/proto/UserBitShared.java
index 2f5c3de214d..f9696394f16 100644
--- a/protocol/src/main/java/org/apache/drill/exec/proto/UserBitShared.java
+++ b/protocol/src/main/java/org/apache/drill/exec/proto/UserBitShared.java
@@ -6796,6 +6796,16 @@ public Builder clearEndRow() {
      * <code>optional bool carries_two_byte_selection_vector = 3;</code>
      */
     boolean getCarriesTwoByteSelectionVector();
+
+    // optional int32 affected_rows_count = 4;
+    /**
+     * <code>optional int32 affected_rows_count = 4;</code>
+     */
+    boolean hasAffectedRowsCount();
+    /**
+     * <code>optional int32 affected_rows_count = 4;</code>
+     */
+    int getAffectedRowsCount();
   }
   /**
    * Protobuf type {@code exec.shared.RecordBatchDef}
@@ -6866,6 +6876,11 @@ private RecordBatchDef(
               carriesTwoByteSelectionVector_ = input.readBool();
               break;
             }
+            case 32: {
+              bitField0_ |= 0x00000004;
+              affectedRowsCount_ = input.readInt32();
+              break;
+            }
           }
         }
       } catch (com.google.protobuf.InvalidProtocolBufferException e) {
@@ -6977,10 +6992,27 @@ public boolean getCarriesTwoByteSelectionVector() {
       return carriesTwoByteSelectionVector_;
     }
 
+    // optional int32 affected_rows_count = 4;
+    public static final int AFFECTED_ROWS_COUNT_FIELD_NUMBER = 4;
+    private int affectedRowsCount_;
+    /**
+     * <code>optional int32 affected_rows_count = 4;</code>
+     */
+    public boolean hasAffectedRowsCount() {
+      return ((bitField0_ & 0x00000004) == 0x00000004);
+    }
+    /**
+     * <code>optional int32 affected_rows_count = 4;</code>
+     */
+    public int getAffectedRowsCount() {
+      return affectedRowsCount_;
+    }
+
     private void initFields() {
       recordCount_ = 0;
       field_ = java.util.Collections.emptyList();
       carriesTwoByteSelectionVector_ = false;
+      affectedRowsCount_ = 0;
     }
     private byte memoizedIsInitialized = -1;
     public final boolean isInitialized() {
@@ -7003,6 +7035,9 @@ public void writeTo(com.google.protobuf.CodedOutputStream output)
       if (((bitField0_ & 0x00000002) == 0x00000002)) {
         output.writeBool(3, carriesTwoByteSelectionVector_);
       }
+      if (((bitField0_ & 0x00000004) == 0x00000004)) {
+        output.writeInt32(4, affectedRowsCount_);
+      }
       getUnknownFields().writeTo(output);
     }
 
@@ -7024,6 +7059,10 @@ public int getSerializedSize() {
         size += com.google.protobuf.CodedOutputStream
           .computeBoolSize(3, carriesTwoByteSelectionVector_);
       }
+      if (((bitField0_ & 0x00000004) == 0x00000004)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeInt32Size(4, affectedRowsCount_);
+      }
       size += getUnknownFields().getSerializedSize();
       memoizedSerializedSize = size;
       return size;
@@ -7151,6 +7190,8 @@ public Builder clear() {
         }
         carriesTwoByteSelectionVector_ = false;
         bitField0_ = (bitField0_ & ~0x00000004);
+        affectedRowsCount_ = 0;
+        bitField0_ = (bitField0_ & ~0x00000008);
         return this;
       }
 
@@ -7196,6 +7237,10 @@ public Builder clone() {
           to_bitField0_ |= 0x00000002;
         }
         result.carriesTwoByteSelectionVector_ = carriesTwoByteSelectionVector_;
+        if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
+          to_bitField0_ |= 0x00000004;
+        }
+        result.affectedRowsCount_ = affectedRowsCount_;
         result.bitField0_ = to_bitField0_;
         onBuilt();
         return result;
@@ -7244,6 +7289,9 @@ public Builder mergeFrom(org.apache.drill.exec.proto.UserBitShared.RecordBatchDe
        if (other.hasCarriesTwoByteSelectionVector()) {
          setCarriesTwoByteSelectionVector(other.getCarriesTwoByteSelectionVector());
         }
+        if (other.hasAffectedRowsCount()) {
+          setAffectedRowsCount(other.getAffectedRowsCount());
+        }
         this.mergeUnknownFields(other.getUnknownFields());
         return this;
       }
@@ -7577,6 +7625,39 @@ public Builder clearCarriesTwoByteSelectionVector() {
         return this;
       }
 
+      // optional int32 affected_rows_count = 4;
+      private int affectedRowsCount_ ;
+      /**
+       * <code>optional int32 affected_rows_count = 4;</code>
+       */
+      public boolean hasAffectedRowsCount() {
+        return ((bitField0_ & 0x00000008) == 0x00000008);
+      }
+      /**
+       * <code>optional int32 affected_rows_count = 4;</code>
+       */
+      public int getAffectedRowsCount() {
+        return affectedRowsCount_;
+      }
+      /**
+       * <code>optional int32 affected_rows_count = 4;</code>
+       */
+      public Builder setAffectedRowsCount(int value) {
+        bitField0_ |= 0x00000008;
+        affectedRowsCount_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>optional int32 affected_rows_count = 4;</code>
+       */
+      public Builder clearAffectedRowsCount() {
+        bitField0_ = (bitField0_ & ~0x00000008);
+        affectedRowsCount_ = 0;
+        onChanged();
+        return this;
+      }
+
       // @@protoc_insertion_point(builder_scope:exec.shared.RecordBatchDef)
     }
 
@@ -11586,6 +11667,16 @@ public Builder removeError(int index) {
      * <code>optional .exec.shared.RecordBatchDef def = 3;</code>
      */
    org.apache.drill.exec.proto.UserBitShared.RecordBatchDefOrBuilder getDefOrBuilder();
+
+    // optional int32 affected_rows_count = 4;
+    /**
+     * <code>optional int32 affected_rows_count = 4;</code>
+     */
+    boolean hasAffectedRowsCount();
+    /**
+     * <code>optional int32 affected_rows_count = 4;</code>
+     */
+    int getAffectedRowsCount();
   }
   /**
    * Protobuf type {@code exec.shared.QueryData}
@@ -11674,6 +11765,11 @@ private QueryData(
               bitField0_ |= 0x00000004;
               break;
             }
+            case 32: {
+              bitField0_ |= 0x00000008;
+              affectedRowsCount_ = input.readInt32();
+              break;
+            }
           }
         }
       } catch (com.google.protobuf.InvalidProtocolBufferException e) {
@@ -11774,10 +11870,27 @@ public boolean hasDef() {
       return def_;
     }
 
+    // optional int32 affected_rows_count = 4;
+    public static final int AFFECTED_ROWS_COUNT_FIELD_NUMBER = 4;
+    private int affectedRowsCount_;
+    /**
+     * <code>optional int32 affected_rows_count = 4;</code>
+     */
+    public boolean hasAffectedRowsCount() {
+      return ((bitField0_ & 0x00000008) == 0x00000008);
+    }
+    /**
+     * <code>optional int32 affected_rows_count = 4;</code>
+     */
+    public int getAffectedRowsCount() {
+      return affectedRowsCount_;
+    }
+
     private void initFields() {
      queryId_ = org.apache.drill.exec.proto.UserBitShared.QueryId.getDefaultInstance();
      rowCount_ = 0;
      def_ = org.apache.drill.exec.proto.UserBitShared.RecordBatchDef.getDefaultInstance();
+      affectedRowsCount_ = 0;
     }
     private byte memoizedIsInitialized = -1;
     public final boolean isInitialized() {
@@ -11800,6 +11913,9 @@ public void writeTo(com.google.protobuf.CodedOutputStream output)
       if (((bitField0_ & 0x00000004) == 0x00000004)) {
         output.writeMessage(3, def_);
       }
+      if (((bitField0_ & 0x00000008) == 0x00000008)) {
+        output.writeInt32(4, affectedRowsCount_);
+      }
       getUnknownFields().writeTo(output);
     }
 
@@ -11821,6 +11937,10 @@ public int getSerializedSize() {
         size += com.google.protobuf.CodedOutputStream
           .computeMessageSize(3, def_);
       }
+      if (((bitField0_ & 0x00000008) == 0x00000008)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeInt32Size(4, affectedRowsCount_);
+      }
       size += getUnknownFields().getSerializedSize();
       memoizedSerializedSize = size;
       return size;
@@ -11958,6 +12078,8 @@ public Builder clear() {
           defBuilder_.clear();
         }
         bitField0_ = (bitField0_ & ~0x00000004);
+        affectedRowsCount_ = 0;
+        bitField0_ = (bitField0_ & ~0x00000008);
         return this;
       }
 
@@ -12006,6 +12128,10 @@ public Builder clone() {
         } else {
           result.def_ = defBuilder_.build();
         }
+        if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
+          to_bitField0_ |= 0x00000008;
+        }
+        result.affectedRowsCount_ = affectedRowsCount_;
         result.bitField0_ = to_bitField0_;
         onBuilt();
         return result;
@@ -12031,6 +12157,9 @@ public Builder mergeFrom(org.apache.drill.exec.proto.UserBitShared.QueryData oth
         if (other.hasDef()) {
           mergeDef(other.getDef());
         }
+        if (other.hasAffectedRowsCount()) {
+          setAffectedRowsCount(other.getAffectedRowsCount());
+        }
         this.mergeUnknownFields(other.getUnknownFields());
         return this;
       }
@@ -12325,6 +12454,39 @@ public Builder clearDef() {
         return defBuilder_;
       }
 
+      // optional int32 affected_rows_count = 4;
+      private int affectedRowsCount_ ;
+      /**
+       * <code>optional int32 affected_rows_count = 4;</code>
+       */
+      public boolean hasAffectedRowsCount() {
+        return ((bitField0_ & 0x00000008) == 0x00000008);
+      }
+      /**
+       * <code>optional int32 affected_rows_count = 4;</code>
+       */
+      public int getAffectedRowsCount() {
+        return affectedRowsCount_;
+      }
+      /**
+       * <code>optional int32 affected_rows_count = 4;</code>
+       */
+      public Builder setAffectedRowsCount(int value) {
+        bitField0_ |= 0x00000008;
+        affectedRowsCount_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>optional int32 affected_rows_count = 4;</code>
+       */
+      public Builder clearAffectedRowsCount() {
+        bitField0_ = (bitField0_ & ~0x00000008);
+        affectedRowsCount_ = 0;
+        onChanged();
+        return this;
+      }
+
       // @@protoc_insertion_point(builder_scope:exec.shared.QueryData)
     }
 
@@ -24354,117 +24516,119 @@ public Builder clearStatus() {
       "\013method_name\030\004 \001(\t\022\030\n\020is_native_method\030\005" +
       " \001(\010\"\\\n\014ParsingError\022\024\n\014start_column\030\002 
\001" +
       "(\005\022\021\n\tstart_row\030\003 
\001(\005\022\022\n\nend_column\030\004 \001(" +
-      "\005\022\017\n\007end_row\030\005 
\001(\005\"~\n\016RecordBatchDef\022\024\n\014" +
-      "record_count\030\001 \001(\005\022+\n\005field\030\002 
\003(\0132\034.exec" +
-      ".shared.SerializedField\022)\n!carries_two_b" +
-      "yte_selection_vector\030\003 \001(\010\"\205\001\n\010NamePart\022",
-      "(\n\004type\030\001 \001(\0162\032.exec.shared.NamePart.Typ" +
-      "e\022\014\n\004name\030\002 \001(\t\022$\n\005child\030\003 
\001(\0132\025.exec.sh" +
-      
"ared.NamePart\"\033\n\004Type\022\010\n\004NAME\020\000\022\t\n\005ARRAY" +
-      "\020\001\"\324\001\n\017SerializedField\022%\n\nmajor_type\030\001 
\001" +
-      "(\0132\021.common.MajorType\022(\n\tname_part\030\002 \001(\013" +
-      "2\025.exec.shared.NamePart\022+\n\005child\030\003 \003(\0132\034" +
-      ".exec.shared.SerializedField\022\023\n\013value_co" +
-      "unt\030\004 \001(\005\022\027\n\017var_byte_length\030\005 
\001(\005\022\025\n\rbu" +
-      "ffer_length\030\007 \001(\005\"7\n\nNodeStatus\022\017\n\007node_" +
-      "id\030\001 \001(\005\022\030\n\020memory_footprint\030\002 
\001(\003\"\263\002\n\013Q",
-      "ueryResult\0228\n\013query_state\030\001 \001(\0162#.exec.s" +
-      "hared.QueryResult.QueryState\022&\n\010query_id" +
-      "\030\002 \001(\0132\024.exec.shared.QueryId\022(\n\005error\030\003 " +
-      "\003(\0132\031.exec.shared.DrillPBError\"\227\001\n\nQuery" +
-      
"State\022\014\n\010STARTING\020\000\022\013\n\007RUNNING\020\001\022\r\n\tCOMP"
 +
-      
"LETED\020\002\022\014\n\010CANCELED\020\003\022\n\n\006FAILED\020\004\022\032\n\026CAN"
 +
-      "CELLATION_REQUESTED\020\005\022\014\n\010ENQUEUED\020\006\022\r\n\tP" +
-      "REPARING\020\007\022\014\n\010PLANNING\020\010\"p\n\tQueryData\022&\n" +
-      "\010query_id\030\001 \001(\0132\024.exec.shared.QueryId\022\021\n" +
-      "\trow_count\030\002 \001(\005\022(\n\003def\030\003 
\001(\0132\033.exec.sha",
-      "red.RecordBatchDef\"\330\001\n\tQueryInfo\022\r\n\005quer" +
-      "y\030\001 \001(\t\022\r\n\005start\030\002 
\001(\003\0222\n\005state\030\003 \001(\0162#." +
-      "exec.shared.QueryResult.QueryState\022\017\n\004us" +
-      "er\030\004 \001(\t:\001-\022\'\n\007foreman\030\005 
\001(\0132\026.exec.Dril" +
-      "lbitEndpoint\022\024\n\014options_json\030\006 \001(\t\022\022\n\nto" +
-      "tal_cost\030\007 \001(\001\022\025\n\nqueue_name\030\010 
\001(\t:\001-\"\263\004" +
-      "\n\014QueryProfile\022 \n\002id\030\001 \001(\0132\024.exec.shared" +
-      ".QueryId\022$\n\004type\030\002 \001(\0162\026.exec.shared.Que" +
-      "ryType\022\r\n\005start\030\003 \001(\003\022\013\n\003end\030\004 
\001(\003\022\r\n\005qu" +
-      "ery\030\005 \001(\t\022\014\n\004plan\030\006 
\001(\t\022\'\n\007foreman\030\007 \001(\013",
-      "2\026.exec.DrillbitEndpoint\0222\n\005state\030\010 \001(\0162" +
-      "#.exec.shared.QueryResult.QueryState\022\027\n\017" +
-      "total_fragments\030\t \001(\005\022\032\n\022finished_fragme" +
-      "nts\030\n \001(\005\022;\n\020fragment_profile\030\013 \003(\0132!.ex" +
-      "ec.shared.MajorFragmentProfile\022\017\n\004user\030\014" +
-      " \001(\t:\001-\022\r\n\005error\030\r 
\001(\t\022\024\n\014verboseError\030\016" +
-      " \001(\t\022\020\n\010error_id\030\017 
\001(\t\022\022\n\nerror_node\030\020 \001" +
-      "(\t\022\024\n\014options_json\030\021 
\001(\t\022\017\n\007planEnd\030\022 \001(" +
-      "\003\022\024\n\014queueWaitEnd\030\023 
\001(\003\022\022\n\ntotal_cost\030\024 " +
-      "\001(\001\022\025\n\nqueue_name\030\025 
\001(\t:\001-\022\017\n\007queryId\030\026 ",
-      "\001(\t\"t\n\024MajorFragmentProfile\022\031\n\021major_fra" +
-      "gment_id\030\001 \001(\005\022A\n\026minor_fragment_profile" +
-      "\030\002 \003(\0132!.exec.shared.MinorFragmentProfil" +
-      "e\"\350\002\n\024MinorFragmentProfile\022)\n\005state\030\001 \001(" +
-      "\0162\032.exec.shared.FragmentState\022(\n\005error\030\002" +
-      " \001(\0132\031.exec.shared.DrillPBError\022\031\n\021minor" +
-      "_fragment_id\030\003 \001(\005\0226\n\020operator_profile\030\004" +
-      " \003(\0132\034.exec.shared.OperatorProfile\022\022\n\nst" +
-      "art_time\030\005 \001(\003\022\020\n\010end_time\030\006 
\001(\003\022\023\n\013memo" +
-      "ry_used\030\007 \001(\003\022\027\n\017max_memory_used\030\010 
\001(\003\022(",
-      "\n\010endpoint\030\t \001(\0132\026.exec.DrillbitEndpoint" +
-      "\022\023\n\013last_update\030\n 
\001(\003\022\025\n\rlast_progress\030\013" +
-      " \001(\003\"\377\001\n\017OperatorProfile\0221\n\rinput_profil" +
-      "e\030\001 \003(\0132\032.exec.shared.StreamProfile\022\023\n\013o" +
-      "perator_id\030\003 \001(\005\022\025\n\roperator_type\030\004 
\001(\005\022" +
-      "\023\n\013setup_nanos\030\005 
\001(\003\022\025\n\rprocess_nanos\030\006 " +
-      "\001(\003\022#\n\033peak_local_memory_allocated\030\007 \001(\003" +
-      "\022(\n\006metric\030\010 \003(\0132\030.exec.shared.MetricVal" +
-      "ue\022\022\n\nwait_nanos\030\t \001(\003\"B\n\rStreamProfile\022" +
-      "\017\n\007records\030\001 \001(\003\022\017\n\007batches\030\002 
\001(\003\022\017\n\007sch",
-      "emas\030\003 
\001(\003\"J\n\013MetricValue\022\021\n\tmetric_id\030\001" +
-      " \001(\005\022\022\n\nlong_value\030\002 
\001(\003\022\024\n\014double_value" +
-      "\030\003 \001(\001\")\n\010Registry\022\035\n\003jar\030\001 
\003(\0132\020.exec.s" +
-      "hared.Jar\"/\n\003Jar\022\014\n\004name\030\001 
\001(\t\022\032\n\022functi" +
-      "on_signature\030\002 \003(\t\"W\n\013SaslMessage\022\021\n\tmec" +
-      "hanism\030\001 \001(\t\022\014\n\004data\030\002 
\001(\014\022\'\n\006status\030\003 \001" +
-      "(\0162\027.exec.shared.SaslStatus*5\n\nRpcChanne" +
-      
"l\022\017\n\013BIT_CONTROL\020\000\022\014\n\010BIT_DATA\020\001\022\010\n\004USER"
 +
-      
"\020\002*V\n\tQueryType\022\007\n\003SQL\020\001\022\013\n\007LOGICAL\020\002\022\014\n"
 +
-      
"\010PHYSICAL\020\003\022\r\n\tEXECUTION\020\004\022\026\n\022PREPARED_S",
-      "TATEMENT\020\005*\207\001\n\rFragmentState\022\013\n\007SENDING\020" +
-      
"\000\022\027\n\023AWAITING_ALLOCATION\020\001\022\013\n\007RUNNING\020\002\022"
 +
-      
"\014\n\010FINISHED\020\003\022\r\n\tCANCELLED\020\004\022\n\n\006FAILED\020\005"
 +
-      "\022\032\n\026CANCELLATION_REQUESTED\020\006*\222\t\n\020CoreOpe" +
-      "ratorType\022\021\n\rSINGLE_SENDER\020\000\022\024\n\020BROADCAS" +
-      "T_SENDER\020\001\022\n\n\006FILTER\020\002\022\022\n\016HASH_AGGREGATE" 
+
-      
"\020\003\022\r\n\tHASH_JOIN\020\004\022\016\n\nMERGE_JOIN\020\005\022\031\n\025HAS"
 +
-      "H_PARTITION_SENDER\020\006\022\t\n\005LIMIT\020\007\022\024\n\020MERGI" 
+
-      "NG_RECEIVER\020\010\022\034\n\030ORDERED_PARTITION_SENDE" +
-      
"R\020\t\022\013\n\007PROJECT\020\n\022\026\n\022UNORDERED_RECEIVER\020\013",
-      "\022\032\n\026RANGE_PARTITION_SENDER\020\014\022\n\n\006SCREEN\020\r" +
-      "\022\034\n\030SELECTION_VECTOR_REMOVER\020\016\022\027\n\023STREAM" +
-      "ING_AGGREGATE\020\017\022\016\n\nTOP_N_SORT\020\020\022\021\n\rEXTER" +
-      
"NAL_SORT\020\021\022\t\n\005TRACE\020\022\022\t\n\005UNION\020\023\022\014\n\010OLD_"
 +
-      "SORT\020\024\022\032\n\026PARQUET_ROW_GROUP_SCAN\020\025\022\021\n\rHI" 
+
-      "VE_SUB_SCAN\020\026\022\025\n\021SYSTEM_TABLE_SCAN\020\027\022\021\n\r" 
+
-      
"MOCK_SUB_SCAN\020\030\022\022\n\016PARQUET_WRITER\020\031\022\023\n\017D" +
-      "IRECT_SUB_SCAN\020\032\022\017\n\013TEXT_WRITER\020\033\022\021\n\rTEX" 
+
-      "T_SUB_SCAN\020\034\022\021\n\rJSON_SUB_SCAN\020\035\022\030\n\024INFO_" 
+
-      "SCHEMA_SUB_SCAN\020\036\022\023\n\017COMPLEX_TO_JSON\020\037\022\025",
-      "\n\021PRODUCER_CONSUMER\020 \022\022\n\016HBASE_SUB_SCAN\020" +
-      
"!\022\n\n\006WINDOW\020\"\022\024\n\020NESTED_LOOP_JOIN\020#\022\021\n\rA" +
-      "VRO_SUB_SCAN\020$\022\021\n\rPCAP_SUB_SCAN\020%\022\022\n\016KAF" +
-      "KA_SUB_SCAN\020&\022\021\n\rKUDU_SUB_SCAN\020\'\022\013\n\007FLAT" +
-      
"TEN\020(\022\020\n\014LATERAL_JOIN\020)\022\n\n\006UNNEST\020*\022,\n(H" +
-      "IVE_DRILL_NATIVE_PARQUET_ROW_GROUP_SCAN\020" +
-      
"+\022\r\n\tJDBC_SCAN\020,\022\022\n\016REGEX_SUB_SCAN\020-\022\023\n\017" +
-      "MAPRDB_SUB_SCAN\020.\022\022\n\016MONGO_SUB_SCAN\020/\022\017\n" +
-      "\013KUDU_WRITER\0200\022\026\n\022OPEN_TSDB_SUB_SCAN\0201\022\017" +
-      "\n\013JSON_WRITER\0202\022\026\n\022HTPPD_LOG_SUB_SCAN\0203\022",
-      "\022\n\016IMAGE_SUB_SCAN\0204\022\025\n\021SEQUENCE_SUB_SCAN" +
-      "\0205\022\023\n\017PARTITION_LIMIT\0206\022\023\n\017PCAPNG_SUB_SC" +
-      "AN\0207\022\022\n\016RUNTIME_FILTER\0208\022\017\n\013ROWKEY_JOIN\020" +
-      "9*g\n\nSaslStatus\022\020\n\014SASL_UNKNOWN\020\000\022\016\n\nSAS" +
-      
"L_START\020\001\022\024\n\020SASL_IN_PROGRESS\020\002\022\020\n\014SASL_" +
-      "SUCCESS\020\003\022\017\n\013SASL_FAILED\020\004B.\n\033org.apache" +
-      ".drill.exec.protoB\rUserBitSharedH\001"
+      "\005\022\017\n\007end_row\030\005 
\001(\005\"\233\001\n\016RecordBatchDef\022\024\n" +
+      "\014record_count\030\001 \001(\005\022+\n\005field\030\002 
\003(\0132\034.exe" +
+      "c.shared.SerializedField\022)\n!carries_two_" +
+      "byte_selection_vector\030\003 \001(\010\022\033\n\023affected_",
+      "rows_count\030\004 
\001(\005\"\205\001\n\010NamePart\022(\n\004type\030\001 " +
+      "\001(\0162\032.exec.shared.NamePart.Type\022\014\n\004name\030" +
+      "\002 \001(\t\022$\n\005child\030\003 \001(\0132\025.exec.shared.NameP" +
+      
"art\"\033\n\004Type\022\010\n\004NAME\020\000\022\t\n\005ARRAY\020\001\"\324\001\n\017Ser"
 +
+      "ializedField\022%\n\nmajor_type\030\001 \001(\0132\021.commo" +
+      "n.MajorType\022(\n\tname_part\030\002 \001(\0132\025.exec.sh" +
+      "ared.NamePart\022+\n\005child\030\003 \003(\0132\034.exec.shar" +
+      "ed.SerializedField\022\023\n\013value_count\030\004 \001(\005\022" +
+      "\027\n\017var_byte_length\030\005 \001(\005\022\025\n\rbuffer_lengt" +
+      "h\030\007 \001(\005\"7\n\nNodeStatus\022\017\n\007node_id\030\001 
\001(\005\022\030",
+      "\n\020memory_footprint\030\002 \001(\003\"\263\002\n\013QueryResult" +
+      "\0228\n\013query_state\030\001 \001(\0162#.exec.shared.Quer" +
+      "yResult.QueryState\022&\n\010query_id\030\002 \001(\0132\024.e" +
+      "xec.shared.QueryId\022(\n\005error\030\003 \003(\0132\031.exec" +
+      ".shared.DrillPBError\"\227\001\n\nQueryState\022\014\n\010S" +
+      
"TARTING\020\000\022\013\n\007RUNNING\020\001\022\r\n\tCOMPLETED\020\002\022\014\n"
 +
+      
"\010CANCELED\020\003\022\n\n\006FAILED\020\004\022\032\n\026CANCELLATION_" +
+      
"REQUESTED\020\005\022\014\n\010ENQUEUED\020\006\022\r\n\tPREPARING\020\007" +
+      
"\022\014\n\010PLANNING\020\010\"\215\001\n\tQueryData\022&\n\010query_id" +
+      "\030\001 \001(\0132\024.exec.shared.QueryId\022\021\n\trow_coun",
+      "t\030\002 \001(\005\022(\n\003def\030\003 
\001(\0132\033.exec.shared.Recor" +
+      "dBatchDef\022\033\n\023affected_rows_count\030\004 \001(\005\"\330" +
+      "\001\n\tQueryInfo\022\r\n\005query\030\001 
\001(\t\022\r\n\005start\030\002 \001" +
+      "(\003\0222\n\005state\030\003 \001(\0162#.exec.shared.QueryRes" +
+      "ult.QueryState\022\017\n\004user\030\004 
\001(\t:\001-\022\'\n\007forem" +
+      "an\030\005 \001(\0132\026.exec.DrillbitEndpoint\022\024\n\014opti" +
+      "ons_json\030\006 \001(\t\022\022\n\ntotal_cost\030\007 
\001(\001\022\025\n\nqu" +
+      "eue_name\030\010 \001(\t:\001-\"\263\004\n\014QueryProfile\022 
\n\002id" +
+      "\030\001 \001(\0132\024.exec.shared.QueryId\022$\n\004type\030\002 
\001" +
+      "(\0162\026.exec.shared.QueryType\022\r\n\005start\030\003 \001(",
+      "\003\022\013\n\003end\030\004 \001(\003\022\r\n\005query\030\005 
\001(\t\022\014\n\004plan\030\006 " +
+      "\001(\t\022\'\n\007foreman\030\007 \001(\0132\026.exec.DrillbitEndp" +
+      "oint\0222\n\005state\030\010 \001(\0162#.exec.shared.QueryR" +
+      "esult.QueryState\022\027\n\017total_fragments\030\t \001(" +
+      "\005\022\032\n\022finished_fragments\030\n \001(\005\022;\n\020fragmen" 
+
+      "t_profile\030\013 \003(\0132!.exec.shared.MajorFragm" +
+      "entProfile\022\017\n\004user\030\014 
\001(\t:\001-\022\r\n\005error\030\r \001" +
+      "(\t\022\024\n\014verboseError\030\016 
\001(\t\022\020\n\010error_id\030\017 \001" +
+      "(\t\022\022\n\nerror_node\030\020 
\001(\t\022\024\n\014options_json\030\021" +
+      " \001(\t\022\017\n\007planEnd\030\022 
\001(\003\022\024\n\014queueWaitEnd\030\023 ",
+      "\001(\003\022\022\n\ntotal_cost\030\024 
\001(\001\022\025\n\nqueue_name\030\025 " +
+      "\001(\t:\001-\022\017\n\007queryId\030\026 
\001(\t\"t\n\024MajorFragment" +
+      "Profile\022\031\n\021major_fragment_id\030\001 \001(\005\022A\n\026mi" +
+      "nor_fragment_profile\030\002 \003(\0132!.exec.shared" +
+      ".MinorFragmentProfile\"\350\002\n\024MinorFragmentP" +
+      "rofile\022)\n\005state\030\001 \001(\0162\032.exec.shared.Frag" +
+      "mentState\022(\n\005error\030\002 \001(\0132\031.exec.shared.D" +
+      "rillPBError\022\031\n\021minor_fragment_id\030\003 \001(\005\0226" +
+      "\n\020operator_profile\030\004 \003(\0132\034.exec.shared.O" +
+      "peratorProfile\022\022\n\nstart_time\030\005 \001(\003\022\020\n\010en",
+      "d_time\030\006 \001(\003\022\023\n\013memory_used\030\007 
\001(\003\022\027\n\017max" +
+      "_memory_used\030\010 \001(\003\022(\n\010endpoint\030\t 
\001(\0132\026.e" +
+      "xec.DrillbitEndpoint\022\023\n\013last_update\030\n \001(" +
+      "\003\022\025\n\rlast_progress\030\013 
\001(\003\"\377\001\n\017OperatorPro" +
+      "file\0221\n\rinput_profile\030\001 \003(\0132\032.exec.share" +
+      "d.StreamProfile\022\023\n\013operator_id\030\003 \001(\005\022\025\n\r" 
+
+      "operator_type\030\004 \001(\005\022\023\n\013setup_nanos\030\005 
\001(\003" +
+      "\022\025\n\rprocess_nanos\030\006 \001(\003\022#\n\033peak_local_me" +
+      "mory_allocated\030\007 \001(\003\022(\n\006metric\030\010 
\003(\0132\030.e" +
+      "xec.shared.MetricValue\022\022\n\nwait_nanos\030\t \001",
+      "(\003\"B\n\rStreamProfile\022\017\n\007records\030\001 
\001(\003\022\017\n\007" +
+      "batches\030\002 \001(\003\022\017\n\007schemas\030\003 
\001(\003\"J\n\013Metric" +
+      "Value\022\021\n\tmetric_id\030\001 
\001(\005\022\022\n\nlong_value\030\002" +
+      " \001(\003\022\024\n\014double_value\030\003 
\001(\001\")\n\010Registry\022\035" +
+      "\n\003jar\030\001 
\003(\0132\020.exec.shared.Jar\"/\n\003Jar\022\014\n\004" +
+      "name\030\001 \001(\t\022\032\n\022function_signature\030\002 
\003(\t\"W" +
+      "\n\013SaslMessage\022\021\n\tmechanism\030\001 
\001(\t\022\014\n\004data" +
+      "\030\002 \001(\014\022\'\n\006status\030\003 
\001(\0162\027.exec.shared.Sas" +
+      "lStatus*5\n\nRpcChannel\022\017\n\013BIT_CONTROL\020\000\022\014" +
+      
"\n\010BIT_DATA\020\001\022\010\n\004USER\020\002*V\n\tQueryType\022\007\n\003S",
+      
"QL\020\001\022\013\n\007LOGICAL\020\002\022\014\n\010PHYSICAL\020\003\022\r\n\tEXECU"
 +
+      "TION\020\004\022\026\n\022PREPARED_STATEMENT\020\005*\207\001\n\rFragm" 
+
+      "entState\022\013\n\007SENDING\020\000\022\027\n\023AWAITING_ALLOCA" +
+      
"TION\020\001\022\013\n\007RUNNING\020\002\022\014\n\010FINISHED\020\003\022\r\n\tCAN"
 +
+      "CELLED\020\004\022\n\n\006FAILED\020\005\022\032\n\026CANCELLATION_REQ" 
+
+      "UESTED\020\006*\222\t\n\020CoreOperatorType\022\021\n\rSINGLE_" +
+      "SENDER\020\000\022\024\n\020BROADCAST_SENDER\020\001\022\n\n\006FILTER" 
+
+      
"\020\002\022\022\n\016HASH_AGGREGATE\020\003\022\r\n\tHASH_JOIN\020\004\022\016\n"
 +
+      "\nMERGE_JOIN\020\005\022\031\n\025HASH_PARTITION_SENDER\020\006" +
+      
"\022\t\n\005LIMIT\020\007\022\024\n\020MERGING_RECEIVER\020\010\022\034\n\030ORD",
+      "ERED_PARTITION_SENDER\020\t\022\013\n\007PROJECT\020\n\022\026\n\022" +
+      "UNORDERED_RECEIVER\020\013\022\032\n\026RANGE_PARTITION_" +
+      "SENDER\020\014\022\n\n\006SCREEN\020\r\022\034\n\030SELECTION_VECTOR" +
+      "_REMOVER\020\016\022\027\n\023STREAMING_AGGREGATE\020\017\022\016\n\nT" 
+
+      
"OP_N_SORT\020\020\022\021\n\rEXTERNAL_SORT\020\021\022\t\n\005TRACE\020" +
+      
"\022\022\t\n\005UNION\020\023\022\014\n\010OLD_SORT\020\024\022\032\n\026PARQUET_RO"
 +
+      "W_GROUP_SCAN\020\025\022\021\n\rHIVE_SUB_SCAN\020\026\022\025\n\021SYS" 
+
+      "TEM_TABLE_SCAN\020\027\022\021\n\rMOCK_SUB_SCAN\020\030\022\022\n\016P" 
+
+      
"ARQUET_WRITER\020\031\022\023\n\017DIRECT_SUB_SCAN\020\032\022\017\n\013" +
+      "TEXT_WRITER\020\033\022\021\n\rTEXT_SUB_SCAN\020\034\022\021\n\rJSON",
+      "_SUB_SCAN\020\035\022\030\n\024INFO_SCHEMA_SUB_SCAN\020\036\022\023\n" +
+      "\017COMPLEX_TO_JSON\020\037\022\025\n\021PRODUCER_CONSUMER\020" +
+      " 
\022\022\n\016HBASE_SUB_SCAN\020!\022\n\n\006WINDOW\020\"\022\024\n\020NES" +
+      "TED_LOOP_JOIN\020#\022\021\n\rAVRO_SUB_SCAN\020$\022\021\n\rPC" +
+      "AP_SUB_SCAN\020%\022\022\n\016KAFKA_SUB_SCAN\020&\022\021\n\rKUD" +
+      "U_SUB_SCAN\020\'\022\013\n\007FLATTEN\020(\022\020\n\014LATERAL_JOI" +
+      "N\020)\022\n\n\006UNNEST\020*\022,\n(HIVE_DRILL_NATIVE_PAR" +
+      "QUET_ROW_GROUP_SCAN\020+\022\r\n\tJDBC_SCAN\020,\022\022\n\016" +
+      "REGEX_SUB_SCAN\020-\022\023\n\017MAPRDB_SUB_SCAN\020.\022\022\n" +
+      "\016MONGO_SUB_SCAN\020/\022\017\n\013KUDU_WRITER\0200\022\026\n\022OP",
+      "EN_TSDB_SUB_SCAN\0201\022\017\n\013JSON_WRITER\0202\022\026\n\022H" +
+      "TPPD_LOG_SUB_SCAN\0203\022\022\n\016IMAGE_SUB_SCAN\0204\022" +
+      "\025\n\021SEQUENCE_SUB_SCAN\0205\022\023\n\017PARTITION_LIMI" +
+      "T\0206\022\023\n\017PCAPNG_SUB_SCAN\0207\022\022\n\016RUNTIME_FILT" +
+      "ER\0208\022\017\n\013ROWKEY_JOIN\0209*g\n\nSaslStatus\022\020\n\014S" +
+      "ASL_UNKNOWN\020\000\022\016\n\nSASL_START\020\001\022\024\n\020SASL_IN" 
+
+      
"_PROGRESS\020\002\022\020\n\014SASL_SUCCESS\020\003\022\017\n\013SASL_FA" +
+      "ILED\020\004B.\n\033org.apache.drill.exec.protoB\rU" +
+      "serBitSharedH\001"
     };
    com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
      new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -24512,7 +24676,7 @@ public Builder clearStatus() {
           internal_static_exec_shared_RecordBatchDef_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_exec_shared_RecordBatchDef_descriptor,
-              new java.lang.String[] { "RecordCount", "Field", 
"CarriesTwoByteSelectionVector", });
+              new java.lang.String[] { "RecordCount", "Field", 
"CarriesTwoByteSelectionVector", "AffectedRowsCount", });
           internal_static_exec_shared_NamePart_descriptor =
             getDescriptor().getMessageTypes().get(7);
           internal_static_exec_shared_NamePart_fieldAccessorTable = new
@@ -24542,7 +24706,7 @@ public Builder clearStatus() {
           internal_static_exec_shared_QueryData_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_exec_shared_QueryData_descriptor,
-              new java.lang.String[] { "QueryId", "RowCount", "Def", });
+              new java.lang.String[] { "QueryId", "RowCount", "Def", 
"AffectedRowsCount", });
           internal_static_exec_shared_QueryInfo_descriptor =
             getDescriptor().getMessageTypes().get(12);
           internal_static_exec_shared_QueryInfo_fieldAccessorTable = new
diff --git a/protocol/src/main/java/org/apache/drill/exec/proto/beans/QueryData.java b/protocol/src/main/java/org/apache/drill/exec/proto/beans/QueryData.java
index 36f27477beb..7744848561e 100644
--- a/protocol/src/main/java/org/apache/drill/exec/proto/beans/QueryData.java
+++ b/protocol/src/main/java/org/apache/drill/exec/proto/beans/QueryData.java
@@ -50,6 +50,7 @@ public static QueryData getDefaultInstance()
     private QueryId queryId;
     private int rowCount;
     private RecordBatchDef def;
+    private int affectedRowsCount;
 
     public QueryData()
     {
@@ -97,6 +98,19 @@ public QueryData setDef(RecordBatchDef def)
         return this;
     }
 
+    // affectedRowsCount
+
+    public int getAffectedRowsCount()
+    {
+        return affectedRowsCount;
+    }
+
+    public QueryData setAffectedRowsCount(int affectedRowsCount)
+    {
+        this.affectedRowsCount = affectedRowsCount;
+        return this;
+    }
+
     // java serialization
 
     public void readExternal(ObjectInput in) throws IOException
@@ -162,6 +176,9 @@ public void mergeFrom(Input input, QueryData message) throws IOException
                    message.def = input.mergeObject(message.def, RecordBatchDef.getSchema());
                     break;
 
+                case 4:
+                    message.affectedRowsCount = input.readInt32();
+                    break;
                 default:
                     input.handleUnknownField(number, this);
             }   
@@ -181,6 +198,9 @@ public void writeTo(Output output, QueryData message) throws IOException
         if(message.def != null)
             output.writeObject(3, message.def, RecordBatchDef.getSchema(), false);
 
+
+        if(message.affectedRowsCount != 0)
+            output.writeInt32(4, message.affectedRowsCount, false);
     }
 
     public String getFieldName(int number)
@@ -190,6 +210,7 @@ public String getFieldName(int number)
             case 1: return "queryId";
             case 2: return "rowCount";
             case 3: return "def";
+            case 4: return "affectedRowsCount";
             default: return null;
         }
     }
@@ -206,6 +227,7 @@ public int getFieldNumber(String name)
         __fieldMap.put("queryId", 1);
         __fieldMap.put("rowCount", 2);
         __fieldMap.put("def", 3);
+        __fieldMap.put("affectedRowsCount", 4);
     }
     
 }
diff --git a/protocol/src/main/java/org/apache/drill/exec/proto/beans/RecordBatchDef.java b/protocol/src/main/java/org/apache/drill/exec/proto/beans/RecordBatchDef.java
index 53bfc91f585..83760133cdf 100644
--- a/protocol/src/main/java/org/apache/drill/exec/proto/beans/RecordBatchDef.java
+++ b/protocol/src/main/java/org/apache/drill/exec/proto/beans/RecordBatchDef.java
@@ -52,6 +52,7 @@ public static RecordBatchDef getDefaultInstance()
     private int recordCount;
     private List<SerializedField> field;
     private Boolean carriesTwoByteSelectionVector;
+    private int affectedRowsCount;
 
     public RecordBatchDef()
     {
@@ -99,6 +100,19 @@ public RecordBatchDef setCarriesTwoByteSelectionVector(Boolean carriesTwoByteSel
         return this;
     }
 
+    // affectedRowsCount
+
+    public int getAffectedRowsCount()
+    {
+        return affectedRowsCount;
+    }
+
+    public RecordBatchDef setAffectedRowsCount(int affectedRowsCount)
+    {
+        this.affectedRowsCount = affectedRowsCount;
+        return this;
+    }
+
     // java serialization
 
     public void readExternal(ObjectInput in) throws IOException
@@ -165,6 +179,9 @@ public void mergeFrom(Input input, RecordBatchDef message) throws IOException
                 case 3:
                     message.carriesTwoByteSelectionVector = input.readBool();
                     break;
+                case 4:
+                    message.affectedRowsCount = input.readInt32();
+                    break;
                 default:
                     input.handleUnknownField(number, this);
             }   
@@ -189,6 +206,9 @@ public void writeTo(Output output, RecordBatchDef message) throws IOException
 
         if(message.carriesTwoByteSelectionVector != null)
             output.writeBool(3, message.carriesTwoByteSelectionVector, false);
+
+        if(message.affectedRowsCount != 0)
+            output.writeInt32(4, message.affectedRowsCount, false);
     }
 
     public String getFieldName(int number)
@@ -198,6 +218,7 @@ public String getFieldName(int number)
             case 1: return "recordCount";
             case 2: return "field";
             case 3: return "carriesTwoByteSelectionVector";
+            case 4: return "affectedRowsCount";
             default: return null;
         }
     }
@@ -214,6 +235,7 @@ public int getFieldNumber(String name)
         __fieldMap.put("recordCount", 1);
         __fieldMap.put("field", 2);
         __fieldMap.put("carriesTwoByteSelectionVector", 3);
+        __fieldMap.put("affectedRowsCount", 4);
     }
     
 }
diff --git a/protocol/src/main/protobuf/UserBitShared.proto b/protocol/src/main/protobuf/UserBitShared.proto
index 843c6d8c385..4e2644f0005 100644
--- a/protocol/src/main/protobuf/UserBitShared.proto
+++ b/protocol/src/main/protobuf/UserBitShared.proto
@@ -140,6 +140,9 @@ message RecordBatchDef {
   optional int32 record_count = 1;
   repeated SerializedField field = 2;
   optional bool carries_two_byte_selection_vector = 3;
+  // The value is set when result set is disabled and its value corresponds to number
+  // of rows affected by query (see JDBC java.sql.ResultSet#getUpdateCount())
+  optional int32 affected_rows_count = 4;
 }
 
 message NamePart{
@@ -197,6 +200,7 @@ message QueryData {
   optional QueryId query_id = 1;
   optional int32 row_count = 2;
   optional RecordBatchDef def = 3;
+  optional int32 affected_rows_count = 4;
 }
 
 message QueryInfo {


 

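For reference, a minimal sketch (not part of the patch) of how a DDL response could carry the
new counter, using the protostuff beans modified above. The fluent setRecordCount and
setRowCount setters are assumed by analogy with the bean setters visible in the diff:

    import org.apache.drill.exec.proto.beans.QueryData;
    import org.apache.drill.exec.proto.beans.RecordBatchDef;

    public class AffectedRowsSketch {
        // Builds a QueryData message for a DDL result: no data rows, only the
        // count of rows the statement affected (field 4 in both messages).
        public static QueryData ddlResult(int affectedRows) {
            RecordBatchDef def = new RecordBatchDef()
                .setRecordCount(0)                   // assumed setter; no data rows
                .setAffectedRowsCount(affectedRows); // setter added in this patch
            return new QueryData()
                .setRowCount(0)                      // assumed setter
                .setDef(def)
                .setAffectedRowsCount(affectedRows); // setter added in this patch
        }
    }
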
----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


> Introduce option to disable result set for DDL queries for JDBC connection
> --------------------------------------------------------------------------
>
>                 Key: DRILL-6834
>                 URL: https://issues.apache.org/jira/browse/DRILL-6834
>             Project: Apache Drill
>          Issue Type: Improvement
>            Reporter: Bohdan Kazydub
>            Assignee: Bohdan Kazydub
>            Priority: Major
>              Labels: doc-impacting, ready-to-commit
>             Fix For: 1.15.0
>
>
> There are some tools (Unica, dBeaver, TalenD) that do not expect to obtain a 
> result set for a CTAS query; as a result, the query gets canceled. Hive, on 
> the other hand, does not return a result set for such queries, and these 
> tools work well with it.
> To improve Drill's integration with such tools, a session option 
> {{exec.return_result_set_for_ddl}} is introduced. If the option is enabled 
> (set to {{true}}), Drill's behaviour is unchanged, i.e. a result set is 
> returned for all queries. If the option is disabled (set to {{false}}), 
> CTAS, CREATE VIEW, CREATE FUNCTION, DROP TABLE, DROP VIEW, DROP FUNCTION, 
> USE schema, SET option, and REFRESH TABLE METADATA queries will not return 
> a result set but an {{updateCount}} instead.
> The option affects JDBC connections only.
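
To illustrate the intended JDBC behaviour, a hedged usage sketch that mirrors the
TestJdbcQuery cases added in the patch (the connection URL is a placeholder for a
real Drill deployment):

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.Statement;

    public class DdlNoResultSetExample {
        public static void main(String[] args) throws Exception {
            try (Connection conn = DriverManager.getConnection("jdbc:drill:drillbit=localhost");
                 Statement s = conn.createStatement()) {
                // Disable result sets for DDL-like statements on this session.
                s.execute("SET `exec.return_result_set_for_ddl` = false");

                s.execute("CREATE TABLE dfs.tmp.`t` AS SELECT * FROM cp.`employee.json`");
                // With the option disabled there is no ResultSet, only an update count.
                System.out.println("result set:    " + s.getResultSet());   // null
                System.out.println("rows affected: " + s.getUpdateCount());
            }
        }
    }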



--
This message was sent by Atlassian JIRA
(v7.6.3#76005)
