update
Project: http://git-wip-us.apache.org/repos/asf/incubator-trafodion/repo Commit: http://git-wip-us.apache.org/repos/asf/incubator-trafodion/commit/c86d2385 Tree: http://git-wip-us.apache.org/repos/asf/incubator-trafodion/tree/c86d2385 Diff: http://git-wip-us.apache.org/repos/asf/incubator-trafodion/diff/c86d2385 Branch: refs/heads/master Commit: c86d23852d44e7982b4ddcb303a0148d33da8d1c Parents: fcc02bc Author: mashengchen <[email protected]> Authored: Mon Mar 28 08:38:19 2016 +0000 Committer: mashengchen <[email protected]> Committed: Mon Mar 28 08:38:19 2016 +0000 ---------------------------------------------------------------------- .../java/org/trafodion/jdbc/t2/BaseRow.java | 67 - .../java/org/trafodion/jdbc/t2/InsertRow.java | 89 - .../main/java/org/trafodion/jdbc/t2/Row.java | 275 -- core/sqf/sqenvcom.sh | 4 +- core/sqf/sql/scripts/ilh_cleanhb | 106 + core/sqf/sql/scripts/ilh_hbase_repair | 65 + core/sqf/sql/scripts/ilh_hbcheck | 72 + core/sqf/sql/scripts/ilh_traf_restart | 117 + core/sqf/sql/scripts/ilh_trafinit | 108 + .../sql/scripts/install_hadoop_regr_test_env | 4 +- core/sqf/sql/scripts/install_local_hadoop | 8 +- core/sqf/sql/scripts/trafnodestatus | 105 + core/sqf/sql/scripts/trafnodestatus.awk | 76 + core/sqf/tools/sqtools.sh | 56 + core/sql/bin/SqlciErrors.txt | 2 +- core/sql/comexe/ComTdbHbaseAccess.h | 27 +- core/sql/executor/ExHbaseSelect.cpp | 19 +- core/sql/generator/GenRelScan.cpp | 2 +- core/sql/generator/Generator.cpp | 8 +- core/sql/optimizer/NATable.cpp | 4 +- core/sql/parser/ElemDDLCol.cpp | 542 ++-- core/sql/parser/ElemDDLColDef.h | 14 +- core/sql/parser/StmtDDLCreate.cpp | 7 +- core/sql/parser/sqlparser.y | 17 +- core/sql/regress/core/EXPECTED131 | 89 +- core/sql/regress/core/TEST131 | 40 + core/sql/regress/privs1/EXPECTED133 | Bin 22984 -> 26383 bytes core/sql/regress/privs1/TEST133 | 97 +- core/sql/regress/seabase/EXPECTED010 | 2 +- core/sql/regress/seabase/EXPECTED027 | 157 +- core/sql/regress/seabase/TEST010 | 2 +- 
core/sql/regress/seabase/TEST027 | 16 + core/sql/sqlcomp/CmpSeabaseDDL.h | 1 + core/sql/sqlcomp/CmpSeabaseDDLcommon.cpp | 11 +- core/sql/sqlcomp/CmpSeabaseDDLschema.cpp | 101 +- core/sql/sqlcomp/CmpSeabaseDDLtable.cpp | 8 +- core/sql/sqlcomp/CmpSeabaseDDLview.cpp | 1 - core/sql/ustat/hs_globals.cpp | 24 +- .../src/asciidoc/_chapters/SQuirrel.adoc | 8 +- .../src/asciidoc/_chapters/about.adoc | 11 +- .../src/asciidoc/_chapters/dbviz.adoc | 17 +- .../src/asciidoc/_chapters/introduction.adoc | 130 +- .../src/asciidoc/_chapters/jdbct4.adoc | 353 ++- .../src/asciidoc/_chapters/odb.adoc | 49 +- .../src/asciidoc/_chapters/odbc_linux.adoc | 82 +- .../src/asciidoc/_chapters/odbc_windows.adoc | 175 +- .../src/asciidoc/_chapters/sample_prog.adoc | 6 +- .../src/asciidoc/_chapters/trafci.adoc | 323 ++- docs/client_install/src/asciidoc/index.adoc | 11 +- .../src/images/win10_edit_path.jpg | Bin 0 -> 65094 bytes .../src/images/win10_select_java.jpg | Bin 0 -> 22758 bytes .../src/asciidoc/_chapters/about.adoc | 4 +- .../src/asciidoc/_chapters/commands.adoc | 8 +- .../src/asciidoc/_chapters/install.adoc | 70 +- .../src/asciidoc/_chapters/interactive.adoc | 56 +- .../src/asciidoc/_chapters/introduction.adoc | 10 +- .../src/asciidoc/_chapters/launch.adoc | 148 +- .../src/asciidoc/_chapters/perlpython.adoc | 72 +- .../src/asciidoc/_chapters/scripts.adoc | 24 +- docs/command_interface/src/asciidoc/index.adoc | 4 + .../src/resources/source/sample.pl | 19 + .../src/resources/source/sample.py | 29 + docs/spj_guide/pom.xml | 289 +++ .../spj_guide/src/asciidoc/_chapters/about.adoc | 200 ++ .../src/asciidoc/_chapters/create_spjs.adoc | 508 ++++ .../src/asciidoc/_chapters/deploy_spjs.adoc | 557 ++++ .../src/asciidoc/_chapters/develop_spjs.adoc | 899 +++++++ .../src/asciidoc/_chapters/execute_spjs.adoc | 730 ++++++ .../src/asciidoc/_chapters/get_started.adoc | 121 + .../asciidoc/_chapters/grant_privileges.adoc | 312 +++ .../src/asciidoc/_chapters/introduction.adoc | 255 ++ 
.../_chapters/performance_troubleshooting.adoc | 416 +++ .../src/asciidoc/_chapters/sample_database.adoc | 816 ++++++ .../src/asciidoc/_chapters/sample_spjs.adoc | 2386 ++++++++++++++++++ docs/spj_guide/src/asciidoc/index.adoc | 113 + docs/src/site/markdown/documentation.md | 4 + docs/src/site/markdown/download.md | 2 +- install/installer/rest_installer | 11 +- install/installer/traf_cloudera_mods98 | 64 +- install/installer/traf_createPasswordLessSSH | 6 +- install/installer/traf_getHadoopNodes | 105 +- install/installer/traf_hortonworks_mods98 | 101 +- .../installer/trafodion_apache_hadoop_install | 12 +- install/installer/trafodion_install | 12 - install/installer/trafodion_uninstaller | 10 +- pom.xml | 4 +- 86 files changed, 10151 insertions(+), 1734 deletions(-) ---------------------------------------------------------------------- http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/c86d2385/core/conn/jdbc_type2/src/main/java/org/trafodion/jdbc/t2/BaseRow.java ---------------------------------------------------------------------- diff --git a/core/conn/jdbc_type2/src/main/java/org/trafodion/jdbc/t2/BaseRow.java b/core/conn/jdbc_type2/src/main/java/org/trafodion/jdbc/t2/BaseRow.java deleted file mode 100644 index 2eed427..0000000 --- a/core/conn/jdbc_type2/src/main/java/org/trafodion/jdbc/t2/BaseRow.java +++ /dev/null @@ -1,67 +0,0 @@ -// @@@ START COPYRIGHT @@@ -// -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. 
You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. -// -// @@@ END COPYRIGHT @@@ - -// Source File Name: BaseRow.java - -/* - * Copyright (c) 2003, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. Oracle designates this - * particular file as subject to the "Classpath" exception as provided - * by Oracle in the LICENSE file that accompanied this code. - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- */ -package org.trafodion.jdbc.t2; - -import java.io.Serializable; -import java.sql.SQLException; - -abstract class BaseRow - implements Serializable, Cloneable { - - protected Object origVals[]; - - BaseRow() { - } - - protected abstract Object getColumnObject(int i) throws SQLException; - - protected Object[] getOrigRow() { - return origVals; - } - - protected abstract void setColumnObject(int i, Object obj) throws SQLException; -} http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/c86d2385/core/conn/jdbc_type2/src/main/java/org/trafodion/jdbc/t2/InsertRow.java ---------------------------------------------------------------------- diff --git a/core/conn/jdbc_type2/src/main/java/org/trafodion/jdbc/t2/InsertRow.java b/core/conn/jdbc_type2/src/main/java/org/trafodion/jdbc/t2/InsertRow.java deleted file mode 100644 index 54bb150..0000000 --- a/core/conn/jdbc_type2/src/main/java/org/trafodion/jdbc/t2/InsertRow.java +++ /dev/null @@ -1,89 +0,0 @@ -// @@@ START COPYRIGHT @@@ -// -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
-// -// @@@ END COPYRIGHT @@@ - -package org.trafodion.jdbc.t2; - -import java.io.Serializable; -import java.sql.ResultSetMetaData; -import java.sql.SQLException; -import java.sql.PreparedStatement; -import java.util.BitSet; - - -class InsertRow extends BaseRow - implements Serializable, Cloneable { - - private BitSet colsInserted; - private int cols; - - InsertRow(int i) { - origVals = new Object[i]; - colsInserted = new BitSet(i); - cols = i; - } - - protected Object getColumnObject(int i) throws SQLException { - if(!colsInserted.get(i - 1)) - throw new SQLException("No value has been inserted"); - else - return origVals[i - 1]; - } - - protected void initInsertRow() { - for(int i = 0; i < cols; i++) - colsInserted.clear(i); - - } - - /* - protected boolean isCompleteRow(RowSetMetaData rowsetmetadata) throws SQLException { - for(int i = 0; i < cols; i++) - if(!colsInserted.get(i) && rowsetmetadata.isNullable(i + 1) == 0) - return false; - - return true; - } - */ - - protected void markColInserted(int i) { - colsInserted.set(i); - } - - protected void setColumnObject(int i, Object obj) { - origVals[i - 1] = obj; - markColInserted(i - 1); - } - - protected void insertRow(PreparedStatement insertStmt, BitSet paramCols) throws SQLException - { - int i; - int j; - - for (i = 0, j= 1; i < cols ; i++) - { - if (paramCols.get(i)) - insertStmt.setObject(j++, origVals[i]); - } - insertStmt.execute(); - initInsertRow(); - } -} - http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/c86d2385/core/conn/jdbc_type2/src/main/java/org/trafodion/jdbc/t2/Row.java ---------------------------------------------------------------------- diff --git a/core/conn/jdbc_type2/src/main/java/org/trafodion/jdbc/t2/Row.java b/core/conn/jdbc_type2/src/main/java/org/trafodion/jdbc/t2/Row.java deleted file mode 100644 index b8104aa..0000000 --- a/core/conn/jdbc_type2/src/main/java/org/trafodion/jdbc/t2/Row.java +++ /dev/null @@ -1,275 +0,0 @@ -// @@@ START COPYRIGHT @@@ -// -// Licensed 
to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. -// -// @@@ END COPYRIGHT @@@ - -/* -* Filename : Row.java -* Description : -* -*/ - -package org.trafodion.jdbc.t2; - -import java.io.Serializable; -import java.sql.*; -import java.util.BitSet; -import java.util.Locale; - -// Referenced classes of package sun.jdbc.rowset: -// BaseRow - -class Row extends BaseRow - implements Serializable, Cloneable { - - private Object currentVals[]; - private BitSet colsChanged; - private boolean deleted; - private boolean updated; - private boolean inserted; - private int numCols; - - Row(int i) { - origVals = new Object[i]; - currentVals = new Object[i]; - colsChanged = new BitSet(i); - numCols = i; - } - - Row(int i, Object aobj[]) { - origVals = new Object[i]; - for(int j = 0; j < i; j++) - origVals[j] = aobj[j]; - - currentVals = new Object[i]; - colsChanged = new BitSet(i); - numCols = i; - } - - protected void clearDeleted() { - deleted = false; - } - - protected void clearInserted() { - inserted = false; - } - - protected void clearUpdated() { - updated = false; - for(int i = 0; i < numCols; i++) { - currentVals[i] = null; - colsChanged.clear(i); - } - - } - - protected boolean getColUpdated(int i) { - return colsChanged.get(i); - } - - 
protected Object getColumnObject(int i) throws SQLException { - if(getColUpdated(i - 1)) - return currentVals[i - 1]; - else - return origVals[i - 1]; - } - - protected boolean getDeleted() { - return deleted; - } - - protected boolean getInserted() { - return inserted; - } - - protected boolean getUpdated() { - return updated; - } - - protected void initColumnObject(int i, Object obj) { - origVals[i - 1] = obj; - } - - protected void moveCurrentToOrig() { - for(int i = 0; i < numCols; i++) - if(getColUpdated(i)) { - origVals[i] = currentVals[i]; - currentVals[i] = null; - colsChanged.clear(i); - } - } - - private void setColUpdated(int i) { - colsChanged.set(i); - } - - protected void setColumnObject(int i, Object obj) { - currentVals[i - 1] = obj; - setColUpdated(i - 1); - } - - protected void setLobObject(int i, Object obj) { - currentVals[i - 1] = obj; - origVals[i-1] = obj; - } - - - protected void setDeleted() { - deleted = true; - } - - protected void setInserted() { - inserted = true; - } - - protected void setUpdated() { - updated = true; - } - - protected void deleteRow(Locale locale, PreparedStatement deleteStmt, BitSet paramCols) throws SQLException - { - int i; - int j; - int count; - - for (i = 0, j = 1; i < numCols ; i++) - { - if (paramCols.get(i)) - deleteStmt.setObject(j++, origVals[i]); - } - count = deleteStmt.executeUpdate(); - if (count == 0) - throw SQLMXMessages.createSQLException(locale, "row_modified", null); - } - - protected void updateRow(Locale locale, PreparedStatement updateStmt, BitSet paramCols, BitSet keyCols) throws SQLException - { - int i; - int j; - int count; - Object obj; - int numPKey=0; - int loc=0; - int pKeyCounter=1; - - for (i = 0; i < numCols; i++ ) - { - if(keyCols.get(i)) - numPKey++; - } - - loc = numCols - numPKey; - - for (i = 0, j = 1; i < numCols ; i++) - { - if (keyCols.get(i)) - { - if (getColUpdated(i)) - throw SQLMXMessages.createSQLException(locale, "primary_key_not_updateable", null); - 
updateStmt.setObject((loc+pKeyCounter),getColumnObject(i+1)); - pKeyCounter++; - } - else - { - { - obj = getColumnObject((i+1)); - if (obj instanceof SQLMXLob) - { - if (obj == origVals[i]) // New and old Lob objects are same - { - updateStmt.setObject(j++, new DataWrapper((int) ((SQLMXLob)obj).dataLocator_)); - continue; - } - } - updateStmt.setObject(j++, obj); - } - } - } - - - /* - for (i = 0 ; i < numCols ; i++) - { - if (paramCols.get(i)) - { - obj = origVals[i]; - if (obj instanceof SQLMXLob) - { - updateStmt.setObject(j++, new DataWrapper(((SQLMXLob)obj).dataLocator_)); - continue; - } - updateStmt.setObject(j++, origVals[i]); - } - } */ - count = updateStmt.executeUpdate(); - if (count == 0) - throw SQLMXMessages.createSQLException(locale, "row_modified", null); - moveCurrentToOrig(); - setUpdated(); - } - - protected void refreshRow(Locale locale, PreparedStatement selectStmt, BitSet selectCols, BitSet keyCols) throws SQLException - { - int i; - int j; - ResultSet rs; - ResultSetMetaData rsmd; - int columnCount; - - clearUpdated(); - - for (i = 0, j = 1; i < numCols ; i++) - { - if (keyCols.get(i)) - selectStmt.setObject(j++, origVals[i]); - } - rs = selectStmt.executeQuery(); - if (rs != null) - { - try { - rsmd = rs.getMetaData(); - - columnCount = rsmd.getColumnCount(); - for (i = 0, j = 1 ; i < numCols ; i++) - { - if (selectCols.get(i)) - origVals[i] = rs.getObject(j++); - } - } finally { - rs.close(); - } - } - } - - protected void closeLobObjects() - { - int i; - SQLMXLob lob; - - for (i = 0; i < numCols ; i++) - { - if (currentVals[i] instanceof SQLMXLob) - { - lob = (SQLMXLob)currentVals[i]; - lob.close(); - } - - } - } -} http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/c86d2385/core/sqf/sqenvcom.sh ---------------------------------------------------------------------- diff --git a/core/sqf/sqenvcom.sh b/core/sqf/sqenvcom.sh index bfec256..3295b30 100644 --- a/core/sqf/sqenvcom.sh +++ b/core/sqf/sqenvcom.sh @@ -146,14 +146,13 @@ 
export HBASE_DEP_VER_HDP=1.1.2.2.3.2.0-2950 export HIVE_DEP_VER_HDP=1.2.1.2.3.2.0-2950 export HBASE_DEP_VER_APACHE=1.0.2 export HIVE_DEP_VER_APACHE=1.1.0 -export THRIFT_DEP_VER=0.9.0 export HBASE_TRX_ID_CDH=hbase-trx-cdh5_4 export HBASE_TRX_ID_APACHE=hbase-trx-apache1_0_2 export HBASE_TRX_ID_HDP=hbase-trx-hdp2_3 +export THRIFT_DEP_VER=0.9.0 export HIVE_DEP_VER=0.13.1 export HADOOP_DEP_VER=2.6.0 - # staged build-time dependencies export HADOOP_BLD_LIB=${TOOLSDIR}/hadoop-${HADOOP_DEP_VER}/lib/native export HADOOP_BLD_INC=${TOOLSDIR}/hadoop-${HADOOP_DEP_VER}/include @@ -179,7 +178,6 @@ if [[ "$HBASE_DISTRO" = "APACHE" ]]; then export SQL_JAR=trafodion-sql-${HBVER}-${TRAFODION_VER}.jar fi - # check for workstation env # want to make sure SQ_VIRTUAL_NODES is set in the shell running sqstart # so we can determine if we are on a workstation or not http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/c86d2385/core/sqf/sql/scripts/ilh_cleanhb ---------------------------------------------------------------------- diff --git a/core/sqf/sql/scripts/ilh_cleanhb b/core/sqf/sql/scripts/ilh_cleanhb new file mode 100755 index 0000000..0b1076b --- /dev/null +++ b/core/sqf/sql/scripts/ilh_cleanhb @@ -0,0 +1,106 @@ +#!/bin/bash +# +# @@@ START COPYRIGHT @@@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. 
See the License for the +# specific language governing permissions and limitations +# under the License. +# +# @@@ END COPYRIGHT @@@ +# + +# +# This script cleans up the HBase env +# that was setup by install_local_hadoop. +# It requires Hadoop to be up. +# +# - kills the HBase Master process (if it exists) +# - deletes the HBase log files +# - deletes the HDFS directory /hbase +# +# After running this script: +# - swstarthbase +# - sqstart +# - initialize trafodion +# + +function getMyNameNode { + jps | grep -w NameNode | cut -d' ' -f1 +} + +function getMyDataNode { + jps | grep -w DataNode | cut -d' ' -f1 +} + +function getMyHMaster { + jps | grep HMaster | cut -d' ' -f1 +} + +function killMyHMaster { + + lv_hbpid=`getMyHMaster` + if [ ! -z ${lv_hbpid} ]; then + echo "Killing HMaster pid: ${lv_hbpid}" + kill -9 ${lv_hbpid} + else + echo "There's no HMaster process to kill" + fi + +} + +lv_clean_hb_data=0 +if [[ ! -z "$1" ]]; then + lv_clean_hb_data=$1 +fi + +lv_nnpid=`getMyNameNode` +if [[ -z ${lv_nnpid} ]]; then + echo "NameNode is not up - please run swstarthadoop to start it or check the Hadoop logs - exitting..." + exit 1 +fi +echo "NameNode pid: ${lv_nnpid}" + +lv_dnpid=`getMyDataNode` +if [[ -z ${lv_dnpid} ]]; then + echo "DataNode is not up - please run swstarthadoop to start it or check the Hadoop logs - exitting..." + exit 1 +fi +echo "DataNode pid: ${lv_dnpid}" + +# stop / kill the HMaster process +killMyHMaster + +# delete the zookeeper info +if [[ ${lv_clean_hb_data} -gt 0 ]]; then + lv_zk_dir="${MY_SW_ROOT}/hdfs:" + echo "Deleting the zookeeper directory: ${lv_zk_dir}" + rm -rf ${lv_zk_dir} +fi + +#delete the logs +echo "Deleting HBase logs in: ${MY_SQROOT}/sql/local_hadoop/hbase/logs" +if [ ! 
-z ${MY_SQROOT} ]; then + rm -rf ${MY_SQROOT}/sql/local_hadoop/hbase/logs/*.log + rm -rf ${MY_SQROOT}/sql/local_hadoop/hbase/logs/*log.[0-9]* + rm -rf ${MY_SQROOT}/sql/local_hadoop/hbase/logs/*.out + rm -rf ${MY_SQROOT}/sql/local_hadoop/hbase/logs/*.out.[0-9]* +fi + +if [[ ${lv_clean_hb_data} -gt 1 ]]; then +# delete the HBase table data from HDFS + echo "Deleting the HBase data directory (hdfs) /hbase" + swhdfs dfs -rm -f -r -skipTrash /hbase +fi http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/c86d2385/core/sqf/sql/scripts/ilh_hbase_repair ---------------------------------------------------------------------- diff --git a/core/sqf/sql/scripts/ilh_hbase_repair b/core/sqf/sql/scripts/ilh_hbase_repair new file mode 100755 index 0000000..547a539 --- /dev/null +++ b/core/sqf/sql/scripts/ilh_hbase_repair @@ -0,0 +1,65 @@ +#!/bin/bash +# @@@ START COPYRIGHT @@@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# +# @@@ END COPYRIGHT @@@ + +#################################################### +# +# Executes the following: +# +# - Executes: hbase hbck -repair +# +#################################################### + +if [ ! 
-z $MY_SQROOT ]; then + cd $MY_SQROOT/sql/scripts +else + echo "The environment variable MY_SQROOT does not exist." + echo "Please ensure sqenv.sh has been sourced." + echo + exit 1; +fi + +echo 'This script will execute: hbase hbck -repair' + +if [[ -z $1 ]]; then + read -p 'Do you want to proceed? y/n : ' lv_ans +else + lv_ans=$1 +fi + +if [ "$lv_ans" != "y" ]; then + echo "Not proceeding. Exitting..." + exit 0 +else + echo "Ok...going ahead..." +fi + +lv_starttime=`date` + +lv_stderr_file="$MY_SQROOT/logs/hbase_hbck_repair.log" +echo "Stderr being written to the file: ${lv_stderr_file}" +${HBASE_HOME}/bin/hbase hbck -repair > ${lv_stderr_file} 2>${lv_stderr_file} + +echo "done with hbase hbck. return code: $?" +lv_endtime=`date` +echo "Start time: ${lv_starttime}" +echo "End time: ${lv_endtime}" +exit 0 http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/c86d2385/core/sqf/sql/scripts/ilh_hbcheck ---------------------------------------------------------------------- diff --git a/core/sqf/sql/scripts/ilh_hbcheck b/core/sqf/sql/scripts/ilh_hbcheck new file mode 100755 index 0000000..2c6149c --- /dev/null +++ b/core/sqf/sql/scripts/ilh_hbcheck @@ -0,0 +1,72 @@ +#!/bin/bash +# +# @@@ START COPYRIGHT @@@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. 
See the License for the +# specific language governing permissions and limitations +# under the License. +# +# @@@ END COPYRIGHT @@@ +# + +if [ -z $JAVA_HOME ]; then + echo "The environment variable \$JAVA_HOME has not been set" + echo "Please ensure \$MY_SQROOT/sqenv.sh has been sourced." + echo + exit 1; +fi + +cd $MY_SQROOT/tools/check_hbase_available +echo "Building some utility Java programs." +. ./build 2>/dev/null +$JAVA_HOME/bin/java CheckHBase | grep -v "Checking" | grep "HBase is available" +lv_status=$? +if [[ $lv_status == 0 ]]; then + echo "HBase Master is available." +else + echo "HBase Master is not accessible. Please check the HBase logs" + exit 1 +fi + +echo +echo "Checking if HBase is functional..." +echo + +swhbase <<EOF > hbase_status_detailed.out +status 'detailed' +EOF + +declare -i lv_num_live_rs +lv_num_live_rs=`cat hbase_status_detailed.out | grep live | cut -d' ' -f1` +echo "Number of live region servers: ${lv_num_live_rs}" +if [ ${lv_num_live_rs} '>' '0' ] ; then + echo + echo "Checking if regions are online" +else + echo "There does not seem to be any live region server available" + echo "Please check the HBase logs or the management interfaces" + exit 1 +fi + +declare -i lv_num_online_regions +lv_num_online_regions=`cat hbase_status_detailed.out | grep -o "numberOfOnlineRegions=[0-9]*" | cut -d= -f2` +echo "The number of online regions: ${lv_num_online_regions}" +if [ ${lv_num_online_regions} -lt 2 ]; then + echo "Error: There does not seem to be enough online region. Exitting..." 
+ exit 1 +fi + +exit 0 http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/c86d2385/core/sqf/sql/scripts/ilh_traf_restart ---------------------------------------------------------------------- diff --git a/core/sqf/sql/scripts/ilh_traf_restart b/core/sqf/sql/scripts/ilh_traf_restart new file mode 100755 index 0000000..ed5141c --- /dev/null +++ b/core/sqf/sql/scripts/ilh_traf_restart @@ -0,0 +1,117 @@ +#!/bin/bash +# @@@ START COPYRIGHT @@@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# +# @@@ END COPYRIGHT @@@ + +#################################################### +# +# Executes the following: +# +# - Kills the Trafodion Env +# - Kills the HBase Master process and the zookeeper data +# (uses ilh_cleanhb 1) +# - Restarts HBase +# - Repairs HBase (ilh_hbase_repair) +# - Checks whether HBase is ready (uses ilh_hbcheck) +# - Deletes the Trafodion env file: $MY_SQROOT/etc/ms.env +# - Regenerates Config (sqgen) +# - Starts up Trafodion (sqstart) +# +#################################################### + +if [ ! -z $MY_SQROOT ]; then + cd $MY_SQROOT/sql/scripts +else + echo "The environment variable MY_SQROOT does not exist." + echo "Please ensure sqenv.sh has been sourced." 
+ echo + exit 1; +fi + +lv_ilh_cleanhb_mode=1 + +echo 'This script will do the following:' +echo ' - Kill the Trafodion env (pkillall)' +echo " - Kill the HBase env and delete the zookeeper data directory (ilh_cleanhb ${lv_ilh_cleanhb_mode})" +echo ' - Restart HBase (swstarthbase)' +echo ' - Delete the env file: $MY_SQROOT/etc/ms.env' +echo ' - sqgen' +echo ' - sqstart' + +if [[ -z $1 ]]; then + read -p 'Do you want to proceed? y/n : ' lv_ans +else + lv_ans=$1 +fi + +if [ "$lv_ans" != "y" ]; then + echo "Not proceeding. Exitting..." + exit 0 +else + echo "Ok...going ahead..." +fi + +lv_starttime=`date` + +run_util pkillall +sleep 5 +run_util pkillall + +echo "Stopping HBase" +swstophbase & +sleep 45 +kill %1 + +run_util 'ilh_cleanhb 1' + +run_util swstarthbase + +echo "Sleeping for 15 seconds before checking the status of HBase" +sleep 15 + +echo "--------------------------------------" +if [ -e $MY_SQROOT/sql/scripts/hbcheck ]; then + run_util hbcheck 4 10 +else + run_util ilh_hbcheck +fi +echo "--------------------------------------" + +echo "Repairing HBase (hbase hbck -repair)" +ilh_hbase_repair y + +echo "--------------------------------------" +echo "deleting $MY_SQROOT/etc/ms.env" +rm $MY_SQROOT/etc/ms.env + +run_util sqgen + +run_util sqstart + +echo "executing: sqlci -> get schemas" +sqlci <<EOF +get schemas; +EOF + +echo "done" +lv_endtime=`date` +echo "Start time: ${lv_starttime}" +echo "End time: ${lv_endtime}" +exit 0 http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/c86d2385/core/sqf/sql/scripts/ilh_trafinit ---------------------------------------------------------------------- diff --git a/core/sqf/sql/scripts/ilh_trafinit b/core/sqf/sql/scripts/ilh_trafinit new file mode 100755 index 0000000..77669c1 --- /dev/null +++ b/core/sqf/sql/scripts/ilh_trafinit @@ -0,0 +1,108 @@ +#!/bin/bash +# @@@ START COPYRIGHT @@@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. 
See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# +# @@@ END COPYRIGHT @@@ + +#################################################### +# +# Executes the following: +# +# - Kills the Trafodion Env +# - Kills the HBase env and the HDFS data directory associated with HBase +# (uses ilh_cleanhb) +# - Restarts HBase +# - Checks whether HBase is ready (uses ilh_hbcheck) +# - Deletes the Trafodion env file: $MY_SQROOT/etc/ms.env +# - Regenerates Config (sqgen) +# - Starts up Trafodion (sqstart) +# - Initializes Trafodion (sqlci -> initialize trafodion) +# +#################################################### + +if [ ! -z $MY_SQROOT ]; then + cd $MY_SQROOT/sql/scripts +else + echo "The environment variable MY_SQROOT does not exist." + echo "Please ensure sqenv.sh has been sourced." + echo + exit 1; +fi + +echo 'This script will do the following:' +echo ' - Kill the Trafodion env (pkillall)' +echo ' - Kill the HBase env and Delete the HDFS data directory associated with HBase (ilh_cleanhb 2)' +echo ' - Restart HBase (swstarthbase)' +echo ' - Delete the env file: $MY_SQROOT/etc/ms.env' +echo ' - sqgen' +echo ' - sqstart' +echo ' - sqlci -> initialize trafodion' + +if [[ -z $1 ]]; then + read -p 'Do you want to proceed? y/n : ' lv_ans +else + lv_ans=$1 +fi + +if [ "$lv_ans" != "y" ]; then + echo "Not proceeding. Exitting..." 
+ exit 0 +else + echo "Ok...going ahead..." +fi + +lv_starttime=`date` +run_util pkillall +sleep 5 +run_util pkillall + +run_util 'ilh_cleanhb 2' + +run_util swstarthbase + +echo "Sleeping for 10 seconds before checking the status of HBase" +sleep 10 +if [ -e $MY_SQROOT/sql/scripts/hbcheck ]; then + run_util hbcheck 4 10 +else + run_util ilh_hbcheck +fi + +echo "deleting $MY_SQROOT/etc/ms.env" +rm $MY_SQROOT/etc/ms.env + +run_util sqgen + +run_util sqstart + +echo "executing: sqlci -> initialize trafodion" +sqlci <<EOF +initialize trafodion; +EOF + +echo "executing: sqlci -> get schemas" +sqlci <<EOF +get schemas; +EOF + +echo "done" +lv_endtime=`date` +echo "Start time: ${lv_starttime}" +echo "End time: ${lv_endtime}" +exit 0 http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/c86d2385/core/sqf/sql/scripts/install_hadoop_regr_test_env ---------------------------------------------------------------------- diff --git a/core/sqf/sql/scripts/install_hadoop_regr_test_env b/core/sqf/sql/scripts/install_hadoop_regr_test_env index 17da726..5fea8be 100755 --- a/core/sqf/sql/scripts/install_hadoop_regr_test_env +++ b/core/sqf/sql/scripts/install_hadoop_regr_test_env @@ -155,7 +155,7 @@ if [ -z "$MY_TPCDS_UNPACK_DIR" ]; then MY_TPCDS_UNPACK_DIR=. cd -P . else - mkdir $MY_TPCDS_UNPACK_DIR + mkdir -p $MY_TPCDS_UNPACK_DIR cd $MY_TPCDS_UNPACK_DIR fi @@ -183,7 +183,7 @@ fi make >>${MY_LOG_FILE} 2>&1 echo "Generating the data..." 
| tee -a ${MY_LOG_FILE} - mkdir $MY_TPCDS_DATA_DIR + mkdir -p $MY_TPCDS_DATA_DIR SCALE=1 FORCE=Y http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/c86d2385/core/sqf/sql/scripts/install_local_hadoop ---------------------------------------------------------------------- diff --git a/core/sqf/sql/scripts/install_local_hadoop b/core/sqf/sql/scripts/install_local_hadoop index 4cc74c4..84643e2 100755 --- a/core/sqf/sql/scripts/install_local_hadoop +++ b/core/sqf/sql/scripts/install_local_hadoop @@ -745,10 +745,15 @@ EOF cat <<EOF >$MY_SW_SCRIPTS_DIR/sw_env.sh # Basic environment variables for Trafodion/Hadoop/Hive/HBase/MySQL setup export JAVA_HOME=${JAVA_HOME} +export JAVA_LIBRARY_PATH=${LD_LIBRARY_PATH} +export MY_SW_SCRIPTS_DIR=${MY_SW_SCRIPTS_DIR} +export MY_SW_ROOT=${MY_SW_ROOT} export MYSQL_HOME=${MYSQL_HOME} export YARN_HOME=${YARN_HOME} export HIVE_HOME=${HIVE_HOME} export HBASE_HOME=${HBASE_HOME} +export HADOOP_PID_DIR=${MY_SW_ROOT}/tmp +export HBASE_PID_DIR=${MY_SW_ROOT}/tmp export MY_HADOOP_HDFS_PORT_NUM=${MY_HADOOP_HDFS_PORT_NUM} export MY_HBASE_ZOOKEEPER_PEERPORT_NUM=${MY_HBASE_ZOOKEEPER_PEERPORT_NUM} export MY_HBASE_ZOOKEEPER_LEADERPORT_NUM=${MY_HBASE_ZOOKEEPER_LEADERPORT_NUM} @@ -1597,8 +1602,7 @@ echo "$MY_LOCAL_SW_DIST/${HBASE_TAR}" <value> org.apache.hadoop.hbase.coprocessor.transactional.TrxRegionObserver, org.apache.hadoop.hbase.coprocessor.transactional.TrxRegionEndpoint, - org.apache.hadoop.hbase.coprocessor.transactional.SsccRegionEndpoint, - org.apache.hadoop.hbase.coprocessor.AggregateImplementation ${VISIBILITY_COPROC} + org.apache.hadoop.hbase.coprocessor.AggregateImplementation </value> </property> </configuration> http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/c86d2385/core/sqf/sql/scripts/trafnodestatus ---------------------------------------------------------------------- diff --git a/core/sqf/sql/scripts/trafnodestatus b/core/sqf/sql/scripts/trafnodestatus new file mode 100755 index 0000000..f417471 --- 
/dev/null +++ b/core/sqf/sql/scripts/trafnodestatus @@ -0,0 +1,105 @@ + #!/bin/bash +# +# @@@ START COPYRIGHT @@@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# +# @@@ END COPYRIGHT @@@ +# +# Script to check the trafodion-status of the nodes +# of a trafodion cluster. +# +# Switch "-j" prints the output in JSON format. +# + +function traf_echo { + if [[ ${lv_jason_output} == "1" ]]; then + if [[ ${lv_first} == "1" ]]; then + lv_first=0 + echo -n "[" + else + echo -n "," + fi + echo -n "{\"ERROR\":\"$*\"}" + else + echo $* + fi + +} + +function traf_exit { + if [[ ${lv_jason_output} == "1" ]]; then + if [[ ${lv_first} == "0" ]]; then + echo "]" + fi + fi + exit $1 +} + +lv_first=1 +lv_jason_output=0 +if [[ ! -z $1 ]]; then + if [[ $1 == "-j" ]]; then + lv_jason_output=1 + fi +fi + +if [[ -z $MY_SQROOT ]]; then + traf_echo "Looks like the Trafodion environment has not been configured. Exitting..." + traf_exit 5; +fi + +if [[ -z $SQSCRIPTS_DIR ]]; then + SQSCRIPTS_DIR=$MY_SQROOT/sql/scripts +fi +SQCONFIGDB_FILE="$SQSCRIPTS_DIR/sqconfig.db" +if [[ ! -e ${SQCONFIGDB_FILE} ]]; then + traf_echo "Cannot find the Trafodion configuration DB file ${SQCONFIGDB_FILE}" + traf_echo "Please execute sqgen to generate this file." 
+ traf_exit 5; +fi + +grep_out=`pstat | grep "monitor COLD" | grep -v mpirun` +if [[ $? == '0' ]]; then + sqshell -c node info | awk -v jason_output=${lv_jason_output} -f $MY_SQROOT/sql/scripts/trafnodestatus.awk + traf_exit $? +else + traf_echo "Trafodion is not running on the current node: `hostname`" + lv_monitor_not_running_on_curr_node=1 +fi + +if [[ ! -z ${SQ_VIRTUAL_NODES} ]]; then + # Running in a single node environment + traf_echo "Trafodion has been configured to run on a single virtual node environment." + traf_exit 1 +fi + +grep_out_mon=`$MY_SQROOT/sql/scripts/cstat | grep "monitor COLD" ` +lv_ret=$? +if [[ $lv_ret == 0 ]]; then + grep_out=`cstat | grep "monitor COLD" | grep -v mpirun | sort | cut -d: -f1` + for lv_node in $grep_out; do + ssh $lv_node sqshell -c node info | awk -v jason_output=${lv_jason_output} -f $MY_SQROOT/sql/scripts/trafnodestatus.awk + lv_sqcheck_retcode=$? + traf_exit ${lv_sqcheck_retcode} + done +else + traf_echo "None of the Trafodion nodes have a Trafodion monitor running on them." + traf_echo "Trafodion is currently down." + traf_exit 255; +fi http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/c86d2385/core/sqf/sql/scripts/trafnodestatus.awk ---------------------------------------------------------------------- diff --git a/core/sqf/sql/scripts/trafnodestatus.awk b/core/sqf/sql/scripts/trafnodestatus.awk new file mode 100644 index 0000000..a1849a7 --- /dev/null +++ b/core/sqf/sql/scripts/trafnodestatus.awk @@ -0,0 +1,76 @@ +# +# @@@ START COPYRIGHT @@@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License.
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# +# @@@ END COPYRIGHT @@@ +# +# awk script used by the trafnodestatus shell-script +# to check the trafodion-status of the nodes +# of a trafodion cluster. +# +BEGIN { + lv_header_line_number = 0; + lv_lines_since_header = 0; + lv_first = 1; + lv_got_data = 0; +} + +{ + if (lv_header_line_number > 0) { + lv_lines_since_header = FNR - lv_header_line_number; + if (lv_lines_since_header >= 3) { + if ( ((lv_lines_since_header - 1) % 2) == 0) { + lv_node_status = toupper($3); + if (lv_node_status ~ "UP") { + lv_node_name = $8; + } + else { + lv_node_name = $4; + } + if (jason_output == 1) { + if (lv_first == 1) { + lv_first = 0; + printf("["); + lv_got_data = 1; + } + else { + printf(","); + } + printf("{\"NODE\":\"%s\",\"STATUS\":\"%s\"}", + lv_node_name, + lv_node_status); + } + else { + printf ("%s [ %s ]\n", + lv_node_name, + lv_node_status); + } + } + } + } +} + +/MemFree SwapFree/ {lv_header_line_number = FNR;} + +END { + if ((jason_output == 1) && + (lv_got_data == 1)) { + printf("]\n"); + } +} http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/c86d2385/core/sqf/tools/sqtools.sh ---------------------------------------------------------------------- diff --git a/core/sqf/tools/sqtools.sh b/core/sqf/tools/sqtools.sh index 0c7767c..d369959 100644 --- a/core/sqf/tools/sqtools.sh +++ b/core/sqf/tools/sqtools.sh @@ -266,6 +266,58 @@ function ndbm { eval '$SQPDSHA "df -h | grep database" 2>/dev/null | wc -l' } +function chkReturnCodeExit { + if [[ $1 != 0 ]]; then + echo "$2 returned error $1, exitting..." 
+ exit $1; + else + echo "$2 executed successfully." + fi +} + +function chkReturnCode { + if [[ $1 != 0 ]]; then + echo "$2 returned error $1..." + return $1; + else + echo "$2 executed successfully." + return 0 + fi +} + +# $1: program/utility/script to run +# $2: (optional): retry count (default 0) +# $3: (optional): sleep for this many seconds +# +function run_util { + echo "--------------------------------------" + lv_cmd=$* + echo "executing: $1" + $1 + lv_stat=$? + if [ ! -z $3 ]; then + declare -i lv_retries=0 + while [ $lv_retries -lt $2 ]; do + let lv_retries=($lv_retries+1) + chkReturnCode ${lv_stat} $1 + if [ $? != 0 ]; then + if [ $lv_retries -lt $2 ]; then + echo "retrying in $3 seconds" + sleep $3 + $1 + lv_stat=$? + else + exit ${lv_stat} + fi + else + return 0 + fi + done + else + chkReturnCodeExit ${lv_stat} $1 + fi + echo "--------------------------------------" +} # check the startup log and sort the interesting even chronologically function sqchksl { setup_sqpdsh @@ -906,6 +958,10 @@ export -f sqpostsem export -f sqgdb_doit export -f sq_gdb_main +export -f chkReturnCodeExit +export -f chkReturnCode +export -f run_util + # A front end to sq_gdb_main (as sq_gdb_main is a function, this function executes sq_gdb_main in a fresh # bash context and that allows background tasks spawned by sq_gdb_main to be managed). function sq_gdb { http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/c86d2385/core/sql/bin/SqlciErrors.txt ---------------------------------------------------------------------- diff --git a/core/sql/bin/SqlciErrors.txt b/core/sql/bin/SqlciErrors.txt index 0e1b24b..045b1f1 100644 --- a/core/sql/bin/SqlciErrors.txt +++ b/core/sql/bin/SqlciErrors.txt @@ -801,7 +801,7 @@ $1~String1 -------------------------------- 3049 42000 99999 BEGINNER MAJOR DBADMIN Host variables, such as $0~string0, are not allowed in a dynamic compilation. 
3050 42000 99999 BEGINNER MAJOR DBADMIN The constraint must have the same catalog and schema as the specified table. 3051 42000 99999 BEGINNER MAJOR DBADMIN Duplicate HEADING clauses were specified in column definition $0~ColumnName. -3052 42000 99999 BEGINNER MAJOR DBADMIN Duplicate NOT NULL clauses were specified in column definition $0~ColumnName. +3052 42000 99999 BEGINNER MAJOR DBADMIN Duplicate $0~String0 clauses were specified in column definition $0~ColumnName. 3053 42000 99999 BEGINNER MAJOR DBADMIN Duplicate PRIMARY KEY clauses were specified in column definition $0~ColumnName. 3054 42000 99999 BEGINNER MAJOR DBADMIN The NOT DROPPABLE clause is allowed only in PRIMARY KEY and NOT NULL constraint definitions. 3055 42000 99999 BEGINNER MAJOR DBADMIN Duplicate DELETE rules were specified. http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/c86d2385/core/sql/comexe/ComTdbHbaseAccess.h ---------------------------------------------------------------------- diff --git a/core/sql/comexe/ComTdbHbaseAccess.h b/core/sql/comexe/ComTdbHbaseAccess.h index f430b39..51bc7ca 100644 --- a/core/sql/comexe/ComTdbHbaseAccess.h +++ b/core/sql/comexe/ComTdbHbaseAccess.h @@ -269,19 +269,38 @@ public: {(v ? flags_ |= USE_SMALL_SCANNER : flags_ &= ~USE_SMALL_SCANNER); }; NABoolean useSmallScanner() - { return (flags_ & USE_SMALL_SCANNER) != 0; }; + { return (flags_ & (USE_SMALL_SCANNER | USE_SMALL_SCANNER_FOR_MDAM)) != 0; }; + void setUseSmallScannerForProbes(NABoolean v) + {(v ? flags_ |= USE_SMALL_SCANNER_FOR_PROBES : + flags_ &= ~USE_SMALL_SCANNER_FOR_PROBES); }; + NABoolean useSmallScannerForProbes() + { return (flags_ & USE_SMALL_SCANNER_FOR_PROBES) != 0; }; + + void setUseSmallScannerForMDAMifNeeded(UInt32 numRowRetrieved){ + //if last scan of mdam fitted in one block, and small scanner CQD is either ON or SYSTEM (this is summarized in USE_SMALL_SCANNER_FOR_PROBES) + //then next MDAM scan can use small scanner. Most likely it is about same size as previous one. 
+ if ((numRowRetrieved < maxNumRowsPerHBaseBlock_) && ((flags_ & USE_SMALL_SCANNER_FOR_PROBES) != 0)) + flags_ |= USE_SMALL_SCANNER_FOR_MDAM; + else + flags_ &= ~USE_SMALL_SCANNER_FOR_MDAM; + } + void setMaxNumRowsPerHbaseBlock(UInt32 n) { maxNumRowsPerHBaseBlock_ = n;} + UInt32 maxNumRowsPerHbaseBlock() { return maxNumRowsPerHBaseBlock_; } private: enum { - CACHE_BLOCKS = 0x0001, - USE_MIN_MDAM_PROBE_SIZE = 0x0002, - USE_SMALL_SCANNER = 0x0004 + CACHE_BLOCKS = 0x0001, + USE_MIN_MDAM_PROBE_SIZE = 0x0002, + USE_SMALL_SCANNER = 0x0004, + USE_SMALL_SCANNER_FOR_PROBES = 0x0008, + USE_SMALL_SCANNER_FOR_MDAM = 0x0010 }; UInt32 flags_; UInt32 numCacheRows_; + UInt32 maxNumRowsPerHBaseBlock_; }; // --------------------------------------------------------------------- http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/c86d2385/core/sql/executor/ExHbaseSelect.cpp ---------------------------------------------------------------------- diff --git a/core/sql/executor/ExHbaseSelect.cpp b/core/sql/executor/ExHbaseSelect.cpp index 5aabd9a..2bd904b 100644 --- a/core/sql/executor/ExHbaseSelect.cpp +++ b/core/sql/executor/ExHbaseSelect.cpp @@ -444,6 +444,16 @@ ExWorkProcRetcode ExHbaseScanSQTaskTcb::work(short &rc) Lng32 retcode = 0; rc = 0; Lng32 remainingInBatch = batchSize_; + NABoolean isFirstBatch = false; + // isFirstBatch is a stack variable for optimization reasons. It is used for the mdam small scanner optimization heuristic that + // is performed at runtime. Since this function is invoked intensively for all scans (mdam or regular scan), minimizing CPU/memory access + // impact on runtime code to a strict minimum is attempted. Given that we are trying to detect if the actual scan is below the size + // of an HBase block, having the runtime logic performing the detection only affect the first work invocation looks like the right idea, + // and leveraging an existing counter (remainingInBatch) instead of creating a new one.
The reasonable asumption to allow this is that + // 1- batchSize_ being 8K, most likely times the row size, we are good in assuming that first hbase block will fit in batchSize + // 2- parent buffer size will be large enough to deal with one HBAse_Block_size without having to rely on re-invoking work in the middle. + // and anyway, if none of the reasonable assumption is true, then that's fine, the heuristic won't work, and we will use regular scanner, + // meaning optimization is off for the scan part of MDAM (still on for the probe side of it). while (1) { @@ -467,7 +477,7 @@ ExWorkProcRetcode ExHbaseScanSQTaskTcb::work(short &rc) step_ = HANDLE_ERROR; break; } - + isFirstBatch = true; retcode = tcb_->ehi_->scanOpen(tcb_->table_, tcb_->beginRowId_, tcb_->endRowId_, tcb_->columns_, -1, @@ -606,6 +616,8 @@ ExWorkProcRetcode ExHbaseScanSQTaskTcb::work(short &rc) case SCAN_CLOSE: { + if (isFirstBatch) //only if closed happen in a single batch, batchSize - remainingInBatch = nb rows retrieved + tcb_->hbaseAccessTdb().getHbasePerfAttributes()->setUseSmallScannerForMDAMifNeeded(batchSize_ - remainingInBatch); //calculate MDAM small scanner flag for next scan if it was MDAM retcode = tcb_->ehi_->scanClose(); if (tcb_->setupError(retcode, "ExpHbaseInterface::scanClose")) step_ = HANDLE_ERROR; @@ -647,8 +659,9 @@ Lng32 ExHbaseScanSQTaskTcb::getProbeResult(char* &keyData) tcb_->beginRowId_, tcb_->endRowId_, tcb_->columns_, -1, tcb_->hbaseAccessTdb().readUncommittedScan(), - tcb_->hbaseAccessTdb().getHbasePerfAttributes()->cacheBlocks(), - tcb_->hbaseAccessTdb().getHbasePerfAttributes()->useSmallScanner(), + tcb_->hbaseAccessTdb().getHbasePerfAttributes()->cacheBlocks() || + tcb_->hbaseAccessTdb().getHbasePerfAttributes()->useSmallScannerForProbes(), // when small scanner feature is ON or SYSTEM force cache ON + tcb_->hbaseAccessTdb().getHbasePerfAttributes()->useSmallScannerForProbes(), probeSize, TRUE, NULL, NULL, NULL); if (tcb_->setupError(retcode, 
"ExpHbaseInterface::scanOpen")) http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/c86d2385/core/sql/generator/GenRelScan.cpp ---------------------------------------------------------------------- diff --git a/core/sql/generator/GenRelScan.cpp b/core/sql/generator/GenRelScan.cpp index a919958..b56a7ae 100644 --- a/core/sql/generator/GenRelScan.cpp +++ b/core/sql/generator/GenRelScan.cpp @@ -232,7 +232,7 @@ int HbaseAccess::createAsciiColAndCastExpr(Generator * generator, // HIVE_FILE_CHARSET can only be empty or GBK else if ( needTranslate == TRUE ) { - asciiType = new (h) SQLVarChar(sizeof(Int64)/2, newGivenType->supportsSQLnull(), + asciiType = new (h) SQLVarChar(sizeof(Int64), newGivenType->supportsSQLnull(), FALSE, FALSE, CharInfo::GBK); } else http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/c86d2385/core/sql/generator/Generator.cpp ---------------------------------------------------------------------- diff --git a/core/sql/generator/Generator.cpp b/core/sql/generator/Generator.cpp index d1e174b..24b7eb9 100644 --- a/core/sql/generator/Generator.cpp +++ b/core/sql/generator/Generator.cpp @@ -3147,9 +3147,15 @@ void Generator::setHBaseSmallScanner(Int32 hbaseRowSize, double estRowsAccessed, { if (CmpCommon::getDefault(HBASE_SMALL_SCANNER) == DF_SYSTEM) { - if((hbaseRowSize*estRowsAccessed)<hbaseBlockSize) + if(((hbaseRowSize*estRowsAccessed)<hbaseBlockSize) && (estRowsAccessed>0))//added estRowsAccessed > 0 because MDAM costing is not populating this field correctly hbpa->setUseSmallScanner(TRUE); + hbpa->setUseSmallScannerForProbes(TRUE); }else if (CmpCommon::getDefault(HBASE_SMALL_SCANNER) == DF_ON) + { hbpa->setUseSmallScanner(TRUE); + hbpa->setUseSmallScannerForProbes(TRUE); + } + hbpa->setMaxNumRowsPerHbaseBlock(hbaseBlockSize/hbaseRowSize); } + http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/c86d2385/core/sql/optimizer/NATable.cpp ---------------------------------------------------------------------- diff --git 
a/core/sql/optimizer/NATable.cpp b/core/sql/optimizer/NATable.cpp index c01998c..01981db 100644 --- a/core/sql/optimizer/NATable.cpp +++ b/core/sql/optimizer/NATable.cpp @@ -8736,8 +8736,8 @@ NATableDB::free_entries_with_QI_key(Int32 numKeys, SQL_QIKEY* qiKeyArray) { NATable * currTable = cachedTableList_[currIndx]; - // Only need to remove seabase tables - if (!currTable->isSeabaseTable()) + // Only need to remove seabase tables and external Hive/hbase tables + if (!currTable->isSeabaseTable() && !currTable->hasExternalTable()) { currIndx++; continue; http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/c86d2385/core/sql/parser/ElemDDLCol.cpp ---------------------------------------------------------------------- diff --git a/core/sql/parser/ElemDDLCol.cpp b/core/sql/parser/ElemDDLCol.cpp index 3f11cf5..33ee187 100644 --- a/core/sql/parser/ElemDDLCol.cpp +++ b/core/sql/parser/ElemDDLCol.cpp @@ -59,7 +59,6 @@ ElemDDLColDef::ElemDDLColDef( const NAString *columnFamily, const NAString *columnName, NAType * pColumnDataType, - ElemDDLNode * pColDefaultNode, ElemDDLNode * pColAttrList, CollHeap * heap) : ElemDDLNode(ELM_COL_DEF_ELEM), @@ -88,7 +87,8 @@ ElemDDLColDef::ElemDDLColDef( isLobAttrsSpec_(FALSE), lobStorage_(Lob_Invalid_Storage), isSeabaseSerializedSpec_(FALSE), - seabaseSerialized_(FALSE) + seabaseSerialized_(FALSE), + isColDefaultSpec_(FALSE) { // ComASSERT(pColumnDataType NEQ NULL); @@ -152,252 +152,12 @@ ElemDDLColDef::ElemDDLColDef( } } - setChild(INDEX_ELEM_DDL_COL_DEFAULT_VALUE, pColDefaultNode); setChild(INDEX_ELEM_DDL_COL_ATTR_LIST, pColAttrList); // initialize data member pDefault_ - ElemDDLColDefault * pColDefault = NULL; ComBoolean isIdentityColumn = FALSE; - if (pColDefaultNode NEQ NULL) - { - ComASSERT(pColDefaultNode->castToElemDDLColDefault() NEQ NULL); - pColDefault = pColDefaultNode->castToElemDDLColDefault(); - } - if (pColDefault NEQ NULL) - { - switch (pColDefault->getColumnDefaultType()) - { - case 
ElemDDLColDefault::COL_NO_DEFAULT: - defaultClauseStatus_ = NO_DEFAULT_CLAUSE_SPEC; - break; - case ElemDDLColDefault::COL_DEFAULT: - { - defaultClauseStatus_ = DEFAULT_CLAUSE_SPEC; - - if (pColDefault->getSGOptions()) - { - isIdentityColumn = TRUE; - pSGOptions_ = pColDefault->getSGOptions(); - pSGLocation_ = pColDefault->getSGLocation(); - } - else - { - ComASSERT(pColDefault->getDefaultValueExpr() NEQ NULL); - pDefault_ = pColDefault->getDefaultValueExpr(); - } - - // The cast ItemExpr to ConstValue for (ConstValue *)pDefault_; - // statement below sets arbitary value for the isNULL_. - // Bypass these checks for ID column (basically ITM_IDENTITY). - ConstValue *cvDef = (ConstValue *)pDefault_; - if ((cvDef && !cvDef->isNull()) && (!isIdentityColumn)) - { - const NAType *cvTyp = cvDef->getType(); - NABoolean isAnErrorAlreadyIssued = FALSE; - - if ( cvTyp->getTypeQualifier() == NA_CHARACTER_TYPE ) - { - CharInfo::CharSet defaultValueCS = ((const CharType *)cvTyp)->getCharSet(); - // Always check for INFER_CHARSET setting before the ICAT setting. 
- NAString inferCharSetFlag; - if (getCharSetInferenceSetting(inferCharSetFlag) == TRUE && - NOT cvDef->isStrLitWithCharSetPrefixSpecified()) - { - if (pColumnDataType->getTypeQualifier() == NA_CHARACTER_TYPE - && ((const CharType *)pColumnDataType)->getCharSet() == CharInfo::UCS2 - && SqlParser_DEFAULT_CHARSET == CharInfo::UCS2 - && defaultValueCS == CharInfo::ISO88591 - ) - { - *SqlParser_Diags << DgSqlCode(-1186) - << DgColumnName(ToAnsiIdentifier(getColumnName())) - << DgString0(pColumnDataType->getTypeSQLname(TRUE/*terse*/)) - << DgString1(cvTyp->getTypeSQLname(TRUE/*terse*/)); - isAnErrorAlreadyIssued = TRUE; - } - else - { - cvTyp = cvDef -> pushDownType(*columnDataType_, NA_CHARACTER_TYPE); - } - } - else if (CmpCommon::getDefault(ALLOW_IMPLICIT_CHAR_CASTING) == DF_ON && - NOT cvDef->isStrLitWithCharSetPrefixSpecified() && - cvTyp->getTypeQualifier() == NA_CHARACTER_TYPE && - SqlParser_DEFAULT_CHARSET == CharInfo::ISO88591 && - defaultValueCS == CharInfo::UnknownCharSet) - { - cvTyp = cvDef -> pushDownType(*columnDataType_, NA_CHARACTER_TYPE); - } - - } // column default value has character data type - - if (NOT isAnErrorAlreadyIssued && - pColumnDataType->getTypeQualifier() == NA_CHARACTER_TYPE && - cvTyp->getTypeQualifier() == NA_CHARACTER_TYPE && - ( - CmpCommon::getDefault(ALLOW_IMPLICIT_CHAR_CASTING) == DF_ON || - NOT cvDef->isStrLitWithCharSetPrefixSpecified())) - { - const CharType *cdCharType = (const CharType *)pColumnDataType; - const CharType *cvCharType = (const CharType *)cvTyp; - CharInfo::CharSet cdCharSet = cdCharType->getCharSet(); // cd = column definition - CharInfo::CharSet cvCharSet = cvCharType->getCharSet(); // cv = constant value - if (cvCharSet == CharInfo::ISO88591) // default value is a _ISO88591 str lit - { - - } - else if ( (cvCharSet == CharInfo::UNICODE || // default value is a _UCS2 string literal - cvCharSet == CharInfo::UTF8) && // or a _UTF8 string literal - cdCharSet != cvCharSet ) - { - // - // Check to see if all 
characters in the specified column default - // string literal value can be successfully converted/translated - // to the actual character set of the column. - // - char buf[2032]; // the output buffer - should be big enough - buf[0] = '\0'; - enum cnv_charset eCnvCS = convertCharsetEnum( cdCharSet ); - const char * pInStr = cvDef->getRawText()->data(); - Int32 inStrLen = cvDef->getRawText()->length(); - char * p1stUnstranslatedChar = NULL; - UInt32 outStrLenInBytes = 0; - unsigned charCount = 0; // number of characters translated/converted - Int32 cnvErrStatus = 0; - char *pSubstitutionChar = NULL; - Int32 convFlags = 0; - - if ( cvCharSet == CharInfo::UNICODE ) - { - cnvErrStatus = - UTF16ToLocale - ( cnv_version1 // in - const enum cnv_version version - , pInStr // in - const char *in_bufr - , inStrLen // in - const int in_len - , buf // out - const char *out_bufr - , 2016 // in - const int out_len - , eCnvCS // in - enum cnv_charset charset - , p1stUnstranslatedChar // out - char * & first_untranslated_char - , &outStrLenInBytes // out - unsigned int *output_data_len_p - , convFlags // in - const int cnv_flags - , (Int32)TRUE // in - const int addNullAtEnd_flag - , (Int32)FALSE // in - const int allow_invalids - , &charCount // out - unsigned int * translated_char_cnt_p - , pSubstitutionChar // in - const char *substitution_char - ); - } - else // cvCharSet must be CharInfo::UTF8 - { - cnvErrStatus = - UTF8ToLocale - ( cnv_version1 // in - const enum cnv_version version - , pInStr // in - const char *in_bufr - , inStrLen // in - const int in_len - , buf // out - const char *out_bufr - , 2016 // in - const int out_len - , eCnvCS // in - enum cnv_charset charset - , p1stUnstranslatedChar // out - char * & first_untranslated_char - , &outStrLenInBytes // out - unsigned int *output_data_len_p - , (Int32)TRUE // in - const int addNullAtEnd_flag - , (Int32)FALSE // in - const int allow_invalids - , &charCount // out - unsigned int * translated_char_cnt_p - , 
pSubstitutionChar // in - const char *substitution_char - ); - } - switch (cnvErrStatus) - { - case 0: // success - case CNV_ERR_NOINPUT: // an empty input string will get this error code - { - ConstValue *pMBStrLitConstValue ; - // convert the string literal saved in cvDef (column default value) - // from UNICODE (e.g. UTF16) to the column character data type - if ( cdCharSet != CharInfo::UNICODE) - { - NAString mbs2(buf, PARSERHEAP()); // note that buf is NULL terminated - pMBStrLitConstValue = - new(PARSERHEAP()) ConstValue ( mbs2 - , cdCharSet // use this for str lit prefix - , CharInfo::DefaultCollation - , CharInfo::COERCIBLE - , PARSERHEAP() - ); - } - else - { - NAWString mbs2((NAWchar*)buf, PARSERHEAP()); // note that buf is NULL terminated - pMBStrLitConstValue = - new(PARSERHEAP()) ConstValue ( mbs2 - , cdCharSet // use this for str lit prefix - , CharInfo::DefaultCollation - , CharInfo::COERCIBLE - , PARSERHEAP() - ); - } - delete pDefault_; // deallocate the old ConstValue object - cvDef = NULL; // do not use cvDef anymore - pDefault_ = pMBStrLitConstValue; - pColDefault->setDefaultValueExpr(pDefault_); - } - break; - case CNV_ERR_INVALID_CHAR: - { - // 1401 == CAT_UNABLE_TO_CONVERT_COLUMN_DEFAULT_VALUE_TO_CHARSET - *SqlParser_Diags << DgSqlCode(-1401) - << DgColumnName(ToAnsiIdentifier(getColumnName())) - << DgString0(CharInfo::getCharSetName(cdCharSet)); - } - break; - case CNV_ERR_BUFFER_OVERRUN: // output buffer not big enough - case CNV_ERR_INVALID_CS: - default: - CMPABORT_MSG("Parser internal logic error"); - break; - } // switch - } - else if(!pColumnDataType->isCompatible(*cvTyp)) - { - if (NOT isAnErrorAlreadyIssued) - { - *SqlParser_Diags << DgSqlCode(-1186) - << DgColumnName(ToAnsiIdentifier(getColumnName())) - << DgString0(pColumnDataType->getTypeSQLname(TRUE/*terse*/)) - << DgString1(cvTyp->getTypeSQLname(TRUE/*terse*/)); - isAnErrorAlreadyIssued = TRUE; - } - } - } // column has character data type - else - // if interval data type, the 
default value must have the same - // interval qualifier as the column. - if (NOT isAnErrorAlreadyIssued && - (!pColumnDataType->isCompatible(*cvTyp) || - (pColumnDataType->getTypeQualifier() == NA_INTERVAL_TYPE && - pColumnDataType->getFSDatatype() != cvTyp->getFSDatatype()))) - { - *SqlParser_Diags << DgSqlCode(-1186) - << DgColumnName(ToAnsiIdentifier(getColumnName())) - << DgString0(pColumnDataType->getTypeSQLname(TRUE/*terse*/)) - << DgString1(cvTyp->getTypeSQLname(TRUE/*terse*/)); - isAnErrorAlreadyIssued = TRUE; - } - } - } - break; - case ElemDDLColDefault::COL_COMPUTED_DEFAULT: - { - defaultClauseStatus_ = DEFAULT_CLAUSE_SPEC; - computedDefaultExpr_ = pColDefault->getComputedDefaultExpr(); - } - break; - default: - CMPABORT_MSG("Parser internal logic error"); - break; - } - } - // // Traverse the list of column attributes to check for duplicate // HEADING clause and duplicate NOT NULL column constraint definition @@ -415,12 +175,12 @@ ElemDDLColDef::ElemDDLColDef( // has specified NOT NULL NOT DROPPABLE for IDENTITY // column. If not specified, then automatically add // it. - if (isIdentityColumn) + if (pSGOptions_) //isIdentityColumn { // if NOT NULL not specified, then specify it here. if(NOT getIsConstraintNotNullSpecified()) isNotNullSpec_ = TRUE; - + // [NOT] DROPPABLE is the only attribute for NOT NULL. if (pConstraintNotNull_) { @@ -431,12 +191,12 @@ ElemDDLColDef::ElemDDLColDef( << DgColumnName(ToAnsiIdentifier(getColumnName())); return; } - else - { - // add the NOT DROPPABLE attribute to the NOT NULL . - pConstraintNotNull_->setConstraintAttributes - (new (PARSERHEAP()) ElemDDLConstraintAttrDroppable(FALSE)); - } + else + { + // add the NOT DROPPABLE attribute to the NOT NULL . 
+ pConstraintNotNull_->setConstraintAttributes + (new (PARSERHEAP()) ElemDDLConstraintAttrDroppable(FALSE)); + } } else { @@ -446,7 +206,7 @@ ElemDDLColDef::ElemDDLColDef( (new (PARSERHEAP()) ElemDDLConstraintAttrDroppable(FALSE)); } } //if isIdentityColumn - + // // All column attributes has been checked and saved. // If there exists a NOT NULL NONDROPPABLE constraint @@ -530,6 +290,255 @@ ElemDDLColDef::setChild(Lng32 index, ExprNode * pChildNode) } void +ElemDDLColDef::setDefaultAttribute(ElemDDLNode * pColDefaultNode) +{ + ElemDDLColDefault * pColDefault = NULL; + ComBoolean isIdentityColumn = FALSE; + + NAType * pColumnDataType = columnDataType_; + + if (pColDefaultNode NEQ NULL) + { + ComASSERT(pColDefaultNode->castToElemDDLColDefault() NEQ NULL); + pColDefault = pColDefaultNode->castToElemDDLColDefault(); + } + + if (pColDefault NEQ NULL) + { + switch (pColDefault->getColumnDefaultType()) + { + case ElemDDLColDefault::COL_NO_DEFAULT: + defaultClauseStatus_ = NO_DEFAULT_CLAUSE_SPEC; + break; + case ElemDDLColDefault::COL_DEFAULT: + { + defaultClauseStatus_ = DEFAULT_CLAUSE_SPEC; + + if (pColDefault->getSGOptions()) + { + isIdentityColumn = TRUE; + pSGOptions_ = pColDefault->getSGOptions(); + pSGLocation_ = pColDefault->getSGLocation(); + } + else + { + ComASSERT(pColDefault->getDefaultValueExpr() NEQ NULL); + pDefault_ = pColDefault->getDefaultValueExpr(); + } + + // The cast ItemExpr to ConstValue for (ConstValue *)pDefault_; + // statement below sets arbitary value for the isNULL_. + // Bypass these checks for ID column (basically ITM_IDENTITY). + ConstValue *cvDef = (ConstValue *)pDefault_; + if ((cvDef && !cvDef->isNull()) && (!isIdentityColumn)) + { + const NAType *cvTyp = cvDef->getType(); + NABoolean isAnErrorAlreadyIssued = FALSE; + + if ( cvTyp->getTypeQualifier() == NA_CHARACTER_TYPE ) + { + CharInfo::CharSet defaultValueCS = ((const CharType *)cvTyp)->getCharSet(); + // Always check for INFER_CHARSET setting before the ICAT setting. 
+ NAString inferCharSetFlag; + if (getCharSetInferenceSetting(inferCharSetFlag) == TRUE && + NOT cvDef->isStrLitWithCharSetPrefixSpecified()) + { + if (pColumnDataType->getTypeQualifier() == NA_CHARACTER_TYPE + && ((const CharType *)pColumnDataType)->getCharSet() == CharInfo::UCS2 + && SqlParser_DEFAULT_CHARSET == CharInfo::UCS2 + && defaultValueCS == CharInfo::ISO88591 + ) + { + *SqlParser_Diags << DgSqlCode(-1186) + << DgColumnName(ToAnsiIdentifier(getColumnName())) + << DgString0(pColumnDataType->getTypeSQLname(TRUE/*terse*/)) + << DgString1(cvTyp->getTypeSQLname(TRUE/*terse*/)); + isAnErrorAlreadyIssued = TRUE; + } + else + { + cvTyp = cvDef -> pushDownType(*columnDataType_, NA_CHARACTER_TYPE); + } + } + else if (CmpCommon::getDefault(ALLOW_IMPLICIT_CHAR_CASTING) == DF_ON && + NOT cvDef->isStrLitWithCharSetPrefixSpecified() && + cvTyp->getTypeQualifier() == NA_CHARACTER_TYPE && + SqlParser_DEFAULT_CHARSET == CharInfo::ISO88591 && + defaultValueCS == CharInfo::UnknownCharSet) + { + cvTyp = cvDef -> pushDownType(*columnDataType_, NA_CHARACTER_TYPE); + } + + } // column default value has character data type + + if (NOT isAnErrorAlreadyIssued && + pColumnDataType->getTypeQualifier() == NA_CHARACTER_TYPE && + cvTyp->getTypeQualifier() == NA_CHARACTER_TYPE && + ( + CmpCommon::getDefault(ALLOW_IMPLICIT_CHAR_CASTING) == DF_ON || + NOT cvDef->isStrLitWithCharSetPrefixSpecified())) + { + const CharType *cdCharType = (const CharType *)pColumnDataType; + const CharType *cvCharType = (const CharType *)cvTyp; + CharInfo::CharSet cdCharSet = cdCharType->getCharSet(); // cd = column definition + CharInfo::CharSet cvCharSet = cvCharType->getCharSet(); // cv = constant value + if (cvCharSet == CharInfo::ISO88591) // default value is a _ISO88591 str lit + { + + } + else if ( (cvCharSet == CharInfo::UNICODE || // default value is a _UCS2 string literal + cvCharSet == CharInfo::UTF8) && // or a _UTF8 string literal + cdCharSet != cvCharSet ) + { + // + // Check to see if all 
characters in the specified column default + // string literal value can be successfully converted/translated + // to the actual character set of the column. + // + char buf[2032]; // the output buffer - should be big enough + buf[0] = '\0'; + enum cnv_charset eCnvCS = convertCharsetEnum( cdCharSet ); + const char * pInStr = cvDef->getRawText()->data(); + Int32 inStrLen = cvDef->getRawText()->length(); + char * p1stUnstranslatedChar = NULL; + UInt32 outStrLenInBytes = 0; + unsigned charCount = 0; // number of characters translated/converted + Int32 cnvErrStatus = 0; + char *pSubstitutionChar = NULL; + Int32 convFlags = 0; + + if ( cvCharSet == CharInfo::UNICODE ) + { + cnvErrStatus = + UTF16ToLocale + ( cnv_version1 // in - const enum cnv_version version + , pInStr // in - const char *in_bufr + , inStrLen // in - const int in_len + , buf // out - const char *out_bufr + , 2016 // in - const int out_len + , eCnvCS // in - enum cnv_charset charset + , p1stUnstranslatedChar // out - char * & first_untranslated_char + , &outStrLenInBytes // out - unsigned int *output_data_len_p + , convFlags // in - const int cnv_flags + , (Int32)TRUE // in - const int addNullAtEnd_flag + , (Int32)FALSE // in - const int allow_invalids + , &charCount // out - unsigned int * translated_char_cnt_p + , pSubstitutionChar // in - const char *substitution_char + ); + } + else // cvCharSet must be CharInfo::UTF8 + { + cnvErrStatus = + UTF8ToLocale + ( cnv_version1 // in - const enum cnv_version version + , pInStr // in - const char *in_bufr + , inStrLen // in - const int in_len + , buf // out - const char *out_bufr + , 2016 // in - const int out_len + , eCnvCS // in - enum cnv_charset charset + , p1stUnstranslatedChar // out - char * & first_untranslated_char + , &outStrLenInBytes // out - unsigned int *output_data_len_p + , (Int32)TRUE // in - const int addNullAtEnd_flag + , (Int32)FALSE // in - const int allow_invalids + , &charCount // out - unsigned int * translated_char_cnt_p + , 
pSubstitutionChar // in - const char *substitution_char + ); + } + switch (cnvErrStatus) + { + case 0: // success + case CNV_ERR_NOINPUT: // an empty input string will get this error code + { + ConstValue *pMBStrLitConstValue ; + // convert the string literal saved in cvDef (column default value) + // from UNICODE (e.g. UTF16) to the column character data type + if ( cdCharSet != CharInfo::UNICODE) + { + NAString mbs2(buf, PARSERHEAP()); // note that buf is NULL terminated + pMBStrLitConstValue = + new(PARSERHEAP()) ConstValue ( mbs2 + , cdCharSet // use this for str lit prefix + , CharInfo::DefaultCollation + , CharInfo::COERCIBLE + , PARSERHEAP() + ); + } + else + { + NAWString mbs2((NAWchar*)buf, PARSERHEAP()); // note that buf is NULL terminated + pMBStrLitConstValue = + new(PARSERHEAP()) ConstValue ( mbs2 + , cdCharSet // use this for str lit prefix + , CharInfo::DefaultCollation + , CharInfo::COERCIBLE + , PARSERHEAP() + ); + } + delete pDefault_; // deallocate the old ConstValue object + cvDef = NULL; // do not use cvDef anymore + pDefault_ = pMBStrLitConstValue; + pColDefault->setDefaultValueExpr(pDefault_); + } + break; + case CNV_ERR_INVALID_CHAR: + { + // 1401 == CAT_UNABLE_TO_CONVERT_COLUMN_DEFAULT_VALUE_TO_CHARSET + *SqlParser_Diags << DgSqlCode(-1401) + << DgColumnName(ToAnsiIdentifier(getColumnName())) + << DgString0(CharInfo::getCharSetName(cdCharSet)); + } + break; + case CNV_ERR_BUFFER_OVERRUN: // output buffer not big enough + case CNV_ERR_INVALID_CS: + default: + CMPABORT_MSG("Parser internal logic error"); + break; + } // switch + } + else if(!pColumnDataType->isCompatible(*cvTyp)) + { + if (NOT isAnErrorAlreadyIssued) + { + *SqlParser_Diags << DgSqlCode(-1186) + << DgColumnName(ToAnsiIdentifier(getColumnName())) + << DgString0(pColumnDataType->getTypeSQLname(TRUE/*terse*/)) + << DgString1(cvTyp->getTypeSQLname(TRUE/*terse*/)); + isAnErrorAlreadyIssued = TRUE; + } + } + } // column has character data type + else + // if interval data type, the 
default value must have the same + // interval qualifier as the column. + if (NOT isAnErrorAlreadyIssued && + (!pColumnDataType->isCompatible(*cvTyp) || + (pColumnDataType->getTypeQualifier() == NA_INTERVAL_TYPE && + pColumnDataType->getFSDatatype() != cvTyp->getFSDatatype()))) + { + *SqlParser_Diags << DgSqlCode(-1186) + << DgColumnName(ToAnsiIdentifier(getColumnName())) + << DgString0(pColumnDataType->getTypeSQLname(TRUE/*terse*/)) + << DgString1(cvTyp->getTypeSQLname(TRUE/*terse*/)); + isAnErrorAlreadyIssued = TRUE; + } + } + } + break; + case ElemDDLColDefault::COL_COMPUTED_DEFAULT: + { + defaultClauseStatus_ = DEFAULT_CLAUSE_SPEC; + computedDefaultExpr_ = pColDefault->getComputedDefaultExpr(); + } + break; + default: + CMPABORT_MSG("Parser internal logic error"); + break; + } + } + +} + +void ElemDDLColDef::setColumnAttribute(ElemDDLNode * pColAttr) { switch(pColAttr->getOperatorType()) @@ -564,6 +573,7 @@ ElemDDLColDef::setColumnAttribute(ElemDDLNode * pColAttr) { // Duplicate NOT NULL clauses in column definition. *SqlParser_Diags << DgSqlCode(-3052) + << DgString0("NOT NULL") << DgColumnName(ToAnsiIdentifier(getColumnName())); } isNotNullSpec_ = TRUE; @@ -692,8 +702,9 @@ ElemDDLColDef::setColumnAttribute(ElemDDLNode * pColAttr) if(TRUE == isLobAttrsSpec_) { // Duplicate LOB attrs in column definition. - *SqlParser_Diags << DgSqlCode(-12064) - << DgColumnName(ToAnsiIdentifier(getColumnName())); + *SqlParser_Diags << DgSqlCode(-3052) + << DgString0("LOB") + << DgColumnName(ToAnsiIdentifier(getColumnName())); } isLobAttrsSpec_ = TRUE; @@ -706,9 +717,10 @@ ElemDDLColDef::setColumnAttribute(ElemDDLNode * pColAttr) ComASSERT( NULL NEQ pColAttr->castToElemDDLSeabaseSerialized()) if(TRUE == isSeabaseSerializedSpec_) { - // Duplicate attrs in column definition. - *SqlParser_Diags << DgSqlCode(-12064) - << DgColumnName(ToAnsiIdentifier(getColumnName())); + // Duplicate SERIALIZED attrs in column definition. 
+ *SqlParser_Diags << DgSqlCode(-3052) + << DgString0("SERIALIZED") + << DgColumnName(ToAnsiIdentifier(getColumnName())); } isSeabaseSerializedSpec_ = TRUE; @@ -716,6 +728,22 @@ ElemDDLColDef::setColumnAttribute(ElemDDLNode * pColAttr) } break; + case ELM_COL_DEFAULT_ELEM: + { + ComASSERT( NULL NEQ pColAttr->castToElemDDLColDefault()); + if(TRUE == isColDefaultSpec_) + { + // Duplicate DEFAULT attrs in column definition. + *SqlParser_Diags << DgSqlCode(-3052) + << DgString0("DEFAULT") + << DgColumnName(ToAnsiIdentifier(getColumnName())); + } + + isColDefaultSpec_ = TRUE; + setDefaultAttribute(pColAttr->castToElemDDLColDefault()); + } + break; + default : ABORT("internal logic error"); break; @@ -1555,7 +1583,7 @@ ElemProxyColDef::ElemProxyColDef(QualifiedName *tableName, NAType *type, ElemDDLNode *colAttrs, CollHeap *heap) - : ElemDDLColDef(NULL, &colName, type, NULL, colAttrs, heap), + : ElemDDLColDef(NULL, &colName, type, colAttrs, heap), tableName_(tableName) { } http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/c86d2385/core/sql/parser/ElemDDLColDef.h ---------------------------------------------------------------------- diff --git a/core/sql/parser/ElemDDLColDef.h b/core/sql/parser/ElemDDLColDef.h index a0f9e00..2f275d0 100644 --- a/core/sql/parser/ElemDDLColDef.h +++ b/core/sql/parser/ElemDDLColDef.h @@ -74,13 +74,14 @@ public: enum defaultClauseStatusType { DEFAULT_CLAUSE_NOT_SPEC = 0, NO_DEFAULT_CLAUSE_SPEC = 1, DEFAULT_CLAUSE_SPEC = 2}; - + enum { INDEX_ELEM_DDL_COL_ATTR_LIST = 0, + MAX_ELEM_DDL_COL_DEF_ARITY = 1}; + // default constructor ElemDDLColDef( const NAString * columnFamily, const NAString * columnName, NAType * pColumnDataType, - ElemDDLNode * pColDefaultValue = NULL, ElemDDLNode * pColAttrList = NULL, CollHeap * heap = PARSERHEAP()); @@ -116,6 +117,8 @@ public: void setDefaultClauseStatus(defaultClauseStatusType d) { defaultClauseStatus_ = d; } + void setDefaultAttribute(ElemDDLNode * pColDefaultNode); + inline const 
defaultClauseStatusType getDefaultClauseStatus() const; // Currently, only three cases are available: @@ -201,6 +204,8 @@ public: NABoolean isSerializedSpecified() { return isSeabaseSerializedSpec_; } inline NABoolean isSeabaseSerialized() { return seabaseSerialized_; } + NABoolean isColDefaultSpecified() { return isColDefaultSpec_; } + // // methods for tracing // @@ -292,10 +297,6 @@ private: // Column Attributes list includes column constraint definitions // and column heading specification (HEADING clause). // - enum { INDEX_ELEM_DDL_COL_DEFAULT_VALUE = 0, - INDEX_ELEM_DDL_COL_ATTR_LIST, - MAX_ELEM_DDL_COL_DEF_ARITY }; - ElemDDLNode * children_[MAX_ELEM_DDL_COL_DEF_ARITY]; ComColumnDirection direction_; // IN / OUT / INOUT @@ -307,6 +308,7 @@ private: NABoolean isSeabaseSerializedSpec_; NABoolean seabaseSerialized_; + NABoolean isColDefaultSpec_; }; // class ElemDDLColDef // -----------------------------------------------------------------------
