[
https://issues.apache.org/jira/browse/PHOENIX-6560?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=17710995#comment-17710995
]
ASF GitHub Bot commented on PHOENIX-6560:
-----------------------------------------
stoty commented on code in PR #1586:
URL: https://github.com/apache/phoenix/pull/1586#discussion_r1162919144
##########
phoenix-core/src/main/java/org/apache/phoenix/mapreduce/OrphanViewTool.java:
##########
@@ -500,18 +522,44 @@ else if (!clean){
}
}
}
+
+
     private void forcefullyDropView(PhoenixConnection phoenixConnection,
             Key key) throws Exception {
-        String deleteRowsFromCatalog = "DELETE FROM " + SYSTEM_CATALOG_NAME +
-                " WHERE " + TENANT_ID + (key.getTenantId() == null ? " IS NULL" : " = '" + key.getTenantId() + "'") + " AND " +
-                TABLE_SCHEM + (key.getSchemaName() == null ? " IS NULL " : " = '" + key.getSchemaName() + "'") + " AND " +
-                TABLE_NAME + " = '" + key.getTableName() + "'";
-        String deleteRowsFromChildLink = "DELETE FROM " + SYSTEM_CHILD_LINK_NAME +
-                " WHERE " + COLUMN_NAME + (key.getTenantId() == null ? " IS NULL" : " = '" + key.getTenantId() + "'") + " AND " +
-                COLUMN_FAMILY + " = '" + (key.getSchemaName() == null ? key.getTableName() : key.getSchemaName() + "." + key.getTableName()) + "'";
+        String deleteRowsFromCatalog = String.format("DELETE FROM " + SYSTEM_CATALOG_NAME
+                + " WHERE " + TENANT_ID + " %s AND " + TABLE_SCHEM + " %s AND "
+                + TABLE_NAME + " = ? ",
+                key.getTenantId() == null ? " IS NULL" : " = ? ",
+                key.getSchemaName() == null ? " IS NULL " : " = ? ");
+        String deleteRowsFromChildLink = String.format("DELETE FROM " + SYSTEM_CHILD_LINK_NAME
+                + " WHERE " + COLUMN_NAME + " %s AND " + COLUMN_FAMILY + " = ? ",
+                key.getTenantId() == null ? " IS NULL" : " = ? ");
         try {
-            phoenixConnection.createStatement().execute(deleteRowsFromCatalog);
-            phoenixConnection.createStatement().execute(deleteRowsFromChildLink);
+            try (PreparedStatement delSysCat =
+                    phoenixConnection.prepareStatement(deleteRowsFromCatalog)) {
+                int param = 0;
+                if (key.getTenantId() != null) {
+                    delSysCat.setString(++param, key.getTenantId());
+                }
+                if (key.getSchemaName() != null) {
+                    delSysCat.setString(++param, key.getSchemaName());
+                }
+                delSysCat.setString(++param, key.getTableName());
+                delSysCat.execute();
+            }
+            try (PreparedStatement delChLink =
+                    phoenixConnection.prepareStatement(deleteRowsFromChildLink)) {
+                int param = 0;
+                if (key.getTenantId() != null) {
+                    delChLink.setString(++param, key.getTenantId());
+                }
+                if (key.getSchemaName() == null) {
+                    delChLink.setString(++param, key.getTableName());
Review Comment:
This looks too similar to the IS NULL case.
It would be more readable without the if, using the original ternary operator for the value instead.
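Something along these lines (an untested sketch, reusing the names from the diff above) would keep the original ternary:
```java
try (PreparedStatement delChLink =
        phoenixConnection.prepareStatement(deleteRowsFromChildLink)) {
    int param = 0;
    if (key.getTenantId() != null) {
        delChLink.setString(++param, key.getTenantId());
    }
    // COLUMN_FAMILY is always bound; only its value depends on the schema name
    delChLink.setString(++param, key.getSchemaName() == null
            ? key.getTableName()
            : key.getSchemaName() + "." + key.getTableName());
    delChLink.execute();
}
```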
##########
phoenix-core/src/main/java/org/apache/phoenix/mapreduce/OrphanViewTool.java:
##########
@@ -500,18 +522,44 @@ else if (!clean){
}
}
}
+
+
     private void forcefullyDropView(PhoenixConnection phoenixConnection,
             Key key) throws Exception {
-        String deleteRowsFromCatalog = "DELETE FROM " + SYSTEM_CATALOG_NAME +
-                " WHERE " + TENANT_ID + (key.getTenantId() == null ? " IS NULL" : " = '" + key.getTenantId() + "'") + " AND " +
-                TABLE_SCHEM + (key.getSchemaName() == null ? " IS NULL " : " = '" + key.getSchemaName() + "'") + " AND " +
-                TABLE_NAME + " = '" + key.getTableName() + "'";
-        String deleteRowsFromChildLink = "DELETE FROM " + SYSTEM_CHILD_LINK_NAME +
-                " WHERE " + COLUMN_NAME + (key.getTenantId() == null ? " IS NULL" : " = '" + key.getTenantId() + "'") + " AND " +
-                COLUMN_FAMILY + " = '" + (key.getSchemaName() == null ? key.getTableName() : key.getSchemaName() + "." + key.getTableName()) + "'";
+        String deleteRowsFromCatalog = String.format("DELETE FROM " + SYSTEM_CATALOG_NAME
+                + " WHERE " + TENANT_ID + " %s AND " + TABLE_SCHEM + " %s AND "
+                + TABLE_NAME + " = ? ",
+                key.getTenantId() == null ? " IS NULL" : " = ? ",
+                key.getSchemaName() == null ? " IS NULL " : " = ? ");
+        String deleteRowsFromChildLink = String.format("DELETE FROM " + SYSTEM_CHILD_LINK_NAME
+                + " WHERE " + COLUMN_NAME + " %s AND " + COLUMN_FAMILY + " = ? ",
+                key.getTenantId() == null ? " IS NULL" : " = ? ");
         try {
-            phoenixConnection.createStatement().execute(deleteRowsFromCatalog);
-            phoenixConnection.createStatement().execute(deleteRowsFromChildLink);
+            try (PreparedStatement delSysCat =
+                    phoenixConnection.prepareStatement(deleteRowsFromCatalog)) {
+                int param = 0;
+                if (key.getTenantId() != null) {
+                    delSysCat.setString(++param, key.getTenantId());
+                }
+                if (key.getSchemaName() != null) {
+                    delSysCat.setString(++param, key.getSchemaName());
+                }
+                delSysCat.setString(++param, key.getTableName());
+                delSysCat.execute();
+            }
+            try (PreparedStatement delChLink =
+                    phoenixConnection.prepareStatement(deleteRowsFromChildLink)) {
+                int param = 0;
+                if (key.getTenantId() != null) {
Review Comment:
These are in the wrong order.
##########
phoenix-core/src/main/java/org/apache/phoenix/schema/transform/Transform.java:
##########
@@ -472,25 +473,41 @@ public static void doCutover(PhoenixConnection connection, SystemTransformRecord
         getMetadataDifference(connection, systemTransformRecord, columnNames, columnValues);
         // TODO In the future, we need to handle rowkey changes and column type changes as well
-        String
-                changeViewStmt = "UPSERT INTO SYSTEM.CATALOG (TENANT_ID, TABLE_SCHEM, TABLE_NAME %s) VALUES (%s, %s, '%s' %s)";
+        String changeViewStmt = "UPSERT INTO SYSTEM.CATALOG "
+                + "(TENANT_ID, TABLE_SCHEM, TABLE_NAME %s) VALUES (?, ?, ? %s)";
         String
-                changeTable = String.format(
-                        "UPSERT INTO SYSTEM.CATALOG (TENANT_ID, TABLE_SCHEM, TABLE_NAME, PHYSICAL_TABLE_NAME %s) VALUES (%s, %s, '%s','%s' %s)",
-                        (columnNames.size() > 0? "," + String.join(",", columnNames):""),
-                        (tenantId==null? null: ("'" + tenantId + "'")),
-                        (schema==null ? null : ("'" + schema + "'")), tableName, newTableName,
-                        (columnValues.size() > 0? "," + String.join(",", columnValues):""));
+                changeTable = String.format("UPSERT INTO SYSTEM.CATALOG "
+                        + "(TENANT_ID, TABLE_SCHEM, TABLE_NAME, PHYSICAL_TABLE_NAME %s ) "
+                        + "VALUES(?, ?, ?, ? %s)", columnNames.size() > 0 ? ","
+                        + String.join(",", columnNames) : "",
Review Comment:
This is another case where we should generate a ? for each element and call setString for each one.
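Roughly like this (untested sketch, using the variables from the diff; `java.util.Collections` assumed for the placeholder list):
```java
String valuePlaceholders = columnValues.isEmpty() ? ""
        : "," + String.join(",", Collections.nCopies(columnValues.size(), "?"));
String changeTable = String.format("UPSERT INTO SYSTEM.CATALOG "
        + "(TENANT_ID, TABLE_SCHEM, TABLE_NAME, PHYSICAL_TABLE_NAME %s) "
        + "VALUES (?, ?, ?, ? %s)",
        columnNames.isEmpty() ? "" : "," + String.join(",", columnNames),
        valuePlaceholders);
try (PreparedStatement changeTableStmt = connection.prepareStatement(changeTable)) {
    // parameters 1-4: tenantId, schema, tableName, newTableName (binding elided here)
    int param = 4;
    for (String value : columnValues) {
        changeTableStmt.setString(++param, value);
    }
    changeTableStmt.execute();
}
```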
##########
phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java:
##########
@@ -1164,19 +1164,23 @@ private long updateStatisticsInternal(PName physicalName, PTable logicalTable, M
     private long updateStatisticsInternal(PName physicalName, PTable logicalTable, Map<String, Object> statsProps, List<byte[]> cfs, boolean checkLastStatsUpdateTime) throws SQLException {
         ReadOnlyProps props = connection.getQueryServices().getProps();
         final long msMinBetweenUpdates = props
-                .getLong(QueryServices.MIN_STATS_UPDATE_FREQ_MS_ATTRIB, QueryServicesOptions.DEFAULT_MIN_STATS_UPDATE_FREQ_MS);
+                .getLong(QueryServices.MIN_STATS_UPDATE_FREQ_MS_ATTRIB,
+                        QueryServicesOptions.DEFAULT_MIN_STATS_UPDATE_FREQ_MS);
         Long scn = connection.getSCN();
         // Always invalidate the cache
         long clientTimeStamp = connection.getSCN() == null ? HConstants.LATEST_TIMESTAMP : scn;
         long msSinceLastUpdate = Long.MAX_VALUE;
         if (checkLastStatsUpdateTime) {
-            String query = "SELECT CURRENT_DATE()," + LAST_STATS_UPDATE_TIME + " FROM " + PhoenixDatabaseMetaData.SYSTEM_STATS_NAME
-                    + " WHERE " + PHYSICAL_NAME + "='" + physicalName.getString() + "' AND " + COLUMN_FAMILY
-                    + " IS NULL AND " + LAST_STATS_UPDATE_TIME + " IS NOT NULL";
-            ResultSet rs = connection.createStatement().executeQuery(query);
-
-            if (rs.next()) {
-                msSinceLastUpdate = rs.getLong(1) - rs.getLong(2);
+            String query = "SELECT CURRENT_DATE()," + LAST_STATS_UPDATE_TIME + " FROM "
+                    + PhoenixDatabaseMetaData.SYSTEM_STATS_NAME
+                    + " WHERE " + PHYSICAL_NAME + "= ? AND " + COLUMN_FAMILY
+                    + " IS NULL AND " + LAST_STATS_UPDATE_TIME + " IS NOT NULL";
+            try (PreparedStatement stmt = connection.prepareStatement(query)) {
Review Comment:
please use a unique name
##########
phoenix-core/src/main/java/org/apache/phoenix/schema/transform/Transform.java:
##########
@@ -472,25 +473,41 @@ public static void doCutover(PhoenixConnection connection, SystemTransformRecord
         getMetadataDifference(connection, systemTransformRecord, columnNames, columnValues);
         // TODO In the future, we need to handle rowkey changes and column type changes as well
-        String
-                changeViewStmt = "UPSERT INTO SYSTEM.CATALOG (TENANT_ID, TABLE_SCHEM, TABLE_NAME %s) VALUES (%s, %s, '%s' %s)";
+        String changeViewStmt = "UPSERT INTO SYSTEM.CATALOG "
+                + "(TENANT_ID, TABLE_SCHEM, TABLE_NAME %s) VALUES (?, ?, ? %s)";
         String
-                changeTable = String.format(
-                        "UPSERT INTO SYSTEM.CATALOG (TENANT_ID, TABLE_SCHEM, TABLE_NAME, PHYSICAL_TABLE_NAME %s) VALUES (%s, %s, '%s','%s' %s)",
-                        (columnNames.size() > 0? "," + String.join(",", columnNames):""),
-                        (tenantId==null? null: ("'" + tenantId + "'")),
-                        (schema==null ? null : ("'" + schema + "'")), tableName, newTableName,
-                        (columnValues.size() > 0? "," + String.join(",", columnValues):""));
+                changeTable = String.format("UPSERT INTO SYSTEM.CATALOG "
+                        + "(TENANT_ID, TABLE_SCHEM, TABLE_NAME, PHYSICAL_TABLE_NAME %s ) "
+                        + "VALUES(?, ?, ?, ? %s)", columnNames.size() > 0 ? ","
+                        + String.join(",", columnNames) : "",
+                        columnNames.size() > 0 ? "," + QueryUtil.getDynamicParams(columnValues.size()) : "");
         LOGGER.info("About to do cutover via " + changeTable);
         TableViewFinderResult childViewsResult = ViewUtil.findChildViews(connection, tenantId, schema, tableName);
         boolean wasCommit = connection.getAutoCommit();
         connection.setAutoCommit(false);
         List<TableInfo> viewsToUpdateCache = new ArrayList<>();
         try {
-            connection.createStatement().execute(changeTable);
-
+            try (PreparedStatement stmt =
+                    connection.prepareStatement(changeTable)) {
Review Comment:
use a unique name
##########
phoenix-core/src/main/java/org/apache/phoenix/schema/transform/Transform.java:
##########
@@ -472,25 +473,41 @@ public static void doCutover(PhoenixConnection connection, SystemTransformRecord
         getMetadataDifference(connection, systemTransformRecord, columnNames, columnValues);
         // TODO In the future, we need to handle rowkey changes and column type changes as well
-        String
-                changeViewStmt = "UPSERT INTO SYSTEM.CATALOG (TENANT_ID, TABLE_SCHEM, TABLE_NAME %s) VALUES (%s, %s, '%s' %s)";
+        String changeViewStmt = "UPSERT INTO SYSTEM.CATALOG "
+                + "(TENANT_ID, TABLE_SCHEM, TABLE_NAME %s) VALUES (?, ?, ? %s)";
         String
-                changeTable = String.format(
-                        "UPSERT INTO SYSTEM.CATALOG (TENANT_ID, TABLE_SCHEM, TABLE_NAME, PHYSICAL_TABLE_NAME %s) VALUES (%s, %s, '%s','%s' %s)",
-                        (columnNames.size() > 0? "," + String.join(",", columnNames):""),
-                        (tenantId==null? null: ("'" + tenantId + "'")),
-                        (schema==null ? null : ("'" + schema + "'")), tableName, newTableName,
-                        (columnValues.size() > 0? "," + String.join(",", columnValues):""));
+                changeTable = String.format("UPSERT INTO SYSTEM.CATALOG "
+                        + "(TENANT_ID, TABLE_SCHEM, TABLE_NAME, PHYSICAL_TABLE_NAME %s ) "
+                        + "VALUES(?, ?, ?, ? %s)", columnNames.size() > 0 ? ","
+                        + String.join(",", columnNames) : "",
+                        columnNames.size() > 0 ? "," + QueryUtil.getDynamicParams(columnValues.size()) : "");
         LOGGER.info("About to do cutover via " + changeTable);
         TableViewFinderResult childViewsResult = ViewUtil.findChildViews(connection, tenantId, schema, tableName);
         boolean wasCommit = connection.getAutoCommit();
         connection.setAutoCommit(false);
         List<TableInfo> viewsToUpdateCache = new ArrayList<>();
         try {
-            connection.createStatement().execute(changeTable);
-
+            try (PreparedStatement stmt =
+                    connection.prepareStatement(changeTable)) {
+                int param = 0;
+                if (tenantId == null) {
+                    stmt.setNull(++param, Types.VARCHAR);
Review Comment:
Interesting.
setString(x, null) doesn't generate a "null" string ?
##########
phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java:
##########
@@ -2163,45 +2210,53 @@ public static void upgradeDescVarLengthRowKeys(PhoenixConnection conn, List<Stri
      * @return true if any upgrades were performed and false otherwise.
      */
     private static boolean upgradeSharedIndex(PhoenixConnection upgradeConn, PhoenixConnection globalConn, String physicalName, boolean bypassUpgrade) throws SQLException {
-        String query =
-                "SELECT TENANT_ID,TABLE_SCHEM,TABLE_NAME\n" +
-                "FROM SYSTEM.CATALOG cat1\n" +
-                "WHERE COLUMN_NAME IS NULL\n" +
-                "AND COLUMN_FAMILY = '" + physicalName + "'\n" +
-                "AND LINK_TYPE = " + LinkType.PHYSICAL_TABLE.getSerializedValue() + "\n" +
-                "ORDER BY TENANT_ID";
-        ResultSet rs = globalConn.createStatement().executeQuery(query);
-        String lastTenantId = null;
-        Connection conn = globalConn;
-        String url = globalConn.getURL();
-        boolean wasUpgraded = false;
-        while (rs.next()) {
-            String fullTableName = SchemaUtil.getTableName(
+        String query = String.format(
+                "SELECT TENANT_ID,TABLE_SCHEM,TABLE_NAME"
+                + "FROM SYSTEM.CATALOG cat1"
+                + "WHERE COLUMN_NAME IS NULL"
+                + "AND COLUMN_FAMILY = ? "
+                + "AND LINK_TYPE = ? "
+                + "ORDER BY TENANT_ID", physicalName,
Review Comment:
format parameters not used
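Something like this (untested sketch) would drop the String.format entirely; note the leading spaces so the concatenated clauses stay valid SQL:
```java
String query = "SELECT TENANT_ID,TABLE_SCHEM,TABLE_NAME"
        + " FROM SYSTEM.CATALOG cat1"
        + " WHERE COLUMN_NAME IS NULL"
        + " AND COLUMN_FAMILY = ?"
        + " AND LINK_TYPE = ?"
        + " ORDER BY TENANT_ID";
try (PreparedStatement selSysCatStmt = globalConn.prepareStatement(query)) {
    selSysCatStmt.setString(1, physicalName);
    selSysCatStmt.setByte(2, LinkType.PHYSICAL_TABLE.getSerializedValue());
    // ... executeQuery() and the existing loop ...
}
```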
##########
phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java:
##########
@@ -1823,6 +1823,25 @@ private ViewKey(String tenantId, String schema, String viewName) {
}
}
+ private static String getTableRVCWithParam(List<String> tableNames) {
+ StringBuilder query = new StringBuilder("(");
+ for (int i = 0; i < tableNames.size(); i += 3) {
+ String tenantId = tableNames.get(i);
+ String schemaName = tableNames.get(i + 1);
+ String tableName = tableNames.get(i + 2);
+ query.append('(');
+ query.append(tenantId == null ? "null" : " ? ");
+ query.append(',');
+ query.append(schemaName == null ? "null" : " ? ");
+ query.append(',');
+ query.append("'" + tableName + "'");
Review Comment:
Why is this not parametrized ?
##########
phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java:
##########
@@ -2163,45 +2210,53 @@ public static void upgradeDescVarLengthRowKeys(PhoenixConnection conn, List<Stri
      * @return true if any upgrades were performed and false otherwise.
      */
     private static boolean upgradeSharedIndex(PhoenixConnection upgradeConn, PhoenixConnection globalConn, String physicalName, boolean bypassUpgrade) throws SQLException {
-        String query =
-                "SELECT TENANT_ID,TABLE_SCHEM,TABLE_NAME\n" +
-                "FROM SYSTEM.CATALOG cat1\n" +
-                "WHERE COLUMN_NAME IS NULL\n" +
-                "AND COLUMN_FAMILY = '" + physicalName + "'\n" +
-                "AND LINK_TYPE = " + LinkType.PHYSICAL_TABLE.getSerializedValue() + "\n" +
-                "ORDER BY TENANT_ID";
-        ResultSet rs = globalConn.createStatement().executeQuery(query);
-        String lastTenantId = null;
-        Connection conn = globalConn;
-        String url = globalConn.getURL();
-        boolean wasUpgraded = false;
-        while (rs.next()) {
-            String fullTableName = SchemaUtil.getTableName(
+        String query = String.format(
+                "SELECT TENANT_ID,TABLE_SCHEM,TABLE_NAME"
+                + "FROM SYSTEM.CATALOG cat1"
+                + "WHERE COLUMN_NAME IS NULL"
+                + "AND COLUMN_FAMILY = ? "
+                + "AND LINK_TYPE = ? "
+                + "ORDER BY TENANT_ID", physicalName,
+                LinkType.PHYSICAL_TABLE.getSerializedValue());
+        try (PreparedStatement selSysCatstmt =
+                globalConn.prepareStatement(query)) {
+            selSysCatstmt.setString(1, physicalName);
+            selSysCatstmt.setByte(2, LinkType.PHYSICAL_TABLE.getSerializedValue());
+            ResultSet rs = selSysCatstmt.executeQuery();
+            String lastTenantId = null;
+            Connection conn = globalConn;
+            String url = globalConn.getURL();
+            boolean wasUpgraded = false;
+            while (rs.next()) {
+                String fullTableName = SchemaUtil.getTableName(
                     rs.getString(PhoenixDatabaseMetaData.TABLE_SCHEM),
                     rs.getString(PhoenixDatabaseMetaData.TABLE_NAME));
-            String tenantId = rs.getString(1);
-            if (tenantId != null && !tenantId.equals(lastTenantId)) {
-                if (lastTenantId != null) {
-                    conn.close();
+                String tenantId = rs.getString(1);
+                if (tenantId != null && !tenantId.equals(lastTenantId)) {
+                    if (lastTenantId != null) {
+                        conn.close();
+                    }
+                    // Open tenant-specific connection when we find a new one
+                    Properties props = new Properties(globalConn.getClientInfo());
+                    props.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, tenantId);
+                    conn = DriverManager.getConnection(url, props);
+                    lastTenantId = tenantId;
+                }
+                PTable table = PhoenixRuntime.getTable(conn, fullTableName);
+                String tableTenantId =
+                        table.getTenantId() == null ? null : table.getTenantId().getString();
+                if (Objects.equal(lastTenantId, tableTenantId) && !table.rowKeyOrderOptimizable()) {
+                    upgradeDescVarLengthRowKeys(upgradeConn, globalConn,
+                        table.getSchemaName().getString(), table.getTableName().getString(), false,
+                        bypassUpgrade);
+                    wasUpgraded = true;
                 }
-                // Open tenant-specific connection when we find a new one
-                Properties props = new Properties(globalConn.getClientInfo());
-                props.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, tenantId);
-                conn = DriverManager.getConnection(url, props);
-                lastTenantId = tenantId;
             }
-            PTable table = PhoenixRuntime.getTable(conn, fullTableName);
-            String tableTenantId = table.getTenantId() == null ? null : table.getTenantId().getString();
-            if (Objects.equal(lastTenantId, tableTenantId) && !table.rowKeyOrderOptimizable()) {
-                upgradeDescVarLengthRowKeys(upgradeConn, globalConn, table.getSchemaName().getString(), table.getTableName().getString(), false, bypassUpgrade);
-                wasUpgraded = true;
+            rs.close();
+            if (lastTenantId != null) {
+                conn.close();
Review Comment:
Could this be replaced with a try-with-resources block?
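For example (sketch only, using the names from the diff above): the statement and ResultSet could be scoped with try-with-resources, while the last tenant-specific connection still needs an explicit close because it is reassigned inside the loop:
```java
try (PreparedStatement selSysCatStmt = globalConn.prepareStatement(query)) {
    selSysCatStmt.setString(1, physicalName);
    selSysCatStmt.setByte(2, LinkType.PHYSICAL_TABLE.getSerializedValue());
    try (ResultSet rs = selSysCatStmt.executeQuery()) {
        // ... existing while (rs.next()) loop ...
    } finally {
        if (lastTenantId != null) {
            conn.close();
        }
    }
}
```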
##########
phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexTool.java:
##########
@@ -598,18 +599,27 @@ private Job configureJobForPartialBuild() throws Exception {
     }
     private long getMaxRebuildAsyncDate(String schemaName, List<String> disableIndexes) throws SQLException {
-        Long maxRebuilAsyncDate=HConstants.LATEST_TIMESTAMP;
-        Long maxDisabledTimeStamp=0L;
-        if (disableIndexes == null || disableIndexes.isEmpty()) { return 0; }
+        Long maxRebuilAsyncDate = HConstants.LATEST_TIMESTAMP;
+        Long maxDisabledTimeStamp = 0L;
+        if (disableIndexes == null || disableIndexes.isEmpty()) {
+            return 0;
+        }
         List<String> quotedIndexes = new ArrayList<String>(disableIndexes.size());
         for (String index : disableIndexes) {
             quotedIndexes.add("'" + index + "'");
         }
-        try (ResultSet rs = connection.createStatement()
-                .executeQuery("SELECT MAX(" + ASYNC_REBUILD_TIMESTAMP + "),MAX("+INDEX_DISABLE_TIMESTAMP+") FROM " + SYSTEM_CATALOG_NAME + " ("
-                        + ASYNC_REBUILD_TIMESTAMP + " BIGINT) WHERE " + TABLE_SCHEM
-                        + (schemaName != null && schemaName.length() > 0 ? "='" + schemaName + "'" : " IS NULL")
-                        + " and " + TABLE_NAME + " IN (" + StringUtils.join(",", quotedIndexes) + ")")) {
+
+        String query = String.format("SELECT MAX(" + ASYNC_REBUILD_TIMESTAMP + "), "
+                + "MAX(" + INDEX_DISABLE_TIMESTAMP + ") FROM "
+                + SYSTEM_CATALOG_NAME + " (" + ASYNC_REBUILD_TIMESTAMP
+                + " BIGINT) WHERE " + TABLE_SCHEM + " %s AND " + TABLE_NAME + " IN ( %s )",
+                (schemaName != null && schemaName.length() > 0) ? " = ? " : " IS NULL ",
+                StringUtils.join(",", quotedIndexes));
Review Comment:
The safe thing to do here is to generate as many ? placeholders as there are elements in the list, and call setXXX for each one.
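Roughly (untested sketch, `java.util.Collections` assumed for the placeholder list):
```java
String inList = String.join(",", Collections.nCopies(disableIndexes.size(), "?"));
String query = "SELECT MAX(" + ASYNC_REBUILD_TIMESTAMP + "), MAX(" + INDEX_DISABLE_TIMESTAMP
        + ") FROM " + SYSTEM_CATALOG_NAME + " (" + ASYNC_REBUILD_TIMESTAMP + " BIGINT)"
        + " WHERE " + TABLE_SCHEM
        + (schemaName != null && schemaName.length() > 0 ? " = ?" : " IS NULL")
        + " AND " + TABLE_NAME + " IN (" + inList + ")";
try (PreparedStatement stmt = connection.prepareStatement(query)) {
    int param = 0;
    if (schemaName != null && schemaName.length() > 0) {
        stmt.setString(++param, schemaName);
    }
    for (String index : disableIndexes) {
        stmt.setString(++param, index);
    }
    try (ResultSet rs = stmt.executeQuery()) {
        // ... existing MAX() handling ...
    }
}
```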
##########
phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java:
##########
@@ -3994,21 +3994,36 @@ protected PhoenixConnection upgradeSystemCatalogIfRequired(PhoenixConnection met
                 MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_14_0,
                 PhoenixDatabaseMetaData.TRANSACTION_PROVIDER + " "
                         + PTinyint.INSTANCE.getSqlTypeName());
-            metaConnection.createStatement().executeUpdate("ALTER TABLE " +
-                    PhoenixDatabaseMetaData.SYSTEM_CATALOG + " SET " +
-                    HConstants.VERSIONS + "= " + props.getInt(DEFAULT_SYSTEM_MAX_VERSIONS_ATTRIB, QueryServicesOptions.DEFAULT_SYSTEM_MAX_VERSIONS) + ",\n" +
-                    ColumnFamilyDescriptorBuilder.KEEP_DELETED_CELLS + "=" + props.getBoolean(DEFAULT_SYSTEM_KEEP_DELETED_CELLS_ATTRIB, QueryServicesOptions.DEFAULT_SYSTEM_KEEP_DELETED_CELLS)
-                    );
-            metaConnection.createStatement().executeUpdate("ALTER TABLE " +
-                    PhoenixDatabaseMetaData.SYSTEM_FUNCTION + " SET " +
-                    TableDescriptorBuilder.SPLIT_POLICY + "='" + SystemFunctionSplitPolicy.class.getName() + "',\n" +
-                    HConstants.VERSIONS + "= " + props.getInt(DEFAULT_SYSTEM_MAX_VERSIONS_ATTRIB, QueryServicesOptions.DEFAULT_SYSTEM_MAX_VERSIONS) + ",\n" +
-                    ColumnFamilyDescriptorBuilder.KEEP_DELETED_CELLS + "=" + props.getBoolean(DEFAULT_SYSTEM_KEEP_DELETED_CELLS_ATTRIB, QueryServicesOptions.DEFAULT_SYSTEM_KEEP_DELETED_CELLS)
-                    );
-            metaConnection.createStatement().executeUpdate("ALTER TABLE " +
-                    PhoenixDatabaseMetaData.SYSTEM_STATS_NAME + " SET " +
-                    TableDescriptorBuilder.SPLIT_POLICY + "='" + SystemStatsSplitPolicy.class.getName() +"'"
-                    );
+            String altCatQry = "ALTER TABLE "
+                    + PhoenixDatabaseMetaData.SYSTEM_CATALOG + " SET "
+                    + HConstants.VERSIONS + "= " + props.getInt(DEFAULT_SYSTEM_MAX_VERSIONS_ATTRIB,
+                    QueryServicesOptions.DEFAULT_SYSTEM_MAX_VERSIONS) + ",\n"
+                    + ColumnFamilyDescriptorBuilder.KEEP_DELETED_CELLS + "="
+                    + props.getBoolean(DEFAULT_SYSTEM_KEEP_DELETED_CELLS_ATTRIB,
+                    QueryServicesOptions.DEFAULT_SYSTEM_KEEP_DELETED_CELLS);
+            try (PreparedStatement altCatQryStmt =
+                    metaConnection.prepareStatement(altCatQry)) {
Review Comment:
No need for PreparedStatements if we don't set any parameters.
We could just put each query in a try-with-resources block that creates a (non-prepared) metaStmt, to make sure we don't leave it hanging.
Just be careful to always re-create metaStmt when replacing metaConnection.
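For example (sketch):
```java
try (Statement metaStmt = metaConnection.createStatement()) {
    metaStmt.executeUpdate(altCatQry);
}
```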
##########
phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java:
##########
@@ -1823,6 +1823,25 @@ private ViewKey(String tenantId, String schema, String viewName) {
}
}
+ private static String getTableRVCWithParam(List<String> tableNames) {
+ StringBuilder query = new StringBuilder("(");
+ for (int i = 0; i < tableNames.size(); i += 3) {
+ String tenantId = tableNames.get(i);
+ String schemaName = tableNames.get(i + 1);
+ String tableName = tableNames.get(i + 2);
+ query.append('(');
+ query.append(tenantId == null ? "null" : " ? ");
Review Comment:
wouldn't setNull work here ?
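A fully parameterized variant could look like this (untested sketch covering both comments on this helper: always emit ?, and let the caller bind setString/setNull for each value, including the table name):
```java
private static String getTableRVCWithParam(List<String> tableNames) {
    StringBuilder query = new StringBuilder("(");
    for (int i = 0; i < tableNames.size(); i += 3) {
        query.append("(?, ?, ?),");
    }
    query.setCharAt(query.length() - 1, ')');
    return query.toString();
}

// caller side, binding each tenantId/schemaName/tableName triple:
int param = 0;
for (String value : tableNames) {
    if (value == null) {
        stmt.setNull(++param, Types.VARCHAR);
    } else {
        stmt.setString(++param, value);
    }
}
```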
##########
phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java:
##########
@@ -4482,8 +4486,9 @@ private String dropColumnMutations(PTable table, List<PColumn> columnsToDrop) th
         }
         buf.setCharAt(buf.length()-1, ')');
-        connection.createStatement().execute(buf.toString());
-
+        try (PreparedStatement delCol =
+                connection.prepareStatement(buf.toString())) {
Review Comment:
no need to prepare if we don't use parameters.
##########
phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java:
##########
@@ -2031,16 +2067,27 @@ private static void upgradeDescVarLengthRowKeys(PhoenixConnection upgradeConn, P
             String theTenantId = tableNames.get(i);
             String theSchemaName = tableNames.get(i+1);
             String theTableName = tableNames.get(i+2);
-            globalConn.createStatement().execute("UPSERT INTO " + PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME +
-                    " (" + PhoenixDatabaseMetaData.TENANT_ID + "," +
-                    PhoenixDatabaseMetaData.TABLE_SCHEM + "," +
-                    PhoenixDatabaseMetaData.TABLE_NAME + "," +
-                    MetaDataEndpointImpl.ROW_KEY_ORDER_OPTIMIZABLE + " BOOLEAN"
-                    + ") VALUES (" +
-                    "'" + (theTenantId == null ? StringUtil.EMPTY_STRING : theTenantId) + "'," +
-                    "'" + (theSchemaName == null ? StringUtil.EMPTY_STRING : theSchemaName) + "'," +
-                    "'" + theTableName + "'," +
-                    "TRUE)");
+            String upsSyscat = String.format("UPSERT INTO "
+                    + PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME
+                    + " (" + PhoenixDatabaseMetaData.TENANT_ID + ","
+                    + PhoenixDatabaseMetaData.TABLE_SCHEM + ","
+                    + PhoenixDatabaseMetaData.TABLE_NAME + ","
+                    + MetaDataEndpointImpl.ROW_KEY_ORDER_OPTIMIZABLE + " BOOLEAN"
+                    + ") VALUES ( ?, ?," + "'" + theTableName + "'," + "TRUE)");
Review Comment:
Why is this not parametrized ?
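Something like this (untested sketch, keeping the EMPTY_STRING defaults from the removed code) would bind the table name as well:
```java
String upsSyscat = "UPSERT INTO " + PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME
        + " (" + PhoenixDatabaseMetaData.TENANT_ID + ","
        + PhoenixDatabaseMetaData.TABLE_SCHEM + ","
        + PhoenixDatabaseMetaData.TABLE_NAME + ","
        + MetaDataEndpointImpl.ROW_KEY_ORDER_OPTIMIZABLE + " BOOLEAN"
        + ") VALUES (?, ?, ?, TRUE)";
try (PreparedStatement upsSyscatStmt = globalConn.prepareStatement(upsSyscat)) {
    upsSyscatStmt.setString(1, theTenantId == null ? StringUtil.EMPTY_STRING : theTenantId);
    upsSyscatStmt.setString(2, theSchemaName == null ? StringUtil.EMPTY_STRING : theSchemaName);
    upsSyscatStmt.setString(3, theTableName);
    upsSyscatStmt.execute();
}
```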
##########
phoenix-core/src/main/java/org/apache/phoenix/schema/transform/Transform.java:
##########
@@ -520,13 +537,28 @@ public static void doCutover(PhoenixConnection connection, SystemTransformRecord
             int batchSize = 0;
             for (TableInfo view : childViewsResult.getLinks()) {
                 String changeView = String.format(changeViewStmt,
-                        (columnNames.size() > 0? "," + String.join(",", columnNames):""),
-                        (view.getTenantId()==null || view.getTenantId().length == 0? null: ("'" + Bytes.toString(view.getTenantId()) + "'")),
-                        (view.getSchemaName()==null || view.getSchemaName().length == 0? null : ("'" + Bytes.toString(view.getSchemaName()) + "'")),
-                        Bytes.toString(view.getTableName()),
-                        (columnValues.size() > 0? "," + String.join(",", columnValues):""));
+                        columnNames.size() > 0 ? "," + String.join(",", columnNames) : "",
Review Comment:
Same list handling as before.
(I think we may already have some helper functions for that)
##########
phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java:
##########
@@ -2563,13 +2618,17 @@ private static void updateIndexesSequenceIfPresent(PhoenixConnection connection,
         String newSchemaName = MetaDataUtil.getViewIndexSequenceSchemaName(physicalName, true);
         String newSequenceName = MetaDataUtil.getViewIndexSequenceName(physicalName, tenantId, true);
         // create new entry with new schema format
-        String upsert = "UPSERT INTO " + PhoenixDatabaseMetaData.SYSTEM_SEQUENCE + " SELECT NULL,\'" + newSchemaName +
-                "\',\'" + newSequenceName
-                + "\'," + START_WITH + "," + CURRENT_VALUE + "," + INCREMENT_BY + "," + CACHE_SIZE + "," + MIN_VALUE
-                + "," + MAX_VALUE + "," + CYCLE_FLAG + "," + LIMIT_REACHED_FLAG + " FROM "
-                + PhoenixDatabaseMetaData.SYSTEM_SEQUENCE + " WHERE " + PhoenixDatabaseMetaData.TENANT_ID
-                + " IS NULL AND " + PhoenixDatabaseMetaData.SEQUENCE_SCHEMA + " = '" + oldSchemaName + "'";
-        connection.createStatement().executeUpdate(upsert);
+        String upsert = "UPSERT INTO " + PhoenixDatabaseMetaData.SYSTEM_SEQUENCE
+                + " SELECT NULL,\'" + newSchemaName + "\',\'" + newSequenceName
Review Comment:
shouldn't these be parametrized ?
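Something along these lines, assuming Phoenix accepts bind parameters in this UPSERT SELECT projection (untested sketch; otherwise the literals may have to stay):
```java
String upsert = "UPSERT INTO " + PhoenixDatabaseMetaData.SYSTEM_SEQUENCE
        + " SELECT NULL, ?, ?, " + START_WITH + "," + CURRENT_VALUE + "," + INCREMENT_BY
        + "," + CACHE_SIZE + "," + MIN_VALUE + "," + MAX_VALUE + "," + CYCLE_FLAG
        + "," + LIMIT_REACHED_FLAG + " FROM " + PhoenixDatabaseMetaData.SYSTEM_SEQUENCE
        + " WHERE " + PhoenixDatabaseMetaData.TENANT_ID + " IS NULL AND "
        + PhoenixDatabaseMetaData.SEQUENCE_SCHEMA + " = ?";
try (PreparedStatement upsertStmt = connection.prepareStatement(upsert)) {
    upsertStmt.setString(1, newSchemaName);
    upsertStmt.setString(2, newSequenceName);
    upsertStmt.setString(3, oldSchemaName);
    upsertStmt.executeUpdate();
}
```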
> Rewrite dynamic SQL queries to use Preparedstatement
> ----------------------------------------------------
>
> Key: PHOENIX-6560
> URL: https://issues.apache.org/jira/browse/PHOENIX-6560
> Project: Phoenix
> Issue Type: Improvement
> Components: core
> Reporter: Istvan Toth
> Assignee: Abhishek Kothalikar
> Priority: Major
>
> Most of the Phoenix code base already uses PreparedStatements, and adds all
> potentially vulnerable data as parameters.
> However, there are some places where we concatenate potentially problematic
> strings into the query.
> While most of those are constants and such, we should preferably pass all
> data as parameters to be on the safe side.
> (We still have to use dynamic strings for the PreparedStatement strings, for
> handling things like IS NULL, empty IN clauses and such.)
> Spotbugs marks these with SQL_NONCONSTANT_STRING_PASSED_TO_EXECUTE, so
> they're easy to find.
--
This message was sent by Atlassian Jira
(v8.20.10#820010)