Repository: phoenix
Updated Branches:
  refs/heads/master e4e1570b8 -> 77ab7dfa2


PHOENIX-3454 ON DUPLICATE KEY construct doesn't work correctly when using lower case column names

Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/b157c485
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/b157c485
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/b157c485

Branch: refs/heads/master
Commit: b157c485d5fc821b38319eb3f497063c1e9f0ffa
Parents: e4e1570
Author: James Taylor <[email protected]>
Authored: Fri Nov 4 18:47:03 2016 -0700
Committer: James Taylor <[email protected]>
Committed: Fri Nov 4 19:15:54 2016 -0700

----------------------------------------------------------------------
 .../phoenix/end2end/OnDuplicateKeyIT.java       | 37 ++++++++++++++++++++
 .../phoenix/index/PhoenixIndexBuilder.java      | 13 +++++--
 2 files changed, 48 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/b157c485/phoenix-core/src/it/java/org/apache/phoenix/end2end/OnDuplicateKeyIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/OnDuplicateKeyIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/OnDuplicateKeyIT.java
index 9a81026..d3cb0af 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/OnDuplicateKeyIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/OnDuplicateKeyIT.java
@@ -519,5 +519,42 @@ public class OnDuplicateKeyIT extends ParallelStatsDisabledIT {
         conn.close();
     }
     
+    @Test
+    public void testDeleteOnSingleLowerCaseVarcharColumn() throws Exception {
+        Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+        Connection conn = DriverManager.getConnection(getUrl(), props);
+        conn.setAutoCommit(false);
+        String tableName = generateUniqueName();
+        String ddl = " create table " + tableName + "(pk varchar primary key, \"counter1\" varchar, \"counter2\" smallint)";
+        conn.createStatement().execute(ddl);
+        String dml = "UPSERT INTO " + tableName + " VALUES('a','b') ON DUPLICATE KEY UPDATE \"counter1\" = null";
+        conn.createStatement().execute(dml);
+        conn.createStatement().execute(dml);
+        conn.commit();
+        
+        ResultSet rs = conn.createStatement().executeQuery("SELECT * FROM " + tableName);
+        assertTrue(rs.next());
+        assertEquals("a",rs.getString(1));
+        assertEquals(null,rs.getString(2));
+        assertFalse(rs.next());
+        
+        dml = "UPSERT INTO " + tableName + " VALUES('a','b',0)";
+        conn.createStatement().execute(dml);
+        dml = "UPSERT INTO " + tableName + " VALUES('a','b', 0) ON DUPLICATE KEY UPDATE \"counter1\" = null, \"counter2\" = \"counter2\" + 1";
+        conn.createStatement().execute(dml);
+        dml = "UPSERT INTO " + tableName + " VALUES('a','b', 0) ON DUPLICATE KEY UPDATE \"counter1\" = 'c', \"counter2\" = \"counter2\" + 1";
+        conn.createStatement().execute(dml);
+        conn.commit();
+        
+        rs = conn.createStatement().executeQuery("SELECT * FROM " + tableName);
+        assertTrue(rs.next());
+        assertEquals("a",rs.getString(1));
+        assertEquals("c",rs.getString(2));
+        assertEquals(2,rs.getInt(3));
+        assertFalse(rs.next());
+        
+        conn.close();
+    }
+    
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b157c485/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexBuilder.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexBuilder.java b/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexBuilder.java
index ac1e2e4..ae0a19f 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexBuilder.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexBuilder.java
@@ -31,6 +31,7 @@ import java.util.Map;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.KeyValue.Type;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Get;
@@ -192,6 +193,7 @@ public class PhoenixIndexBuilder extends NonTxIndexBuilder {
             get.setFilter(new FirstKeyOnlyFilter());
         }
         MultiKeyValueTuple tuple;
+        List<Cell> flattenedCells = null;
         List<Cell>cells = ((HRegion)this.env.getRegion()).get(get, false);
         if (cells.isEmpty()) {
             if (skipFirstOp) {
@@ -201,7 +203,8 @@ public class PhoenixIndexBuilder extends NonTxIndexBuilder {
                 repeat--; // Skip first operation (if first wasn't ON DUPLICATE KEY IGNORE)
             }
             // Base current state off of new row
-            tuple = new MultiKeyValueTuple(flattenCells(inc, estimatedSize));
+            flattenedCells = flattenCells(inc, estimatedSize);
+            tuple = new MultiKeyValueTuple(flattenedCells);
         } else {
             // Base current state off of existing row
             tuple = new MultiKeyValueTuple(cells);
@@ -213,6 +216,12 @@ public class PhoenixIndexBuilder extends NonTxIndexBuilder {
             List<Expression> expressions = operation.getSecond();
             for (int j = 0; j < repeat; j++) { // repeater loop
                 ptr.set(rowKey);
+                // Sort the list of cells (if they've been flattened in which case they're not necessarily
+                // ordered correctly). We only need the list sorted if the expressions are going to be
+                // executed, not when the outer loop is exited. Hence we do it here, at the top of the loop.
+                if (flattenedCells != null) {
+                    Collections.sort(flattenedCells,KeyValue.COMPARATOR);
+                }
                 PRow row = table.newRow(GenericKeyValueBuilder.INSTANCE, ts, ptr, false);
                 for (int i = 0; i < expressions.size(); i++) {
                     Expression expression = expressions.get(i);
@@ -234,7 +243,7 @@ public class PhoenixIndexBuilder extends NonTxIndexBuilder {
                     byte[] bytes = ByteUtil.copyKeyBytesIfNecessary(ptr);
                     row.setValue(column, bytes);
                 }
-                List<Cell> flattenedCells = Lists.newArrayListWithExpectedSize(estimatedSize);
+                flattenedCells = Lists.newArrayListWithExpectedSize(estimatedSize);
                 List<Mutation> mutations = row.toRowMutations();
                 for (Mutation source : mutations) {
                     flattenCells(source, flattenedCells);