Repository: phoenix
Updated Branches:
  refs/heads/encodecolumns2 9180ce22d -> ecc157b09


Code cleanup


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/ecc157b0
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/ecc157b0
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/ecc157b0

Branch: refs/heads/encodecolumns2
Commit: ecc157b09150f0cd62afd5820f3acdd3b57d9c44
Parents: 9180ce2
Author: Samarth <samarth.j...@salesforce.com>
Authored: Mon Feb 13 23:57:30 2017 -0800
Committer: Samarth <samarth.j...@salesforce.com>
Committed: Mon Feb 13 23:57:30 2017 -0800

----------------------------------------------------------------------
 .../apache/phoenix/compile/JoinCompiler.java    |  1 -
 .../coprocessor/BaseScannerRegionObserver.java  |  5 +++
 .../GroupedAggregateRegionObserver.java         |  4 +-
 .../phoenix/coprocessor/ScanRegionObserver.java |  5 +--
 .../UngroupedAggregateRegionObserver.java       |  2 +-
 .../apache/phoenix/join/HashCacheFactory.java   |  1 -
 .../mapreduce/FormatToBytesWritableMapper.java  |  1 -
 .../apache/phoenix/schema/MetaDataClient.java   |  8 ++--
 .../phoenix/schema/PColumnFamilyImpl.java       | 12 +-----
 .../org/apache/phoenix/schema/PTableImpl.java   | 44 +++++---------------
 .../java/org/apache/phoenix/util/IndexUtil.java |  1 -
 11 files changed, 24 insertions(+), 60 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/ecc157b0/phoenix-core/src/main/java/org/apache/phoenix/compile/JoinCompiler.java
----------------------------------------------------------------------
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/compile/JoinCompiler.java 
b/phoenix-core/src/main/java/org/apache/phoenix/compile/JoinCompiler.java
index 9a2651d..eef604b 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/JoinCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/JoinCompiler.java
@@ -1307,7 +1307,6 @@ public class JoinCompiler {
         if (left.getBucketNum() != null) {
             merged.remove(0);
         }
-        //TODO: samarth should projected join table should always have 
non-encoded column names? Is this where we also decide that once we start 
supporting joins then have the storage scheme right.
         return PTableImpl.makePTable(left.getTenantId(), left.getSchemaName(),
                 
PNameFactory.newName(SchemaUtil.getTableName(left.getName().getString(), 
right.getName().getString())),
                 left.getType(), left.getIndexState(), left.getTimeStamp(), 
left.getSequenceNumber(), left.getPKName(),

http://git-wip-us.apache.org/repos/asf/phoenix/blob/ecc157b0/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java
----------------------------------------------------------------------
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java
index 1c479c5..c340216 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java
@@ -47,6 +47,7 @@ import 
org.apache.phoenix.hbase.index.covered.update.ColumnReference;
 import org.apache.phoenix.index.IndexMaintainer;
 import org.apache.phoenix.query.QueryConstants;
 import org.apache.phoenix.schema.KeyValueSchema;
+import org.apache.phoenix.schema.PTable.QualifierEncodingScheme;
 import org.apache.phoenix.schema.StaleRegionBoundaryCacheException;
 import org.apache.phoenix.schema.ValueBitSet;
 import org.apache.phoenix.schema.tuple.MultiKeyValueTuple;
@@ -54,6 +55,7 @@ import 
org.apache.phoenix.schema.tuple.PositionBasedMultiKeyValueTuple;
 import org.apache.phoenix.schema.tuple.PositionBasedResultTuple;
 import org.apache.phoenix.schema.tuple.ResultTuple;
 import org.apache.phoenix.schema.tuple.Tuple;
+import org.apache.phoenix.util.EncodedColumnsUtil;
 import org.apache.phoenix.util.IndexUtil;
 import org.apache.phoenix.util.ScanUtil;
 import org.apache.phoenix.util.ServerUtil;
@@ -114,6 +116,7 @@ abstract public class BaseScannerRegionObserver extends 
BaseRegionObserver {
     public final static String MAX_QUALIFIER = "_MaxQualifier";
     public final static String QUALIFIER_ENCODING_SCHEME = 
"_QualifierEncodingScheme";
     public final static String IMMUTABLE_STORAGE_ENCODING_SCHEME = 
"_ImmutableStorageEncodingScheme";
+    public final static String USE_ENCODED_COLUMN_QUALIFIER_LIST = 
"_UseEncodedColumnQualifierList";
     
     /**
      * Attribute name used to pass custom annotations in Scans and Mutations 
(later). Custom annotations
@@ -124,6 +127,7 @@ abstract public class BaseScannerRegionObserver extends 
BaseRegionObserver {
     /** Exposed for testing */
     public static final String SCANNER_OPENED_TRACE_INFO = "Scanner opened on 
server";
     protected Configuration rawConf;
+    protected QualifierEncodingScheme encodingScheme;
 
     @Override
     public void start(CoprocessorEnvironment e) throws IOException {
@@ -196,6 +200,7 @@ abstract public class BaseScannerRegionObserver extends 
BaseRegionObserver {
             // start exclusive and the stop inclusive.
             ScanUtil.setupReverseScan(scan);
         }
+        this.encodingScheme = 
EncodedColumnsUtil.getQualifierEncodingScheme(scan);
         return s;
     }
 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/ecc157b0/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java
----------------------------------------------------------------------
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java
index 9e82749..da312ae 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java
@@ -412,7 +412,7 @@ public class GroupedAggregateRegionObserver extends 
BaseScannerRegionObserver {
                 acquiredLock = true;
                 synchronized (scanner) {
                     do {
-                        List<Cell> results = useQualifierAsIndex ? new 
EncodedColumnQualiferCellsList(minMaxQualifiers.getFirst(), 
minMaxQualifiers.getSecond(), 
EncodedColumnsUtil.getQualifierEncodingScheme(scan)) : new ArrayList<Cell>();
+                        List<Cell> results = useQualifierAsIndex ? new 
EncodedColumnQualiferCellsList(minMaxQualifiers.getFirst(), 
minMaxQualifiers.getSecond(), encodingScheme) : new ArrayList<Cell>();
                         // Results are potentially returned even when the 
return
                         // value of s.next is false
                         // since this is an indication of whether or not there 
are
@@ -486,7 +486,7 @@ public class GroupedAggregateRegionObserver extends 
BaseScannerRegionObserver {
                     acquiredLock = true;
                     synchronized (scanner) {
                         do {
-                            List<Cell> kvs = useQualifierAsIndex ? new 
EncodedColumnQualiferCellsList(minMaxQualifiers.getFirst(), 
minMaxQualifiers.getSecond(), 
EncodedColumnsUtil.getQualifierEncodingScheme(scan)) : new ArrayList<Cell>();
+                            List<Cell> kvs = useQualifierAsIndex ? new 
EncodedColumnQualiferCellsList(minMaxQualifiers.getFirst(), 
minMaxQualifiers.getSecond(), encodingScheme) : new ArrayList<Cell>();
                             // Results are potentially returned even when the 
return
                             // value of s.next is false
                             // since this is an indication of whether or not 
there

http://git-wip-us.apache.org/repos/asf/phoenix/blob/ecc157b0/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/ScanRegionObserver.java
----------------------------------------------------------------------
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/ScanRegionObserver.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/ScanRegionObserver.java
index d4d4d9e..0c063ce 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/ScanRegionObserver.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/ScanRegionObserver.java
@@ -18,7 +18,6 @@
 package org.apache.phoenix.coprocessor;
 
 import static 
org.apache.phoenix.util.EncodedColumnsUtil.getMinMaxQualifiersFromScan;
-import static 
org.apache.phoenix.util.EncodedColumnsUtil.getQualifierEncodingScheme;
 
 import java.io.ByteArrayInputStream;
 import java.io.ByteArrayOutputStream;
@@ -233,8 +232,6 @@ public class ScanRegionObserver extends 
BaseScannerRegionObserver {
         
         final TupleProjector p = 
TupleProjector.deserializeProjectorFromScan(scan);
         final HashJoinInfo j = HashJoinInfo.deserializeHashJoinFromScan(scan);
-        //TODO: samarth make this a client side check by looking at order by 
and group by expressions. Then use that to set min max qualifiers. We can then 
make useQualifierListAsIndex
-        // a member variable of BaseScannerRegionObserver.
         boolean useQualifierAsIndex = 
EncodedColumnsUtil.useQualifierAsIndex(getMinMaxQualifiersFromScan(scan)) && 
scan.getAttribute(BaseScannerRegionObserver.TOPN) != null;
         innerScanner =
                 getWrappedScanner(c, innerScanner, arrayKVRefs, arrayFuncRefs, 
offset, scan,
@@ -247,7 +244,7 @@ public class ScanRegionObserver extends 
BaseScannerRegionObserver {
         }
         if (scanOffset != null) {
             innerScanner = getOffsetScanner(c, innerScanner,
-                    new OffsetResultIterator(new 
RegionScannerResultIterator(innerScanner, getMinMaxQualifiersFromScan(scan), 
getQualifierEncodingScheme(scan)), scanOffset),
+                    new OffsetResultIterator(new 
RegionScannerResultIterator(innerScanner, getMinMaxQualifiersFromScan(scan), 
encodingScheme), scanOffset),
                     scan.getAttribute(QueryConstants.LAST_SCAN) != null);
         }
         final OrderedResultIterator iterator = deserializeFromScan(scan, 
innerScanner);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/ecc157b0/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
----------------------------------------------------------------------
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
index 70ef609..32e7f84 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
@@ -409,7 +409,7 @@ public class UngroupedAggregateRegionObserver extends 
BaseScannerRegionObserver
             acquiredLock = true;
             synchronized (innerScanner) {
                 do {
-                    List<Cell> results = useQualifierAsIndex ? new 
EncodedColumnQualiferCellsList(minMaxQualifiers.getFirst(), 
minMaxQualifiers.getSecond(), 
EncodedColumnsUtil.getQualifierEncodingScheme(scan)) : new ArrayList<Cell>();
+                    List<Cell> results = useQualifierAsIndex ? new 
EncodedColumnQualiferCellsList(minMaxQualifiers.getFirst(), 
minMaxQualifiers.getSecond(), encodingScheme) : new ArrayList<Cell>();
                     // Results are potentially returned even when the return 
value of s.next is false
                     // since this is an indication of whether or not there are 
more values after the
                     // ones returned

http://git-wip-us.apache.org/repos/asf/phoenix/blob/ecc157b0/phoenix-core/src/main/java/org/apache/phoenix/join/HashCacheFactory.java
----------------------------------------------------------------------
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/join/HashCacheFactory.java 
b/phoenix-core/src/main/java/org/apache/phoenix/join/HashCacheFactory.java
index 1985c2e..921b412 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/join/HashCacheFactory.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/join/HashCacheFactory.java
@@ -122,7 +122,6 @@ public class HashCacheFactory implements ServerCacheFactory 
{
                     int resultSize = (int)Bytes.readVLong(hashCacheByteArray, 
offset);
                     offset += 
WritableUtils.decodeVIntSize(hashCacheByteArray[offset]);
                     ImmutableBytesWritable value = new 
ImmutableBytesWritable(hashCacheByteArray,offset,resultSize);
-                    //TODO: samarth make joins work with position look up.
                     Tuple result = new ResultTuple(ResultUtil.toResult(value));
                     ImmutableBytesPtr key = 
TupleUtil.getConcatenatedValue(result, onExpressions);
                     List<Tuple> tuples = hashCacheMap.get(key);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/ecc157b0/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/FormatToBytesWritableMapper.java
----------------------------------------------------------------------
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/FormatToBytesWritableMapper.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/FormatToBytesWritableMapper.java
index 2ef2127..278489d 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/FormatToBytesWritableMapper.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/FormatToBytesWritableMapper.java
@@ -222,7 +222,6 @@ public abstract class FormatToBytesWritableMapper<RECORD> 
extends Mapper<LongWri
                     family = c.getFamilyName().getBytes();
                     cq = c.getColumnQualifierBytes();
                 } else {
-                    // TODO: samarth verify if this is the right thing to do 
here.
                     cq = c.getName().getBytes();
                 }
                 byte[] cfn = Bytes.add(family, 
QueryConstants.NAMESPACE_SEPARATOR_BYTES, cq);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/ecc157b0/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
----------------------------------------------------------------------
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java 
b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
index 1de57cd..efaeb63 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
@@ -2339,7 +2339,7 @@ public class MetaDataClient {
                         linkStatement.execute();
                     }
                 }
-                if (tableType == VIEW && !changedCqCounters.isEmpty()) { 
//TODO: samarth think about shared indexes
+                if (tableType == VIEW && !changedCqCounters.isEmpty()) {
                     PreparedStatement incrementStatement = 
connection.prepareStatement(INCREMENT_SEQ_NUM);
                     incrementStatement.setString(1, null);
                     incrementStatement.setString(2, 
viewPhysicalTable.getSchemaName().getString());
@@ -2467,7 +2467,7 @@ public class MetaDataClient {
             } else {
                 tableUpsert.setLong(25, guidePostsWidth);
             }
-            tableUpsert.setByte(26, 
immutableStorageScheme.getSerializedMetadataValue()); //TODO: samarth should 
there be a null check here?
+            tableUpsert.setByte(26, 
immutableStorageScheme.getSerializedMetadataValue());
             tableUpsert.setByte(27, 
encodingScheme.getSerializedMetadataValue());
             tableUpsert.execute();
 
@@ -3380,7 +3380,7 @@ public class MetaDataClient {
                     // too since we want clients to get the latest PTable of 
the base table.
                     if (tableType == VIEW) {
                         PreparedStatement incrementStatement = 
connection.prepareStatement(INCREMENT_SEQ_NUM);
-                        incrementStatement.setString(1, null); //TODO: samarth 
verify that tenant id should be null here
+                        incrementStatement.setString(1, null);
                         incrementStatement.setString(2, 
tableForCQCounters.getSchemaName().getString());
                         incrementStatement.setString(3, 
tableForCQCounters.getTableName().getString());
                         incrementStatement.setLong(4, 
tableForCQCounters.getSequenceNumber() + 1);
@@ -3711,8 +3711,6 @@ public class MetaDataClient {
                         Map<String, List<TableRef>> tenantIdTableRefMap = 
Maps.newHashMap();
                         if (result.getSharedTablesToDelete() != null) {
                             for (SharedTableState sharedTableState : 
result.getSharedTablesToDelete()) {
-                                //TODO: samarth I don't think we really care 
about storage scheme and cq counter at this point.
-                                //Probably worthy to change the constructor 
here to not expect the two arguments.
                                 PTableImpl viewIndexTable = new 
PTableImpl(sharedTableState.getTenantId(),
                                         sharedTableState.getSchemaName(), 
sharedTableState.getTableName(), ts,
                                         table.getColumnFamilies(), 
sharedTableState.getColumns(),

http://git-wip-us.apache.org/repos/asf/phoenix/blob/ecc157b0/phoenix-core/src/main/java/org/apache/phoenix/schema/PColumnFamilyImpl.java
----------------------------------------------------------------------
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/schema/PColumnFamilyImpl.java 
b/phoenix-core/src/main/java/org/apache/phoenix/schema/PColumnFamilyImpl.java
index d1a35cf..453e33b 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/schema/PColumnFamilyImpl.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/schema/PColumnFamilyImpl.java
@@ -101,20 +101,10 @@ public class PColumnFamilyImpl implements PColumnFamily {
     }
     
     
-    //TODO: FIXME: samarth think about backward compatibility here since older 
tables won't have column qualifiers in their metadata
+    //TODO: samarth think about backward compatibility here
     @Override
     public PColumn getPColumnForColumnQualifier(byte[] cq) throws 
ColumnNotFoundException {
         Preconditions.checkNotNull(cq);
         return columnsByQualifiers.get(cq);
-//        if (encodedColumnQualifiers) {
-//            return columnsByQualifiers.get(cq);
-//        }
-//        /*
-//         * For tables with non-encoded column names, column qualifiers are
-//         * column name bytes. Also dynamic columns don't have encoded column
-//         * qualifiers. So they could be found in the column name by bytes 
map.
-//         */
-//        return getPColumnForColumnNameBytes(cq);
-
     }
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/ecc157b0/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
----------------------------------------------------------------------
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java 
b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
index 0816fea..037a4f7 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
@@ -116,7 +116,7 @@ public class PTableImpl implements PTable {
     private Map<byte[], PColumnFamily> familyByBytes;
     private Map<String, PColumnFamily> familyByString;
     private ListMultimap<String, PColumn> columnsByName;
-    private ListMultimap<KVColumnFamilyQualifier, PColumn> 
kvColumnsByQualifiers;
+    private Map<KVColumnFamilyQualifier, PColumn> kvColumnsByQualifiers;
     private PName pkName;
     private Integer bucketNum;
     private RowKeySchema rowKeySchema;
@@ -476,7 +476,7 @@ public class PTableImpl implements PTable {
         PColumn[] allColumns;
         
         this.columnsByName = ArrayListMultimap.create(columns.size(), 1);
-        this.kvColumnsByQualifiers = 
ArrayListMultimap.<KVColumnFamilyQualifier, PColumn>create(columns.size(), 1);
+        this.kvColumnsByQualifiers = 
Maps.newHashMapWithExpectedSize(columns.size());
         int numPKColumns = 0;
         if (bucketNum != null) {
             // Add salt column to allColumns and pkColumns, but don't add to
@@ -511,18 +511,11 @@ public class PTableImpl implements PTable {
             String cf = column.getFamilyName() != null ? 
column.getFamilyName().getString() : null;
             if (cf != null && cq != null) {
                 KVColumnFamilyQualifier info = new KVColumnFamilyQualifier(cf, 
cq);
-                if (kvColumnsByQualifiers.put(info, column)) {
-                    int count = 0;
-                    for (PColumn dupColumn : kvColumnsByQualifiers.get(info)) {
-                        if (Objects.equal(familyName, 
dupColumn.getFamilyName())) {
-                            count++;
-                            if (count > 1) {
-                                throw new 
ColumnAlreadyExistsException(schemaName.getString(),
-                                        name.getString(), columnName);
-                            }
-                        }
-                    }
+                if (kvColumnsByQualifiers.get(info) != null) {
+                    throw new 
ColumnAlreadyExistsException(schemaName.getString(),
+                            name.getString(), columnName);
                 }
+                kvColumnsByQualifiers.put(info, column);
             }
         }
         estimatedSize += SizedUtil.sizeOfMap(allColumns.length, 
SizedUtil.POINTER_SIZE, SizedUtil.sizeOfArrayList(1)); // for multi-map
@@ -829,26 +822,11 @@ public class PTableImpl implements PTable {
             return getPColumnForColumnName(columnName);
         } else {
             String family = (String)PVarchar.INSTANCE.toObject(cf);
-            List<PColumn> columns = kvColumnsByQualifiers.get(new 
KVColumnFamilyQualifier(family, cq));
-            int size = columns.size();
-            if (size == 0) {
-                //TODO: samarth should we have a column qualifier not found 
exception?
-                throw new ColumnNotFoundException(Bytes.toString(cq));
+            PColumn col = kvColumnsByQualifiers.get(new 
KVColumnFamilyQualifier(family, cq));
+            if (col == null) {
+                throw new ColumnNotFoundException("No column found for column 
qualifier " + qualifierEncodingScheme.decode(cq));
             }
-            //TODO: samarth I am not convinced if need this logic.
-//            if (size > 1) {
-//                for (PColumn column : columns) {
-//                    if 
(QueryConstants.DEFAULT_COLUMN_FAMILY.equals(column.getFamilyName().getString()))
 {
-//                        // Allow ambiguity with PK column or column in the 
default column family,
-//                        // since a PK column cannot be prefixed and a user 
would not know how to
-//                        // prefix a column in the default column family.
-//                        return column;
-//                    }
-//                }
-//                //TODO: samarth should we have a column qualifier not found 
exception?
-//                throw new 
AmbiguousColumnException(columns.get(0).getName().getString());
-//            }
-            return columns.get(0);
+            return col;
         }
     }
 
@@ -885,7 +863,7 @@ public class PTableImpl implements PTable {
                 this.keyPtr =  new ImmutableBytesPtr(key);
                 this.key = ByteUtil.copyKeyBytesIfNecessary(key);
             }
-            this.columnToValueMap = Maps.newHashMapWithExpectedSize(1);//TODO: 
samarth size it properly
+            this.columnToValueMap = Maps.newHashMapWithExpectedSize(1);
             newMutations();
         }
 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/ecc157b0/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java 
b/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java
index 9c7f9ba..3a75d10 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java
@@ -440,7 +440,6 @@ public class IndexUtil {
             KeyValueSchema keyValueSchema = 
deserializeLocalIndexJoinSchemaFromScan(scan); 
             boolean storeColsInSingleCell = 
scan.getAttribute(BaseScannerRegionObserver.COLUMNS_STORED_IN_SINGLE_CELL) != 
null;
             QualifierEncodingScheme encodingScheme = 
EncodedColumnsUtil.getQualifierEncodingScheme(scan);
-            ImmutableStorageScheme immutableStorageScheme = 
EncodedColumnsUtil.getImmutableStorageScheme(scan);
             Expression[] colExpressions = storeColsInSingleCell ? new 
SingleCellColumnExpression[dataColumns.length] : new 
KeyValueColumnExpression[dataColumns.length];
             for (int i = 0; i < dataColumns.length; i++) {
                 byte[] family = dataColumns[i].getFamily();

Reply via email to