[ 
https://issues.apache.org/jira/browse/PHOENIX-4010?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=16099296#comment-16099296
 ] 

ASF GitHub Bot commented on PHOENIX-4010:
-----------------------------------------

Github user JamesRTaylor commented on a diff in the pull request:

    https://github.com/apache/phoenix/pull/268#discussion_r129185421
  
    --- Diff: 
phoenix-core/src/main/java/org/apache/phoenix/cache/ServerCacheClient.java ---
    @@ -394,4 +420,82 @@ public static String idToString(byte[] uuid) {
             }
             return regionStartKey;
         }
    +
    +    public boolean addServerCache(byte[] startkeyOfRegion, ServerCache 
cache, HashCacheFactory cacheFactory,
    +             byte[] txState, PTable pTable) throws Exception {
    +        HTableInterface table = null;
    +        byte[] cacheId = cache.getId();
    +        try {
    +            ConnectionQueryServices services = 
connection.getQueryServices();
    +            byte[] tableName = pTable.getPhysicalName().getBytes();
    +            table = services.getTable(tableName);
    +            boolean success = addServerCache(table, startkeyOfRegion, 
pTable, cacheId, cache.getCachePtr(), cacheFactory, txState);
    +            //track keys so that we can remove the hash table cache from 
the new regionservers where the cache was re-sent
    +            cache.addKey(startkeyOfRegion);
    +            return success;
    +        } finally {
    +            Closeables.closeQuietly(table);
    +        }
    +    }
    +    
    +    public boolean addServerCache(HTableInterface htable, byte[] key, 
final PTable cacheUsingTable, final byte[] cacheId,
    +            final ImmutableBytesWritable cachePtr, final 
ServerCacheFactory cacheFactory, final byte[] txState)
    +            throws Exception {
    +        byte[] keyInRegion = getKeyInRegion(key);
    +        final Map<byte[], AddServerCacheResponse> results;
    +        try {
    +            results = 
htable.coprocessorService(ServerCachingService.class, keyInRegion, keyInRegion,
    +                    new Batch.Call<ServerCachingService, 
AddServerCacheResponse>() {
    +                        @Override
    +                        public AddServerCacheResponse 
call(ServerCachingService instance) throws IOException {
    +                            ServerRpcController controller = new 
ServerRpcController();
    +                            BlockingRpcCallback<AddServerCacheResponse> 
rpcCallback = new BlockingRpcCallback<AddServerCacheResponse>();
    +                            AddServerCacheRequest.Builder builder = 
AddServerCacheRequest.newBuilder();
    +                            final byte[] tenantIdBytes;
    +                            if (cacheUsingTable.isMultiTenant()) {
    +                                try {
    +                                    tenantIdBytes = 
connection.getTenantId() == null ? null
    +                                            : 
ScanUtil.getTenantIdBytes(cacheUsingTable.getRowKeySchema(),
    +                                                    
cacheUsingTable.getBucketNum() != null, connection.getTenantId(),
    +                                                    
cacheUsingTable.getViewIndexId() != null);
    +                                } catch (SQLException e) {
    +                                    throw new IOException(e);
    +                                }
    +                            } else {
    +                                tenantIdBytes = connection.getTenantId() 
== null ? null
    +                                        : 
connection.getTenantId().getBytes();
    +                            }
    +                            if (tenantIdBytes != null) {
    +                                
builder.setTenantId(ByteStringer.wrap(tenantIdBytes));
    +                            }
    +                            builder.setCacheId(ByteStringer.wrap(cacheId));
    +                            
builder.setCachePtr(org.apache.phoenix.protobuf.ProtobufUtil.toProto(cachePtr));
    +                            builder.setHasProtoBufIndexMaintainer(true);
    +                            
ServerCacheFactoryProtos.ServerCacheFactory.Builder svrCacheFactoryBuider = 
ServerCacheFactoryProtos.ServerCacheFactory
    +                                    .newBuilder();
    +                            
svrCacheFactoryBuider.setClassName(cacheFactory.getClass().getName());
    +                            
builder.setCacheFactory(svrCacheFactoryBuider.build());
    +                            builder.setTxState(ByteStringer.wrap(txState));
    +                            instance.addServerCache(controller, 
builder.build(), rpcCallback);
    +                            if (controller.getFailedOn() != null) { throw 
controller.getFailedOn(); }
    +                            return rpcCallback.get();
    +                        }
    +                    });
    +        } catch (Throwable t) {
    +            throw new Exception(t);
    +        }
    +        if (results != null && results.size() == 1) { return 
results.values().iterator().next().getReturn(); }
    +        return false;
    +
    +    }
    +    
    +    public static ServerCache getCacheForId(List<ServerCache> caches, Long 
cacheId) {
    --- End diff --
    
    How about using a Map<ImmutableBytesPtr,ServerCache> instead of a List and 
switching id to ImmutableBytesPtr? Then you can get rid of this static method.


> Hash Join cache may not be sent to all regionservers when we have stale HBase 
> meta cache
> ----------------------------------------------------------------------------------------
>
>                 Key: PHOENIX-4010
>                 URL: https://issues.apache.org/jira/browse/PHOENIX-4010
>             Project: Phoenix
>          Issue Type: Bug
>            Reporter: Ankit Singhal
>            Assignee: Ankit Singhal
>             Fix For: 4.12.0
>
>         Attachments: PHOENIX-4010.patch, PHOENIX-4010_v1.patch, 
> PHOENIX-4010_v2.patch, PHOENIX-4010_v2_rebased_1.patch, 
> PHOENIX-4010_v2_rebased.patch
>
>
>  If the region locations have changed and our HBase meta cache is not updated, 
> then we might not be sending the hash join cache to all of the region servers 
> hosting the regions.
> ConnectionQueryServicesImpl#getAllTableRegions
> {code}
> boolean reload =false;
>         while (true) {
>             try {
>                 // We could surface the package projected 
> HConnectionImplementation.getNumberOfCachedRegionLocations
>                 // to get the sizing info we need, but this would require a 
> new class in the same package and a cast
>                 // to this implementation class, so it's probably not worth 
> it.
>                 List<HRegionLocation> locations = Lists.newArrayList();
>                 byte[] currentKey = HConstants.EMPTY_START_ROW;
>                 do {
>                     HRegionLocation regionLocation = 
> connection.getRegionLocation(
>                             TableName.valueOf(tableName), currentKey, reload);
>                     locations.add(regionLocation);
>                     currentKey = regionLocation.getRegionInfo().getEndKey();
>                 } while (!Bytes.equals(currentKey, HConstants.EMPTY_END_ROW));
>                 return locations;
> {code}
> Skipping duplicate servers in ServerCacheClient#addServerCache
> {code}
> List<HRegionLocation> locations = 
> services.getAllTableRegions(cacheUsingTable.getPhysicalName().getBytes());
>             int nRegions = locations.size();
>             
> .....
>  if ( ! servers.contains(entry) && 
>                         keyRanges.intersectRegion(regionStartKey, 
> regionEndKey,
>                                 cacheUsingTable.getIndexType() == 
> IndexType.LOCAL)) {  
>                     // Call RPC once per server
>                     servers.add(entry);
> {code}
> For example: Table 'T' has two regions, R1 and R2, originally hosted on 
> regionserver RS1. 
> While the Phoenix/HBase connection is still active, R2 is transitioned to RS2, 
> but the stale meta cache will still give the old region locations, i.e. R1 and 
> R2 on RS1, and when we start copying the hash table, we copy it for R1 and skip 
> R2 as they are hosted on the same regionserver. So, a query on the table will 
> fail as it will be unable to find the hash table cache on RS2 when processing 
> region R2.



--
This message was sent by Atlassian JIRA
(v6.4.14#64029)

Reply via email to