[ https://issues.apache.org/jira/browse/PHOENIX-3534?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=16533111#comment-16533111 ]

ASF GitHub Bot commented on PHOENIX-3534:
-----------------------------------------

Github user JamesRTaylor commented on a diff in the pull request:

    https://github.com/apache/phoenix/pull/303#discussion_r200205809
  
    --- Diff: phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java ---
    @@ -586,48 +590,359 @@ public void getTable(RpcController controller, GetTableRequest request,
                            builder.setMutationTime(minNonZerodisableIndexTimestamp - 1);
                        }
                    }
    -
    -            if (table.getTimeStamp() != tableTimeStamp) {
    +            // the PTable of views and indexes on views might get updated because a column is added to one of
    +            // their parents (this won't change the timestamp)
    +            if (table.getType()!=PTableType.TABLE || table.getTimeStamp() != tableTimeStamp) {
                    builder.setTable(PTableImpl.toProto(table));
                }
                done.run(builder.build());
    -            return;
             } catch (Throwable t) {
                 logger.error("getTable failed", t);
                 ProtobufUtil.setControllerException(controller,
                     ServerUtil.createIOException(SchemaUtil.getTableName(schemaName, tableName), t));
             }
         }
     
    +    /**
    +     * Used to add the columns present in the ancestor hierarchy to the PTable of the given view or
    +     * view index
    +     * @param table PTable of the view or view index
    +     * @param skipAddingIndexes if true the returned PTable won't include indexes
    +     * @param skipAddingParentColumns if true the returned PTable won't include columns derived from
    +     *            ancestor tables
    +     * @param lockedAncestorTable ancestor table that is being mutated (as we won't be able to
    +     *            resolve this table as it's locked)
    +     */
    +    private Pair<PTable, MetaDataProtos.MutationCode> combineColumns(PTable table, long timestamp,
    +            int clientVersion, boolean skipAddingIndexes, boolean skipAddingParentColumns,
    +            PTable lockedAncestorTable) throws SQLException, IOException {
    +        boolean hasIndexId = table.getViewIndexId() != null;
    +        if (table.getType() != PTableType.VIEW && !hasIndexId) {
    +            return new Pair<PTable, MetaDataProtos.MutationCode>(table,
    +                    MetaDataProtos.MutationCode.TABLE_ALREADY_EXISTS);
    +        }
    +        if (!skipAddingParentColumns) {
    +            table =
    +                    addDerivedColumnsFromAncestors(table, timestamp, clientVersion,
    +                        lockedAncestorTable);
    +            if (table==null) {
    +                return new Pair<PTable, MetaDataProtos.MutationCode>(table,
    +                        MetaDataProtos.MutationCode.TABLE_NOT_FOUND);
    +            }
    +            // we need to resolve the indexes of views (to ensure they also have all the columns
    +            // derived from their ancestors)
    +            if (!skipAddingIndexes && !table.getIndexes().isEmpty()) {
    +                List<PTable> indexes = Lists.newArrayListWithExpectedSize(table.getIndexes().size());
    +                for (PTable index : table.getIndexes()) {
    +                    byte[] tenantIdBytes =
    +                            index.getTenantId() == null ? ByteUtil.EMPTY_BYTE_ARRAY
    +                                    : index.getTenantId().getBytes();
    +                    PTable latestIndex =
    +                            doGetTable(tenantIdBytes, index.getSchemaName().getBytes(),
    +                                index.getTableName().getBytes(), timestamp, null, clientVersion, true,
    +                                false, lockedAncestorTable);
    +                    if (latestIndex == null) {
    +                        throw new TableNotFoundException(
    +                                "Could not find index table while combining columns "
    +                                        + index.getTableName().getString() + " with tenant id "
    +                                        + index.getTenantId());
    +                    }
    +                    indexes.add(latestIndex);
    +                }
    +                table = PTableImpl.makePTable(table, table.getTimeStamp(), indexes);
    +            }
    +        }
    +
    +        MetaDataProtos.MutationCode mutationCode =
    +                table != null ? MetaDataProtos.MutationCode.TABLE_ALREADY_EXISTS
    +                        : MetaDataProtos.MutationCode.TABLE_NOT_FOUND;
    +        return new Pair<PTable, MetaDataProtos.MutationCode>(table, mutationCode);
    +    }
    +
    +    private PTable addDerivedColumnsFromAncestors(PTable table, long timestamp,
    +            int clientVersion, PTable lockedAncestorTable) throws IOException, SQLException, TableNotFoundException {
    +        // combine columns for view and view indexes
    +        byte[] tenantId =
    +                table.getTenantId() != null ? table.getTenantId().getBytes()
    +                        : ByteUtil.EMPTY_BYTE_ARRAY;
    +        byte[] schemaName = table.getSchemaName().getBytes();
    +        byte[] tableName = table.getTableName().getBytes();
    +        String fullTableName = SchemaUtil.getTableName(table.getSchemaName().getString(),
    +                table.getTableName().getString());
    +        boolean hasIndexId = table.getViewIndexId() != null;
    +        boolean isSalted = table.getBucketNum() != null;
    +        if (table.getType() != PTableType.VIEW && !hasIndexId) {
    +            return table;
    +        }
    +        boolean isDiverged = isDivergedView(table);
    +        // here we combine columns from the parent tables; the logic is as follows: if the PColumn
    +        // is in the EXCLUDED_COLUMNS, remove it; otherwise the priority for keeping duplicate columns is
    +        // child -> parent
    +        List<TableInfo> ancestorList = Lists.newArrayList();
    +        TableViewFinderResult viewFinderResult = new TableViewFinderResult();
    +        if (PTableType.VIEW == table.getType()) {
    +            findAncestorViews(tenantId, schemaName, tableName, viewFinderResult,
    +                table.isNamespaceMapped());
    +        } else { // is a view index
    +            findAncestorViewsOfIndex(tenantId, schemaName, tableName, viewFinderResult,
    +                table.isNamespaceMapped());
    +        }
    +        if (viewFinderResult.getResults().isEmpty()) {
    +            // no need to combine columns for local indexes on regular tables
    +            return table;
    +        }
    +        for (TableInfo viewInfo : viewFinderResult.getResults()) {
    +            ancestorList.add(viewInfo);
    +        }
    +        List<PColumn> allColumns = Lists.newArrayList();
    +        List<PColumn> excludedColumns = Lists.newArrayList();
    +        // add my own columns first in reverse order
    +        List<PColumn> myColumns = table.getColumns();
    +        for (int i = myColumns.size() - 1; i >= 0; i--) {
    +            PColumn pColumn = myColumns.get(i);
    +            if (pColumn.isExcluded()) {
    +                excludedColumns.add(pColumn);
    +            } else if (!pColumn.equals(SaltingUtil.SALTING_COLUMN)) { 
    --- End diff --
    
    Instead of matching on SALTING_COLUMN, we should stop the loop above at 1 
    (i.e. i >= 1) if table.getSaltBuckets() != null. The code never filters the 
    salt column based on its name.
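
For reference, a rough sketch of what that suggestion might look like (this is 
not the actual patch; it assumes the salt column, when present, always sits at 
index 0 of table.getColumns(), and that the truncated else branch simply adds 
the column to allColumns):

    // Sketch of the reviewer's suggestion: bound the loop instead of comparing
    // each column against SaltingUtil.SALTING_COLUMN. Reuses the isSalted flag
    // (table.getBucketNum() != null) already computed earlier in the patch.
    int stopAt = isSalted ? 1 : 0; // skip the salt column at index 0, if any
    for (int i = myColumns.size() - 1; i >= stopAt; i--) {
        PColumn pColumn = myColumns.get(i);
        if (pColumn.isExcluded()) {
            excludedColumns.add(pColumn);
        } else {
            allColumns.add(pColumn); // assumed body of the original else branch
        }
    }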


> Support multi region SYSTEM.CATALOG table
> -----------------------------------------
>
>                 Key: PHOENIX-3534
>                 URL: https://issues.apache.org/jira/browse/PHOENIX-3534
>             Project: Phoenix
>          Issue Type: Bug
>            Reporter: James Taylor
>            Assignee: Thomas D'Silva
>            Priority: Major
>             Fix For: 5.0.0, 4.15.0
>
>         Attachments: PHOENIX-3534.patch
>
>
> Currently Phoenix requires that the SYSTEM.CATALOG table be a single region, 
> because server-side row locks are held for operations that impact a table and 
> all of its views. For example, adding/removing a column from a base table 
> pushes this change to all views.
> As an alternative to making the SYSTEM.CATALOG transactional (PHOENIX-2431), 
> when a new table is created we can do a lazy cleanup of any rows that may be 
> left over from a failed DDL call (kudos to [~lhofhansl] for coming up with 
> this idea). To implement this efficiently, we'd also need to do PHOENIX-2051 
> so that we can efficiently find derived views.
> The implementation would rely on an optimistic concurrency model based on 
> checking our sequence numbers for each table/view before/after updating. Each 
> table/view row would be individually locked for its change (metadata for a 
> view or table cannot span regions due to our split policy), with the sequence 
> number being incremented under lock and then returned to the client.
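
As an illustration of the sequence-number scheme described in the issue above, 
a minimal sketch (all helper names here are hypothetical, not existing Phoenix 
APIs):

    // Hypothetical sketch of the optimistic concurrency check: each table/view
    // metadata row carries a sequence number that is verified and incremented
    // under that single row's lock, so no multi-row/multi-region lock is needed.
    private long applyDdlChange(byte[] metadataRowKey, long expectedSequenceNumber)
            throws SQLException, IOException {
        acquireRowLock(metadataRowKey); // lock only this table/view header row
        try {
            long current = readSequenceNumber(metadataRowKey);
            if (current != expectedSequenceNumber) {
                // another client changed this table/view since we resolved it
                throw new SQLException("Concurrent metadata mutation, please retry");
            }
            applyMetadataMutations(metadataRowKey); // write the DDL change
            writeSequenceNumber(metadataRowKey, current + 1);
            return current + 1; // incremented under lock, returned to the client
        } finally {
            releaseRowLock(metadataRowKey);
        }
    }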



--
This message was sent by Atlassian JIRA
(v7.6.3#76005)
