virajjasani commented on code in PR #2209: URL: https://github.com/apache/phoenix/pull/2209#discussion_r2188072051
########## phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/CDCCompactionUtil.java: ########## @@ -0,0 +1,395 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.phoenix.coprocessor; + +import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.CheckAndMutate; +import org.apache.hadoop.hbase.client.CheckAndMutateResult; +import org.apache.hadoop.hbase.client.Get; +import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; +import org.apache.hadoop.hbase.regionserver.Region; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.phoenix.expression.function.PartitionIdFunction; +import org.apache.phoenix.hbase.index.ValueGetter; +import org.apache.phoenix.hbase.index.util.GenericKeyValueBuilder; +import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr; +import org.apache.phoenix.index.IndexMaintainer; +import org.apache.phoenix.jdbc.PhoenixConnection; +import org.apache.phoenix.query.QueryConstants; +import 
org.apache.phoenix.schema.PColumn; +import org.apache.phoenix.schema.PTable; +import org.apache.phoenix.schema.types.PDate; +import org.apache.phoenix.util.CDCUtil; +import org.apache.phoenix.util.IndexUtil; +import org.apache.phoenix.util.JacksonUtil; +import org.apache.phoenix.util.QueryUtil; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Properties; + +import static org.apache.phoenix.query.QueryConstants.NAME_SEPARATOR; + +/** + * Utility class for CDC (Change Data Capture) operations during compaction. + * This class contains utilities for handling TTL row expiration events and generating + * CDC events with pre-image data that are written directly to CDC index tables. + */ +public final class CDCCompactionUtil { + + private static final Logger LOGGER = LoggerFactory.getLogger(CDCCompactionUtil.class); + + private CDCCompactionUtil() { + // empty + } + + /** + * Finds the column name for a given cell in the data table. + * + * @param dataTable The data table + * @param cell The cell + * @return The column name or null if not found + */ + static String findColumnName(PTable dataTable, Cell cell) { + try { + byte[] family = CellUtil.cloneFamily(cell); + byte[] qualifier = CellUtil.cloneQualifier(cell); + byte[] defaultCf = dataTable.getDefaultFamilyName() != null + ? 
dataTable.getDefaultFamilyName().getBytes() + : QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES; + for (PColumn column : dataTable.getColumns()) { + if (column.getFamilyName() != null + && Bytes.equals(family, column.getFamilyName().getBytes()) + && Bytes.equals(qualifier, column.getColumnQualifierBytes())) { + if (Bytes.equals(defaultCf, column.getFamilyName().getBytes())) { + return column.getName().getString(); + } else { + return column.getFamilyName().getString() + NAME_SEPARATOR + + column.getName().getString(); + } + } + } + } catch (Exception e) { + LOGGER.error("Error finding column name for cell: {}", CellUtil.toString(cell, true), + e); + } + return null; + } + + /** + * Creates a CDC event map for TTL delete with pre-image data. + * + * @param expiredRowPut The expired row data + * @param dataTable The data table + * @param preImage Pre-image map + * @return CDC event map + */ + static Map<String, Object> createTTLDeleteCDCEvent(Put expiredRowPut, PTable dataTable, + Map<String, Object> preImage) + throws Exception { + Map<String, Object> cdcEvent = new HashMap<>(); + cdcEvent.put(QueryConstants.CDC_EVENT_TYPE, QueryConstants.CDC_TTL_DELETE_EVENT_TYPE); + for (List<Cell> familyCells : expiredRowPut.getFamilyCellMap().values()) { + for (Cell cell : familyCells) { + String columnName = findColumnName(dataTable, cell); + if (columnName != null) { + PColumn column = dataTable.getColumnForColumnQualifier( + CellUtil.cloneFamily(cell), CellUtil.cloneQualifier(cell)); + Object value = column.getDataType().toObject(cell.getValueArray(), + cell.getValueOffset(), + cell.getValueLength()); + Object encodedValue = + CDCUtil.getColumnEncodedValue(value, column.getDataType()); + preImage.put(columnName, encodedValue); + } + } + } + cdcEvent.put(QueryConstants.CDC_PRE_IMAGE, preImage); + cdcEvent.put(QueryConstants.CDC_POST_IMAGE, Collections.emptyMap()); + return cdcEvent; + } Review Comment: By looking at how CDCTableInfo is built, there is no direct API to construct it 
from data table PTable object such that we don't have to go through serialization and deserialization path. That's why I did not pursue using CDCTableInfo. Also, CDCChangeBuilder is tightly coupled with CDCTableInfo and therefore only used by the scanner init: ``` CDCUtil.setupScanForCDC(dataTableScan); cdcDataTableInfo = CDCTableInfo.createFromProto(CDCInfoProtos.CDCTableDef .parseFrom(scan.getAttribute(CDC_DATA_TABLE_DEF))); changeBuilder = new CDCChangeBuilder(cdcDataTableInfo); ``` Whereas here, we have a much simpler case: embed pre-built image with the index mutation because data table scan will not get us any data (major compacted). -- This is an automated message from the Apache Git Service. To respond to the message, please log on to GitHub and use the URL above to go to the specific comment. To unsubscribe, e-mail: issues-unsubscr...@phoenix.apache.org For queries about this service, please contact Infrastructure at: us...@infra.apache.org