[ https://issues.apache.org/jira/browse/PHOENIX-6181?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=17214947#comment-17214947 ]
ASF GitHub Bot commented on PHOENIX-6181:
-----------------------------------------
kadirozde commented on a change in pull request #915:
URL: https://github.com/apache/phoenix/pull/915#discussion_r505789656
##########
File path:
phoenix-core/src/main/java/org/apache/phoenix/coprocessor/IndexRepairRegionScanner.java
##########
@@ -0,0 +1,426 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.coprocessor;
+
+import static org.apache.phoenix.coprocessor.BaseScannerRegionObserver.PHYSICAL_DATA_TABLE_NAME;
+import static org.apache.phoenix.query.QueryConstants.AGG_TIMESTAMP;
+import static org.apache.phoenix.query.QueryConstants.SINGLE_COLUMN;
+import static org.apache.phoenix.query.QueryConstants.SINGLE_COLUMN_FAMILY;
+import static org.apache.phoenix.query.QueryConstants.UNGROUPED_AGG_ROW_KEY;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.TreeSet;
+
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.client.Delete;
+import org.apache.hadoop.hbase.client.Mutation;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.hadoop.hbase.regionserver.Region;
+import org.apache.hadoop.hbase.regionserver.RegionScanner;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.phoenix.query.HBaseFactoryProvider;
+import org.apache.phoenix.util.ByteUtil;
+import org.apache.phoenix.compile.ScanRanges;
+import org.apache.phoenix.filter.SkipScanFilter;
+import org.apache.phoenix.hbase.index.parallel.Task;
+import org.apache.phoenix.hbase.index.parallel.TaskBatch;
+import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
+import org.apache.phoenix.mapreduce.index.IndexTool;
+import org.apache.phoenix.query.KeyRange;
+import org.apache.phoenix.schema.types.PLong;
+import org.apache.phoenix.schema.types.PVarbinary;
+import org.apache.phoenix.util.KeyValueUtil;
+import org.apache.phoenix.util.ServerUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.collect.Maps;
+
+public class IndexRepairRegionScanner extends GlobalIndexRegionScanner {
+
+    private static final Logger LOGGER = LoggerFactory.getLogger(IndexRepairRegionScanner.class);
+
+ public IndexRepairRegionScanner(final RegionScanner innerScanner,
+ final Region region,
+ final Scan scan,
+ final RegionCoprocessorEnvironment env,
+                                    final UngroupedAggregateRegionObserver ungroupedAggregateRegionObserver)
+            throws IOException {
+        super(innerScanner, region, scan, env, ungroupedAggregateRegionObserver);
+
+        byte[] dataTableName = scan.getAttribute(PHYSICAL_DATA_TABLE_NAME);
+        dataHTable = hTableFactory.getTable(new ImmutableBytesPtr(dataTableName));
+        indexTableTTL = region.getTableDesc().getColumnFamilies()[0].getTimeToLive();
+        try (org.apache.hadoop.hbase.client.Connection connection =
+                HBaseFactoryProvider.getHConnectionFactory().createConnection(env.getConfiguration())) {
+            regionEndKeys = connection.getRegionLocator(dataHTable.getName()).getEndKeys();
+        }
+    }
+
+    public void prepareExpectedIndexMutations(Result dataRow, Map<byte[], List<Mutation>> expectedIndexMutationMap) throws IOException {
+ Put put = null;
+ Delete del = null;
+ for (Cell cell : dataRow.rawCells()) {
+            if (KeyValue.Type.codeToType(cell.getTypeByte()) == KeyValue.Type.Put) {
+ if (put == null) {
+ put = new Put(CellUtil.cloneRow(cell));
+ }
+ put.add(cell);
+ } else {
+ if (del == null) {
+ del = new Delete(CellUtil.cloneRow(cell));
+ }
+ del.addDeleteMarker(cell);
+ }
+ }
+        List<Mutation> indexMutations = prepareIndexMutationsForRebuild(indexMaintainer, put, del);
+ Collections.reverse(indexMutations);
+ for (Mutation mutation : indexMutations) {
+ byte[] indexRowKey = mutation.getRow();
+            List<Mutation> mutationList = expectedIndexMutationMap.get(indexRowKey);
+ if (mutationList == null) {
+ mutationList = new ArrayList<>();
+ mutationList.add(mutation);
+ expectedIndexMutationMap.put(indexRowKey, mutationList);
+ } else {
+ mutationList.add(mutation);
+ }
+ }
+ }
+
+ private void repairIndexRows(Map<byte[], List<Mutation>> indexMutationMap,
+ List<Mutation> indexRowsToBeDeleted,
+                                 IndexToolVerificationResult verificationResult) throws IOException {
+ try {
+ int batchSize = 0;
+            List<Mutation> indexUpdates = new ArrayList<Mutation>(maxBatchSize);
+ for (List<Mutation> mutationList : indexMutationMap.values()) {
+ indexUpdates.addAll(mutationList);
+ batchSize += mutationList.size();
+ if (batchSize >= maxBatchSize) {
+ ungroupedAggregateRegionObserver.checkForRegionClosing();
+                    region.batchMutate(indexUpdates.toArray(new Mutation[indexUpdates.size()]),
+                        HConstants.NO_NONCE, HConstants.NO_NONCE);
+ batchSize = 0;
+ indexUpdates = new ArrayList<Mutation>(maxBatchSize);
+ }
+ }
+ if (batchSize > 0) {
+ ungroupedAggregateRegionObserver.checkForRegionClosing();
+                region.batchMutate(indexUpdates.toArray(new Mutation[indexUpdates.size()]),
+                    HConstants.NO_NONCE, HConstants.NO_NONCE);
+ }
+ batchSize = 0;
+ indexUpdates = new ArrayList<Mutation>(maxBatchSize);
+ for (Mutation mutation : indexRowsToBeDeleted) {
+ indexUpdates.add(mutation);
+ batchSize ++;
+ if (batchSize >= maxBatchSize) {
+ ungroupedAggregateRegionObserver.checkForRegionClosing();
+                    region.batchMutate(indexUpdates.toArray(new Mutation[indexUpdates.size()]),
+                        HConstants.NO_NONCE, HConstants.NO_NONCE);
+ batchSize = 0;
+ indexUpdates = new ArrayList<Mutation>(maxBatchSize);
+ }
+ }
+ if (batchSize > 0) {
+ ungroupedAggregateRegionObserver.checkForRegionClosing();
+                region.batchMutate(indexUpdates.toArray(new Mutation[indexUpdates.size()]),
+                    HConstants.NO_NONCE, HConstants.NO_NONCE);
+ }
+ if (verify) {
+                verificationResult.setRebuiltIndexRowCount(verificationResult.getRebuiltIndexRowCount() + indexMutationMap.size());
+ }
+ } catch (Throwable t) {
+            ServerUtil.throwIOException(region.getRegionInfo().getRegionNameAsString(), t);
+ }
+ }
+
+    private Map<byte[], List<Mutation>> populateExpectedIndexMutationMap(Set<byte[]> dataRowKeys) throws IOException {
+        Map<byte[], List<Mutation>> expectedIndexMutationMap = Maps.newTreeMap(Bytes.BYTES_COMPARATOR);
+ List<KeyRange> keys = new ArrayList<>(dataRowKeys.size());
+ for (byte[] indexKey: dataRowKeys) {
+ keys.add(PVarbinary.INSTANCE.getKeyRange(indexKey));
+ }
+ ScanRanges scanRanges = ScanRanges.createPointLookup(keys);
+ Scan dataScan = new Scan();
+        dataScan.setTimeRange(scan.getTimeRange().getMin(), scan.getTimeRange().getMax());
+ scanRanges.initializeScan(dataScan);
+ SkipScanFilter skipScanFilter = scanRanges.getSkipScanFilter();
+ dataScan.setFilter(new SkipScanFilter(skipScanFilter, true));
+ dataScan.setRaw(true);
+ dataScan.setMaxVersions();
+ dataScan.setCacheBlocks(false);
+ try (ResultScanner resultScanner = dataHTable.getScanner(dataScan)) {
+            for (Result result = resultScanner.next(); (result != null); result = resultScanner.next()) {
+ ungroupedAggregateRegionObserver.checkForRegionClosing();
+                prepareExpectedIndexMutations(result, expectedIndexMutationMap);
+ }
+ } catch (Throwable t) {
+ ServerUtil.throwIOException(dataHTable.getName().toString(), t);
+ }
+ return expectedIndexMutationMap;
+ }
+
+    private Map<byte[], List<Mutation>> populateActualIndexMutationMap(Map<byte[], List<Mutation>> expectedIndexMutationMap) throws IOException {
+        Map<byte[], List<Mutation>> actualIndexMutationMap = Maps.newTreeMap(Bytes.BYTES_COMPARATOR);
+ Scan indexScan = prepareIndexScan(expectedIndexMutationMap);
+ try (RegionScanner regionScanner = region.getScanner(indexScan)) {
+ do {
+ ungroupedAggregateRegionObserver.checkForRegionClosing();
+ List<Cell> row = new ArrayList<Cell>();
+ hasMore = regionScanner.nextRaw(row);
+ if (!row.isEmpty()) {
+                    populateIndexMutationFromIndexRow(row, actualIndexMutationMap);
+ }
+ } while (hasMore);
+ } catch (Throwable t) {
+            ServerUtil.throwIOException(region.getRegionInfo().getRegionNameAsString(), t);
+ }
+ return actualIndexMutationMap;
+ }
+
+ private void repairAndOrVerifyIndexRows(Set<byte[]> dataRowKeys,
+                                            Map<byte[], List<Mutation>> actualIndexMutationMap,
+                                            IndexToolVerificationResult verificationResult) throws IOException {
+ List<Mutation> indexRowsToBeDeleted = new ArrayList<>();
+        Map<byte[], List<Mutation>> expectedIndexMutationMap = populateExpectedIndexMutationMap(dataRowKeys);
+ if (verifyType == IndexTool.IndexVerifyType.NONE) {
+            repairIndexRows(expectedIndexMutationMap, indexRowsToBeDeleted, verificationResult);
+ return;
+ }
+ if (verifyType == IndexTool.IndexVerifyType.ONLY) {
+            verifyIndexRows(actualIndexMutationMap, expectedIndexMutationMap, Collections.EMPTY_SET, Collections.EMPTY_LIST, verificationResult.getBefore(), true);
+ return;
+ }
+ if (verifyType == IndexTool.IndexVerifyType.BEFORE) {
+            verifyIndexRows(actualIndexMutationMap, expectedIndexMutationMap, Collections.EMPTY_SET, indexRowsToBeDeleted, verificationResult.getBefore(), true);
+            if (!expectedIndexMutationMap.isEmpty() || !indexRowsToBeDeleted.isEmpty()) {
+                repairIndexRows(expectedIndexMutationMap, indexRowsToBeDeleted, verificationResult);
+ }
+ return;
+ }
+ if (verifyType == IndexTool.IndexVerifyType.AFTER) {
Review comment:
I will leave this decision to be made within IndexTool. I think we can
still allow these options and state in the IndexTool help text that they do
not remove stale index rows.
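
For readers following the thread: the option handling being discussed is the IndexTool.IndexVerifyType branching in repairAndOrVerifyIndexRows, shown in the diff above. The sketch below condenses that branching, with plain Java collections standing in for the Phoenix/HBase types; the NONE, ONLY, and BEFORE behavior mirrors the diff, while the AFTER and BOTH branches are assumptions because the excerpt is truncated at the AFTER case.

import java.util.List;
import java.util.Map;

// Illustration of the verify-type branching discussed above (not the patch's code).
public class VerifyTypeSketch {
    enum IndexVerifyType { NONE, ONLY, BEFORE, AFTER, BOTH }

    static void repairAndOrVerify(IndexVerifyType verifyType,
                                  Map<String, List<String>> expected,
                                  Map<String, List<String>> actual) {
        switch (verifyType) {
            case NONE:
                // NONE: rewrite every expected index row, no verification at all.
                repair(expected);
                break;
            case ONLY:
                // ONLY: report mismatches, never mutate the index.
                verify(expected, actual);
                break;
            case BEFORE: {
                // BEFORE: verify first; repair only what failed verification.
                Map<String, List<String>> failed = verify(expected, actual);
                if (!failed.isEmpty()) {
                    repair(failed);
                }
                break;
            }
            case AFTER:
            case BOTH:
                // Assumed (the excerpt is truncated at the AFTER branch):
                // repair first and verify afterwards; BOTH verifies on both sides.
                break;
        }
    }

    // Stand-ins for region.batchMutate(...) and verifyIndexRows(...) in the patch.
    static void repair(Map<String, List<String>> rows) { /* apply the index mutations */ }

    static Map<String, List<String>> verify(Map<String, List<String>> expected,
                                            Map<String, List<String>> actual) {
        // Keep only the rows whose actual state disagrees with the expected state.
        expected.entrySet().removeIf(e -> e.getValue().equals(actual.get(e.getKey())));
        return expected;
    }
}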
----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
For queries about this service, please contact Infrastructure at:
[email protected]
> IndexRepairRegionScanner to verify and repair every global index row
> --------------------------------------------------------------------
>
> Key: PHOENIX-6181
> URL: https://issues.apache.org/jira/browse/PHOENIX-6181
> Project: Phoenix
> Issue Type: Improvement
> Affects Versions: 5.0.0, 4.14.3
> Reporter: Kadir OZDEMIR
> Assignee: Kadir OZDEMIR
> Priority: Major
> Attachments: PHOENIX-6181.4.x.001.patch
>
>
> IndexRebuildRegionScanner is the server-side engine that rebuilds and verifies
> every index row referenced by the data table. It runs on data table regions,
> scans every data table row locally, and then rebuilds and verifies the index
> table rows referenced by those data rows over server-to-server RPCs, using the
> HBase client installed on the region servers. However, IndexRebuildRegionScanner
> cannot clean up index rows that are not referenced by any data table row, if
> such index rows exist. To do that, we need another region scanner that scans
> index table regions and makes sure that every index row is valid. This region
> scanner will be called IndexRepairRegionScanner.
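
To make the gap concrete: a data-table-driven rebuild starts from data rows, so an index row with no corresponding data row is never visited; a scanner that starts from the index region can detect and delete such orphans, and repair missing or stale rows in the same pass. Below is a minimal sketch of that comparison, using plain Java maps in place of the HBase scanners and mutations; the method names are illustrative, not the patch's API.

import java.util.Map;

// Minimal sketch of the repair idea in PHOENIX-6181 (illustration only, not the patch's code):
// starting from index rows, recompute what the data table implies and fix any disagreement.
public class IndexRepairSketch {

    // indexRowKey -> row contents, for the rows actually present in an index region
    // and for the rows the data table says should be there.
    static void repair(Map<String, String> actualIndexRows, Map<String, String> expectedIndexRows) {
        // 1. Orphans: present in the index but not implied by any data row.
        //    A data-table-driven rebuild never visits these; only an
        //    index-table-driven scanner can find and delete them.
        for (String indexRowKey : actualIndexRows.keySet()) {
            if (!expectedIndexRows.containsKey(indexRowKey)) {
                delete(indexRowKey);
            }
        }
        // 2. Missing or stale rows: implied by the data table but absent or
        //    different in the index; rewrite them from the expected contents.
        for (Map.Entry<String, String> e : expectedIndexRows.entrySet()) {
            if (!e.getValue().equals(actualIndexRows.get(e.getKey()))) {
                put(e.getKey(), e.getValue());
            }
        }
    }

    static void delete(String indexRowKey) { /* stand-in for a Delete of the orphan index row */ }

    static void put(String indexRowKey, String contents) { /* stand-in for rewriting the index row */ }
}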
--
This message was sent by Atlassian Jira
(v8.3.4#803005)