This is an automated email from the ASF dual-hosted git repository. leirui pushed a commit to branch research/M4-visualization in repository https://gitbox.apache.org/repos/asf/iotdb.git
commit 052fd37301e8eebfd938916817f661d08fa8bbd7 Author: Lei Rui <[email protected]> AuthorDate: Mon Jun 12 11:57:12 2023 +0800 add IOMonitor2 and add metrics for raw data query --- .../apache/iotdb/db/engine/cache/ChunkCache.java | 6 + .../org/apache/iotdb/db/service/TSServiceImpl.java | 42 ++-- .../org/apache/iotdb/db/utils/FileLoaderUtils.java | 3 + .../iotdb/tsfile/read/common/IOMonitor2.java | 227 +++++++++++++++++++++ .../tsfile/read/reader/chunk/ChunkReader.java | 5 + .../iotdb/tsfile/read/reader/page/PageReader.java | 7 +- 6 files changed, 272 insertions(+), 18 deletions(-) diff --git a/server/src/main/java/org/apache/iotdb/db/engine/cache/ChunkCache.java b/server/src/main/java/org/apache/iotdb/db/engine/cache/ChunkCache.java index 0e96e2a4ccd..204a99e76ef 100644 --- a/server/src/main/java/org/apache/iotdb/db/engine/cache/ChunkCache.java +++ b/server/src/main/java/org/apache/iotdb/db/engine/cache/ChunkCache.java @@ -27,6 +27,8 @@ import org.apache.iotdb.tsfile.file.metadata.ChunkMetadata; import org.apache.iotdb.tsfile.read.TsFileSequenceReader; import org.apache.iotdb.tsfile.read.common.Chunk; import org.apache.iotdb.tsfile.read.common.IOMonitor; +import org.apache.iotdb.tsfile.read.common.IOMonitor2; +import org.apache.iotdb.tsfile.read.common.IOMonitor2.Operation; import org.apache.iotdb.tsfile.utils.RamUsageEstimator; import com.github.benmanes.caffeine.cache.Caffeine; @@ -80,6 +82,8 @@ public class ChunkCache { .get(chunkMetadata.getFilePath(), chunkMetadata.isClosed()); long start = System.nanoTime(); Chunk ret = reader.readMemChunk(chunkMetadata); + IOMonitor2.addMeasure( + Operation.DCP_B_READ_MEM_CHUNK, System.nanoTime() - start); IOMonitor.incReadMemChunkTime(System.nanoTime() - start); return ret; } catch (IOException e) { @@ -102,7 +106,9 @@ public class ChunkCache { TsFileSequenceReader reader = FileReaderManager.getInstance() .get(chunkMetaData.getFilePath(), chunkMetaData.isClosed()); + long start = System.nanoTime(); Chunk chunk = reader.readMemChunk(chunkMetaData); + IOMonitor2.addMeasure(Operation.DCP_B_READ_MEM_CHUNK, System.nanoTime() - start); return new Chunk( chunk.getHeader(), chunk.getData().duplicate(), diff --git a/server/src/main/java/org/apache/iotdb/db/service/TSServiceImpl.java b/server/src/main/java/org/apache/iotdb/db/service/TSServiceImpl.java index 00c01bb37da..4c396572e80 100644 --- a/server/src/main/java/org/apache/iotdb/db/service/TSServiceImpl.java +++ b/server/src/main/java/org/apache/iotdb/db/service/TSServiceImpl.java @@ -128,6 +128,7 @@ import org.apache.iotdb.tsfile.file.metadata.enums.CompressionType; import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType; import org.apache.iotdb.tsfile.file.metadata.enums.TSEncoding; import org.apache.iotdb.tsfile.read.common.IOMonitor; +import org.apache.iotdb.tsfile.read.common.IOMonitor2; import org.apache.iotdb.tsfile.read.common.Path; import org.apache.iotdb.tsfile.read.query.dataset.QueryDataSet; @@ -628,6 +629,7 @@ public class TSServiceImpl implements TSIService.Iface { @Override public TSExecuteStatementResp executeQueryStatement(TSExecuteStatementReq req) { + long start = System.nanoTime(); try { if (!checkLogin(req.getSessionId())) { return RpcUtils.getTSExecuteStatementResp(TSStatusCode.NOT_LOGIN_ERROR); @@ -638,17 +640,21 @@ public class TSServiceImpl implements TSIService.Iface { processor.parseSQLToPhysicalPlan( statement, sessionManager.getZoneId(req.sessionId), req.fetchSize); // System.out.println("====DEBUG====: fetchSize=" + req.fetchSize); - return physicalPlan.isQuery() - ? 
internalExecuteQueryStatement( - statement, - req.statementId, - physicalPlan, - req.fetchSize, - req.timeout, - sessionManager.getUsername(req.getSessionId()), - req.isEnableRedirectQuery()) - : RpcUtils.getTSExecuteStatementResp( - TSStatusCode.EXECUTE_STATEMENT_ERROR, "Statement is not a query statement."); + TSExecuteStatementResp resp = + physicalPlan.isQuery() + ? internalExecuteQueryStatement( + statement, + req.statementId, + physicalPlan, + req.fetchSize, + req.timeout, + sessionManager.getUsername(req.getSessionId()), + req.isEnableRedirectQuery()) + : RpcUtils.getTSExecuteStatementResp( + TSStatusCode.EXECUTE_STATEMENT_ERROR, "Statement is not a query statement."); + IOMonitor2.addMeasure( + IOMonitor2.Operation.DCP_Server_Query_Execute, System.nanoTime() - start); + return resp; } catch (InterruptedException e) { LOGGER.error(INFO_INTERRUPT_ERROR, req, e); Thread.currentThread().interrupt(); @@ -664,7 +670,7 @@ public class TSServiceImpl implements TSIService.Iface { public TSExecuteFinishResp executeFinish() throws TException { TSExecuteFinishResp ret = new TSExecuteFinishResp(); ret.setStatus(RpcUtils.getStatus(TSStatusCode.SUCCESS_STATUS)); - ret.setExecutionInfo(IOMonitor.print()); + ret.setExecutionInfo(IOMonitor.print() + "\n" + IOMonitor2.print()); IOMonitor.finish(); return ret; } @@ -719,8 +725,8 @@ public class TSServiceImpl implements TSIService.Iface { // start record execution time IOMonitor.setSQL(statement); - queryCount.incrementAndGet(); long start = System.nanoTime(); + queryCount.incrementAndGet(); AUDIT_LOGGER.debug( "Session {} execute Query: {}", sessionManager.getCurrSessionId(), statement); long startTime = System.currentTimeMillis(); @@ -1017,6 +1023,7 @@ public class TSServiceImpl implements TSIService.Iface { @SuppressWarnings("squid:S3776") // Suppress high Cognitive Complexity warning @Override public TSFetchResultsResp fetchResults(TSFetchResultsReq req) { + long start = System.nanoTime(); try { if (!checkLogin(req.getSessionId())) { return RpcUtils.getTSFetchResultsResp(TSStatusCode.NOT_LOGIN_ERROR); @@ -1032,6 +1039,7 @@ public class TSServiceImpl implements TSIService.Iface { req.queryId, System.currentTimeMillis(), req.statement, req.timeout); QueryDataSet queryDataSet = sessionManager.getDataset(req.queryId); + TSFetchResultsResp resp; if (req.isAlign) { TSQueryDataSet result = fillRpcReturnData( @@ -1040,13 +1048,12 @@ public class TSServiceImpl implements TSIService.Iface { if (!hasResultSet) { releaseQueryResourceNoExceptions(req.queryId); } - TSFetchResultsResp resp = RpcUtils.getTSFetchResultsResp(TSStatusCode.SUCCESS_STATUS); + resp = RpcUtils.getTSFetchResultsResp(TSStatusCode.SUCCESS_STATUS); resp.setHasResultSet(hasResultSet); resp.setQueryDataSet(result); resp.setIsAlign(true); queryTimeManager.unRegisterQuery(req.queryId); - return resp; } else { TSQueryNonAlignDataSet nonAlignResult = fillRpcNonAlignReturnData( @@ -1061,14 +1068,15 @@ public class TSServiceImpl implements TSIService.Iface { if (!hasResultSet) { sessionManager.removeDataset(req.queryId); } - TSFetchResultsResp resp = RpcUtils.getTSFetchResultsResp(TSStatusCode.SUCCESS_STATUS); + resp = RpcUtils.getTSFetchResultsResp(TSStatusCode.SUCCESS_STATUS); resp.setHasResultSet(hasResultSet); resp.setNonAlignQueryDataSet(nonAlignResult); resp.setIsAlign(false); queryTimeManager.unRegisterQuery(req.queryId); - return resp; } + IOMonitor2.addMeasure(IOMonitor2.Operation.DCP_Server_Query_Fetch, System.nanoTime() - start); + return resp; } catch (InterruptedException e) { 
LOGGER.error(INFO_INTERRUPT_ERROR, req, e);
       Thread.currentThread().interrupt();
diff --git a/server/src/main/java/org/apache/iotdb/db/utils/FileLoaderUtils.java b/server/src/main/java/org/apache/iotdb/db/utils/FileLoaderUtils.java
index fd7b6f4cb1d..59c2201251b 100644
--- a/server/src/main/java/org/apache/iotdb/db/utils/FileLoaderUtils.java
+++ b/server/src/main/java/org/apache/iotdb/db/utils/FileLoaderUtils.java
@@ -34,6 +34,8 @@ import org.apache.iotdb.tsfile.file.metadata.TsFileMetadata;
 import org.apache.iotdb.tsfile.read.TsFileSequenceReader;
 import org.apache.iotdb.tsfile.read.common.Chunk;
 import org.apache.iotdb.tsfile.read.common.IOMonitor;
+import org.apache.iotdb.tsfile.read.common.IOMonitor2;
+import org.apache.iotdb.tsfile.read.common.IOMonitor2.Operation;
 import org.apache.iotdb.tsfile.read.common.Path;
 import org.apache.iotdb.tsfile.read.controller.IChunkLoader;
 import org.apache.iotdb.tsfile.read.filter.basic.Filter;
@@ -144,6 +146,7 @@ public class FileLoaderUtils {
     }
     long duration = System.nanoTime() - start;
     IOMonitor.incMeta(duration);
+    IOMonitor2.addMeasure(Operation.DCP_A_GET_CHUNK_METADATAS, duration);
     return timeSeriesMetadata;
   }
diff --git a/tsfile/src/main/java/org/apache/iotdb/tsfile/read/common/IOMonitor2.java b/tsfile/src/main/java/org/apache/iotdb/tsfile/read/common/IOMonitor2.java
new file mode 100644
index 00000000000..1682eaa6494
--- /dev/null
+++ b/tsfile/src/main/java/org/apache/iotdb/tsfile/read/common/IOMonitor2.java
@@ -0,0 +1,227 @@
+package org.apache.iotdb.tsfile.read.common;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class IOMonitor2 {
+
+  public enum Operation {
+    DCP_Server_Query_Execute("DCP_Server_Query_Execute"),
+    DCP_Server_Query_Fetch("DCP_Server_Query_Fetch"),
+    DCP_SeriesScanOperator_hasNext("DCP_SeriesScanOperator_hasNext"),
+    DCP_A_GET_CHUNK_METADATAS("DCP_A_GET_CHUNK_METADATAS"),
+    DCP_B_READ_MEM_CHUNK("DCP_B_READ_MEM_CHUNK"),
+    DCP_C_DESERIALIZE_PAGEHEADER_DECOMPRESS_PAGEDATA(
+        "DCP_C_DESERIALIZE_PAGEHEADER_DECOMPRESS_PAGEDATA"),
+    DCP_D_DECODE_PAGEDATA_TRAVERSE_POINTS("DCP_D_DECODE_PAGEDATA_TRAVERSE_POINTS");
+
+    public String getName() {
+      return name;
+    }
+
+    String name;
+
+    Operation(String name) {
+      this.name = name;
+    }
+  }
+
+  /**
+   * [Raw data query metrics]
+   * - count and elapsed time of server-side execute & fetch
+   * - count and elapsed time of SeriesScanOperator.hasNext (question: root.hasNext is executed as
+   *   many times as pages are decoded, yet SeriesScanOperator.hasNext is called more often than
+   *   that? Solved: root.isFinished -> SeriesScanOperator.isFinished also invokes hasNext; adding
+   *   those calls to the ones made through root.hasNext makes the counts match.)
+   * - A: count and elapsed time of reading chunk metadata from disk
+   * - B: count and elapsed time of loading chunk data from disk
+   * - C: count and elapsed time of decompressing all pages to build the pageReaderList
+   * - D: count and elapsed time of decoding pages and traversing points (the traversed point
+   *   count itself needs no separate bookkeeping, since all points are always traversed)
+   */
+  public static int DCP_Server_Query_Execute_count = 0;
+
+  public static long DCP_Server_Query_Execute_ns = 0;
+
+  public static int DCP_Server_Query_Fetch_count = 0;
+  public static long DCP_Server_Query_Fetch_ns = 0;
+
+  public static int DCP_SeriesScanOperator_hasNext_count = 0;
+  public static long DCP_SeriesScanOperator_hasNext_ns = 0;
+
+  public static int DCP_A_GET_CHUNK_METADATAS_count = 0;
+  public static long DCP_A_GET_CHUNK_METADATAS_ns = 0;
+
+  public static int DCP_B_READ_MEM_CHUNK_count = 0;
+  public static long DCP_B_READ_MEM_CHUNK_ns = 0;
+
+  public static int DCP_C_DESERIALIZE_PAGEHEADER_DECOMPRESS_PAGEDATA_count = 0;
+  public static long DCP_C_DESERIALIZE_PAGEHEADER_DECOMPRESS_PAGEDATA_ns = 0;
+
+  public static int DCP_D_DECODE_PAGEDATA_TRAVERSE_POINTS_count = 0;
+  public static long
DCP_D_DECODE_PAGEDATA_TRAVERSE_POINTS_ns = 0; + + public static long DCP_D_traversedPointNum = 0; + + private static final Logger DEBUG_LOGGER = LoggerFactory.getLogger("IOMonitor2"); + + private static void reset() { + DCP_Server_Query_Execute_count = 0; + DCP_Server_Query_Execute_ns = 0; + + DCP_Server_Query_Fetch_count = 0; + DCP_Server_Query_Fetch_ns = 0; + + DCP_SeriesScanOperator_hasNext_count = 0; + DCP_SeriesScanOperator_hasNext_ns = 0; + + DCP_A_GET_CHUNK_METADATAS_count = 0; + DCP_A_GET_CHUNK_METADATAS_ns = 0; + + DCP_B_READ_MEM_CHUNK_count = 0; + DCP_B_READ_MEM_CHUNK_ns = 0; + + DCP_C_DESERIALIZE_PAGEHEADER_DECOMPRESS_PAGEDATA_count = 0; + DCP_C_DESERIALIZE_PAGEHEADER_DECOMPRESS_PAGEDATA_ns = 0; + + DCP_D_DECODE_PAGEDATA_TRAVERSE_POINTS_count = 0; + DCP_D_DECODE_PAGEDATA_TRAVERSE_POINTS_ns = 0; + + DCP_D_traversedPointNum = 0; + } + + public static void addMeasure(Operation operation, long elapsedTimeInNanosecond) { + // TODO tmp for debug + // DEBUG_LOGGER.info(operation.getName() + ": " + elapsedTimeInNanosecond + " ns"); + switch (operation) { + case DCP_Server_Query_Execute: + DCP_Server_Query_Execute_count++; + DCP_Server_Query_Execute_ns += elapsedTimeInNanosecond; + break; + case DCP_Server_Query_Fetch: + DCP_Server_Query_Fetch_count++; + DCP_Server_Query_Fetch_ns += elapsedTimeInNanosecond; + break; + case DCP_SeriesScanOperator_hasNext: + DCP_SeriesScanOperator_hasNext_count++; + DCP_SeriesScanOperator_hasNext_ns += elapsedTimeInNanosecond; + break; + case DCP_A_GET_CHUNK_METADATAS: + DCP_A_GET_CHUNK_METADATAS_count++; + DCP_A_GET_CHUNK_METADATAS_ns += elapsedTimeInNanosecond; + break; + case DCP_B_READ_MEM_CHUNK: + DCP_B_READ_MEM_CHUNK_count++; + DCP_B_READ_MEM_CHUNK_ns += elapsedTimeInNanosecond; + break; + case DCP_C_DESERIALIZE_PAGEHEADER_DECOMPRESS_PAGEDATA: + DCP_C_DESERIALIZE_PAGEHEADER_DECOMPRESS_PAGEDATA_count++; + DCP_C_DESERIALIZE_PAGEHEADER_DECOMPRESS_PAGEDATA_ns += elapsedTimeInNanosecond; + break; + case DCP_D_DECODE_PAGEDATA_TRAVERSE_POINTS: + DCP_D_DECODE_PAGEDATA_TRAVERSE_POINTS_count++; + DCP_D_DECODE_PAGEDATA_TRAVERSE_POINTS_ns += elapsedTimeInNanosecond; + break; + default: + System.out.println("not supported operation type"); // this will not happen + break; + } + } + + public static void addMeasure(Operation operation, long elapsedTimeInNanosecond, int count) { + switch (operation) { + case DCP_Server_Query_Execute: + DCP_Server_Query_Execute_count += count; + DCP_Server_Query_Execute_ns += elapsedTimeInNanosecond; + break; + case DCP_Server_Query_Fetch: + DCP_Server_Query_Fetch_count += count; + DCP_Server_Query_Fetch_ns += elapsedTimeInNanosecond; + break; + case DCP_SeriesScanOperator_hasNext: + DCP_SeriesScanOperator_hasNext_count += count; + DCP_SeriesScanOperator_hasNext_ns += elapsedTimeInNanosecond; + break; + case DCP_A_GET_CHUNK_METADATAS: + DCP_A_GET_CHUNK_METADATAS_count += count; + DCP_A_GET_CHUNK_METADATAS_ns += elapsedTimeInNanosecond; + break; + case DCP_B_READ_MEM_CHUNK: + DCP_B_READ_MEM_CHUNK_count += count; + DCP_B_READ_MEM_CHUNK_ns += elapsedTimeInNanosecond; + break; + case DCP_C_DESERIALIZE_PAGEHEADER_DECOMPRESS_PAGEDATA: + DCP_C_DESERIALIZE_PAGEHEADER_DECOMPRESS_PAGEDATA_count += count; + DCP_C_DESERIALIZE_PAGEHEADER_DECOMPRESS_PAGEDATA_ns += elapsedTimeInNanosecond; + break; + case DCP_D_DECODE_PAGEDATA_TRAVERSE_POINTS: + DCP_D_DECODE_PAGEDATA_TRAVERSE_POINTS_count += count; + DCP_D_DECODE_PAGEDATA_TRAVERSE_POINTS_ns += elapsedTimeInNanosecond; + break; + default: + System.out.println("not supported operation type"); // this 
will not happen
+        break;
+    }
+  }
+
+  public static String print() {
+    StringBuilder stringBuilder = new StringBuilder();
+    stringBuilder
+        .append("Server_Query_Execute_ns")
+        .append(",")
+        .append(DCP_Server_Query_Execute_ns)
+        .append("\n");
+    stringBuilder
+        .append("Server_Query_Fetch_ns")
+        .append(",")
+        .append(DCP_Server_Query_Fetch_ns)
+        .append("\n");
+    stringBuilder
+        .append("SeriesScanOperator_hasNext_ns")
+        .append(",")
+        .append(DCP_SeriesScanOperator_hasNext_ns)
+        .append("\n");
+    stringBuilder.append("A_ns").append(",").append(DCP_A_GET_CHUNK_METADATAS_ns).append("\n");
+    stringBuilder.append("B_ns").append(",").append(DCP_B_READ_MEM_CHUNK_ns).append("\n");
+    stringBuilder
+        .append("C_ns")
+        .append(",")
+        .append(DCP_C_DESERIALIZE_PAGEHEADER_DECOMPRESS_PAGEDATA_ns)
+        .append("\n");
+    stringBuilder
+        .append("D_ns")
+        .append(",")
+        .append(DCP_D_DECODE_PAGEDATA_TRAVERSE_POINTS_ns)
+        .append("\n");
+
+    stringBuilder
+        .append("Server_Query_Execute_cnt")
+        .append(",")
+        .append(DCP_Server_Query_Execute_count)
+        .append("\n");
+    stringBuilder
+        .append("Server_Query_Fetch_cnt")
+        .append(",")
+        .append(DCP_Server_Query_Fetch_count)
+        .append("\n");
+    stringBuilder
+        .append("SeriesScanOperator_hasNext_cnt")
+        .append(",")
+        .append(DCP_SeriesScanOperator_hasNext_count)
+        .append("\n");
+    stringBuilder.append("A_cnt").append(",").append(DCP_A_GET_CHUNK_METADATAS_count).append("\n");
+    stringBuilder.append("B_cnt").append(",").append(DCP_B_READ_MEM_CHUNK_count).append("\n");
+    stringBuilder
+        .append("C_cnt")
+        .append(",")
+        .append(DCP_C_DESERIALIZE_PAGEHEADER_DECOMPRESS_PAGEDATA_count)
+        .append("\n");
+    stringBuilder
+        .append("D_cnt")
+        .append(",")
+        .append(DCP_D_DECODE_PAGEDATA_TRAVERSE_POINTS_count)
+        .append("\n");
+    stringBuilder
+        .append("DCP_D_traversedPointNum")
+        .append(",")
+        .append(DCP_D_traversedPointNum)
+        .append("\n");
+
+    reset(); // whenever print() is called, reset the metrics to clear warm-up information.
+ return stringBuilder.toString(); + } +} diff --git a/tsfile/src/main/java/org/apache/iotdb/tsfile/read/reader/chunk/ChunkReader.java b/tsfile/src/main/java/org/apache/iotdb/tsfile/read/reader/chunk/ChunkReader.java index c794f87f78d..7e5011e1b37 100644 --- a/tsfile/src/main/java/org/apache/iotdb/tsfile/read/reader/chunk/ChunkReader.java +++ b/tsfile/src/main/java/org/apache/iotdb/tsfile/read/reader/chunk/ChunkReader.java @@ -31,6 +31,8 @@ import org.apache.iotdb.tsfile.file.metadata.enums.TSEncoding; import org.apache.iotdb.tsfile.file.metadata.statistics.Statistics; import org.apache.iotdb.tsfile.read.common.BatchData; import org.apache.iotdb.tsfile.read.common.Chunk; +import org.apache.iotdb.tsfile.read.common.IOMonitor2; +import org.apache.iotdb.tsfile.read.common.IOMonitor2.Operation; import org.apache.iotdb.tsfile.read.common.TimeRange; import org.apache.iotdb.tsfile.read.filter.basic.Filter; import org.apache.iotdb.tsfile.read.reader.IChunkReader; @@ -81,6 +83,7 @@ public class ChunkReader implements IChunkReader { } private void initAllPageReaders(Statistics chunkStatistic) throws IOException { + long start = System.nanoTime(); // construct next satisfied page header while (chunkDataBuffer.remaining() > 0) { // deserialize a PageHeader from chunkDataBuffer @@ -97,6 +100,8 @@ public class ChunkReader implements IChunkReader { skipBytesInStreamByLength(pageHeader.getCompressedSize()); } } + IOMonitor2.addMeasure( + Operation.DCP_C_DESERIALIZE_PAGEHEADER_DECOMPRESS_PAGEDATA, System.nanoTime() - start); } /** judge if has next page whose page header satisfies the filter. */ diff --git a/tsfile/src/main/java/org/apache/iotdb/tsfile/read/reader/page/PageReader.java b/tsfile/src/main/java/org/apache/iotdb/tsfile/read/reader/page/PageReader.java index 68c7d4e6dd6..420a32735ac 100644 --- a/tsfile/src/main/java/org/apache/iotdb/tsfile/read/reader/page/PageReader.java +++ b/tsfile/src/main/java/org/apache/iotdb/tsfile/read/reader/page/PageReader.java @@ -31,6 +31,8 @@ import org.apache.iotdb.tsfile.read.common.BatchData; import org.apache.iotdb.tsfile.read.common.BatchDataFactory; import org.apache.iotdb.tsfile.read.common.ChunkSuit4CPV; import org.apache.iotdb.tsfile.read.common.IOMonitor; +import org.apache.iotdb.tsfile.read.common.IOMonitor2; +import org.apache.iotdb.tsfile.read.common.IOMonitor2.Operation; import org.apache.iotdb.tsfile.read.common.TimeRange; import org.apache.iotdb.tsfile.read.filter.basic.Filter; import org.apache.iotdb.tsfile.read.filter.operator.AndFilter; @@ -233,11 +235,12 @@ public class PageReader implements IPageReader { @SuppressWarnings("squid:S3776") // Suppress high Cognitive Complexity warning @Override public BatchData getAllSatisfiedPageData(boolean ascending) throws IOException { - + long start = System.nanoTime(); BatchData pageData = BatchDataFactory.createBatchData(dataType, ascending, false); while (timeDecoder.hasNext(timeBuffer)) { IOMonitor.incPointsTravered(); + IOMonitor2.DCP_D_traversedPointNum++; long timestamp = timeDecoder.readLong(timeBuffer); switch (dataType) { case BOOLEAN: @@ -280,6 +283,8 @@ public class PageReader implements IPageReader { throw new UnSupportedDataTypeException(String.valueOf(dataType)); } } + IOMonitor2.addMeasure( + Operation.DCP_D_DECODE_PAGEDATA_TRAVERSE_POINTS, System.nanoTime() - start); return pageData.flip(); }
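
For reviewers who want to sanity-check the new counters outside the server, the pattern used at the instrumented call sites above can be exercised in isolation. The sketch below is illustrative only: the IOMonitor2Demo class and the Thread.sleep placeholder workload are invented for this example, while IOMonitor2, Operation.DCP_B_READ_MEM_CHUNK, addMeasure, and print() are the APIs added by this commit. It times a block the same way the patch does, then parses the "name,value" lines emitted by print(). Note that print() resets every counter as a side effect, and that the counters are plain unsynchronized static fields, so the numbers are only meaningful when a single query runs at a time.

import java.util.HashMap;
import java.util.Map;

import org.apache.iotdb.tsfile.read.common.IOMonitor2;
import org.apache.iotdb.tsfile.read.common.IOMonitor2.Operation;

public class IOMonitor2Demo {
  public static void main(String[] args) throws InterruptedException {
    // Time an arbitrary placeholder workload, exactly as the instrumented
    // call sites do (e.g. around reader.readMemChunk in ChunkCache).
    long start = System.nanoTime();
    Thread.sleep(10); // hypothetical stand-in for a chunk read
    IOMonitor2.addMeasure(Operation.DCP_B_READ_MEM_CHUNK, System.nanoTime() - start);

    // print() emits one "name,value" line per metric and then resets all
    // counters, so call it once at the end of an experiment run.
    Map<String, Long> metrics = new HashMap<>();
    for (String line : IOMonitor2.print().split("\n")) {
      String[] kv = line.split(",");
      if (kv.length == 2) {
        metrics.put(kv[0], Long.parseLong(kv[1]));
      }
    }
    System.out.println(metrics.get("B_cnt") + " chunk read(s) took " + metrics.get("B_ns") + " ns");
  }
}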
