This is an automated email from the ASF dual-hosted git repository.
leirui pushed a commit to branch research/M4-visualization
in repository https://gitbox.apache.org/repos/asf/iotdb.git
The following commit(s) were added to refs/heads/research/M4-visualization by
this push:
new 6d5c05c55fb update readme
6d5c05c55fb is described below
commit 6d5c05c55fb5e1d8a9dd66b4a39396681024dc7f
Author: Lei Rui <[email protected]>
AuthorDate: Thu Jul 20 00:31:16 2023 +0800
update readme
---
README.md | 7 +-
.../dataset/groupby/LocalGroupByExecutor4CPV.java | 96 +++++++++++-----------
.../file/metadata/statistics/StepRegress.java | 23 +++---
.../file/metadata/statistics/ValueIndex.java | 5 --
4 files changed, 62 insertions(+), 69 deletions(-)
diff --git a/README.md b/README.md
index a0dfb14e8c0..b34c982acb7 100644
--- a/README.md
+++ b/README.md
@@ -20,9 +20,8 @@
-->
# M4-LSM
-- The codes for two deployments, M4-UDF and M4-LSM, are available in this
repository.
- - M4-UDF: M4-UDF is implemented in
[server/src/main/java/org/apache/iotdb/db/query/udf/builtin/UDTFM4MAC.java](server/src/main/java/org/apache/iotdb/db/query/udf/builtin/UDTFM4MAC.java).
The document of the M4 function is available on the product
[website](https://iotdb.apache.org/UserGuide/Master/Operators-Functions/Sample.html#m4-function)
of Apache IoTDB.
- - M4-LSM: The candidate generation and verification framework is
implemented in
[server/src/main/java/org/apache/iotdb/db/query/dataset/groupby/LocalGroupByExecutor4CPV.java](server/src/main/java/org/apache/iotdb/db/query/dataset/groupby/LocalGroupByExecutor4CPV.java).
The step regression index is implemented in
[tsfile/src/main/java/org/apache/iotdb/tsfile/file/metadata/statistics/StepRegress.java](tsfile/src/main/java/org/apache/iotdb/tsfile/file/metadata/statistics/StepRegress.java).
+- The codes for two deployments, M4 and M4-LSM, are available in this
repository.
+ - M4: M4 is implemented in
[server/src/main/java/org/apache/iotdb/db/query/udf/builtin/UDTFM4MAC.java](server/src/main/java/org/apache/iotdb/db/query/udf/builtin/UDTFM4MAC.java).
+ - M4-LSM: The candidate generation and verification framework is
implemented in
[server/src/main/java/org/apache/iotdb/db/query/dataset/groupby/LocalGroupByExecutor4CPV.java](server/src/main/java/org/apache/iotdb/db/query/dataset/groupby/LocalGroupByExecutor4CPV.java).
The time index with step regression is implemented in
[tsfile/src/main/java/org/apache/iotdb/tsfile/file/metadata/statistics/StepRegress.java](tsfile/src/main/java/org/apache/iotdb/tsfile/file/metadata/statistics/StepRegress.java).
- Some integration tests for correctness are in
[server/src/test/java/org/apache/iotdb/db/integration/m4/MyTest1.java](server/src/test/java/org/apache/iotdb/db/integration/m4/MyTest1.java).
-- The codes, data and scripts for experiments are in [another GitHub
repository](https://anonymous.4open.science/r/M4-visualization-exp-D378) for
reproducibility.
- For the README of [Apache IoTDB](https://iotdb.apache.org/) itself, please
see [README_IOTDB.md](README_IOTDB.md). To build this repository, run `mvn
clean package -DskipTests -pl -distribution`.
diff --git
a/server/src/main/java/org/apache/iotdb/db/query/dataset/groupby/LocalGroupByExecutor4CPV.java
b/server/src/main/java/org/apache/iotdb/db/query/dataset/groupby/LocalGroupByExecutor4CPV.java
index 0c541766eb4..c4dec2679a4 100644
---
a/server/src/main/java/org/apache/iotdb/db/query/dataset/groupby/LocalGroupByExecutor4CPV.java
+++
b/server/src/main/java/org/apache/iotdb/db/query/dataset/groupby/LocalGroupByExecutor4CPV.java
@@ -119,7 +119,7 @@ public class LocalGroupByExecutor4CPV implements
GroupByExecutor {
// unpackAllOverlappedFilesToTimeSeriesMetadata
try {
- // TODO: this might be bad to load all chunk metadata at first
+ // : this might be bad to load all chunk metadata at first
futureChunkList.addAll(seriesReader.getAllChunkMetadatas4CPV());
// order futureChunkList by chunk startTime
futureChunkList.sort(
@@ -252,7 +252,7 @@ public class LocalGroupByExecutor4CPV implements
GroupByExecutor {
int curIdx = (int) Math.floor((curStartTime - startTime) * 1.0 / interval);
if (splitChunkList.get(curIdx) != null) {
currentChunkList.addAll(splitChunkList.get(curIdx));
- // TODO when to free splitChunkList memory
+ // when to free splitChunkList memory
}
// iterate futureChunkList
@@ -285,7 +285,7 @@ public class LocalGroupByExecutor4CPV implements
GroupByExecutor {
PageReader pageReader =
FileLoaderUtils.loadPageReaderList4CPV(
chunkSuit4CPV.getChunkMetadata(), this.timeFilter);
- // TODO ATTENTION: YOU HAVE TO ENSURE THAT THERE IS ONLY ONE PAGE IN A
CHUNK,
+ // ATTENTION: YOU HAVE TO ENSURE THAT THERE IS ONLY ONE PAGE IN A
CHUNK,
// BECAUSE THE WHOLE IMPLEMENTATION IS BASED ON THIS ASSUMPTION.
// OTHERWISE, PAGEREADER IS FOR THE FIRST PAGE IN THE CHUNK WHILE
// STEPREGRESS IS FOR THE LAST PAGE IN THE CHUNK (THE MERGE OF
STEPREGRESS IS ASSIGN
@@ -358,7 +358,7 @@ public class LocalGroupByExecutor4CPV implements
GroupByExecutor {
// currentChunkList
while (currentChunkList.size() > 0) { // loop 1
// sorted by bottomValue, find BP candidate set
- // TODO double check the sort order logic for different aggregations
+ // double check the sort order logic for different aggregations
currentChunkList.sort(
(o1, o2) -> {
return ((Comparable) (o1.getStatistics().getMinValue()))
@@ -377,9 +377,9 @@ public class LocalGroupByExecutor4CPV implements
GroupByExecutor {
}
}
- // TODO check, whether nonLazyLoad remove affects candidateSet
+ // check, whether nonLazyLoad remove affects candidateSet
List<ChunkSuit4CPV> nonLazyLoad = new ArrayList<>(candidateSet);
- // TODO double check the sort order logic for version
+ // double check the sort order logic for version
nonLazyLoad.sort(
(o1, o2) ->
new MergeReaderPriority(
@@ -399,7 +399,7 @@ public class LocalGroupByExecutor4CPV implements
GroupByExecutor {
PageReader pageReader =
FileLoaderUtils.loadPageReaderList4CPV(
chunkSuit4CPV.getChunkMetadata(), this.timeFilter);
- // TODO ATTENTION: YOU HAVE TO ENSURE THAT THERE IS ONLY ONE
PAGE IN A CHUNK,
+ // ATTENTION: YOU HAVE TO ENSURE THAT THERE IS ONLY ONE PAGE IN
A CHUNK,
// BECAUSE THE WHOLE IMPLEMENTATION IS BASED ON THIS ASSUMPTION.
// OTHERWISE, PAGEREADER IS FOR THE FIRST PAGE IN THE CHUNK
WHILE
// STEPREGRESS IS FOR THE LAST PAGE IN THE CHUNK (THE MERGE OF
STEPREGRESS IS ASSIGN
@@ -431,8 +431,8 @@ public class LocalGroupByExecutor4CPV implements
GroupByExecutor {
new MergeReaderPriority(
candidate.getChunkMetadata().getVersion(),
candidate.getChunkMetadata().getOffsetOfChunkHeader());
- long candidateTimestamp =
candidate.getStatistics().getBottomTimestamp(); // TODO check
- Object candidateValue = candidate.getStatistics().getMinValue(); //
TODO check
+ long candidateTimestamp =
candidate.getStatistics().getBottomTimestamp(); // check
+ Object candidateValue = candidate.getStatistics().getMinValue(); //
check
// verify if this candidate point is deleted
boolean isDeletedItself = false;
@@ -447,9 +447,9 @@ public class LocalGroupByExecutor4CPV implements
GroupByExecutor {
// the candidate point is deleted, then label the chunk as already
lazy loaded, and back
// to loop 2
nonLazyLoad.remove(candidate);
- // TODO check this can really remove the element
- // TODO check whether nonLazyLoad remove affects candidateSet
- // TODO check nonLazyLoad sorted by version number from high to low
+ // check this can really remove the element
+ // check whether nonLazyLoad remove affects candidateSet
+ // check nonLazyLoad sorted by version number from high to low
continue; // back to loop 2
} else { // not deleted
@@ -492,7 +492,7 @@ public class LocalGroupByExecutor4CPV implements
GroupByExecutor {
PageReader pageReader =
FileLoaderUtils.loadPageReaderList4CPV(
chunkSuit4CPV.getChunkMetadata(), this.timeFilter);
- // TODO ATTENTION: YOU HAVE TO ENSURE THAT THERE IS ONLY ONE
PAGE IN A CHUNK,
+ // ATTENTION: YOU HAVE TO ENSURE THAT THERE IS ONLY ONE PAGE
IN A CHUNK,
// BECAUSE THE WHOLE IMPLEMENTATION IS BASED ON THIS
ASSUMPTION.
// OTHERWISE, PAGEREADER IS FOR THE FIRST PAGE IN THE CHUNK
WHILE
// STEPREGRESS IS FOR THE LAST PAGE IN THE CHUNK (THE MERGE
OF STEPREGRESS IS
@@ -524,12 +524,12 @@ public class LocalGroupByExecutor4CPV implements
GroupByExecutor {
} else {
candidate
.getChunkMetadata()
- .insertIntoSortedDeletions(candidateTimestamp,
candidateTimestamp); // TODO check
+ .insertIntoSortedDeletions(candidateTimestamp,
candidateTimestamp); // check
}
nonLazyLoad.remove(candidate);
- // TODO check this can really remove the element
- // TODO check whether nonLazyLoad remove affects candidateSet
- // TODO check nonLazyLoad sorted by version number from high to low
+ // check this can really remove the element
+ // check whether nonLazyLoad remove affects candidateSet
+ // check nonLazyLoad sorted by version number from high to low
continue; // back to loop 2
}
}
@@ -550,7 +550,7 @@ public class LocalGroupByExecutor4CPV implements
GroupByExecutor {
while (currentChunkList.size() > 0) { // loop 1
// sorted by topValue, find TP candidate set
currentChunkList.sort(
- new Comparator<ChunkSuit4CPV>() { // TODO double check the sort
order logic for different
+ new Comparator<ChunkSuit4CPV>() { // double check the sort order
logic for different
// aggregations
public int compare(ChunkSuit4CPV o1, ChunkSuit4CPV o2) {
return ((Comparable) (o2.getStatistics().getMaxValue()))
@@ -574,9 +574,9 @@ public class LocalGroupByExecutor4CPV implements
GroupByExecutor {
}
List<ChunkSuit4CPV> nonLazyLoad = new ArrayList<>(candidateSet);
- // TODO check, whether nonLazyLoad remove affects candidateSet
+ // check, whether nonLazyLoad remove affects candidateSet
nonLazyLoad.sort(
- new Comparator<ChunkSuit4CPV>() { // TODO double check the sort
order logic for version
+ new Comparator<ChunkSuit4CPV>() { // double check the sort order
logic for version
public int compare(ChunkSuit4CPV o1, ChunkSuit4CPV o2) {
return new MergeReaderPriority(
o2.getChunkMetadata().getVersion(),
@@ -597,7 +597,7 @@ public class LocalGroupByExecutor4CPV implements
GroupByExecutor {
PageReader pageReader =
FileLoaderUtils.loadPageReaderList4CPV(
chunkSuit4CPV.getChunkMetadata(), this.timeFilter);
- // TODO ATTENTION: YOU HAVE TO ENSURE THAT THERE IS ONLY ONE
PAGE IN A CHUNK,
+ // ATTENTION: YOU HAVE TO ENSURE THAT THERE IS ONLY ONE PAGE IN
A CHUNK,
// BECAUSE THE WHOLE IMPLEMENTATION IS BASED ON THIS ASSUMPTION.
// OTHERWISE, PAGEREADER IS FOR THE FIRST PAGE IN THE CHUNK
WHILE
// STEPREGRESS IS FOR THE LAST PAGE IN THE CHUNK (THE MERGE OF
STEPREGRESS IS ASSIGN
@@ -614,7 +614,7 @@ public class LocalGroupByExecutor4CPV implements
GroupByExecutor {
}
// chunk data read operation (c)
//
chunkSuit4CPV.getPageReader().updateBPTP(chunkSuit4CPV);
-
chunkSuit4CPV.getPageReader().updateTP_withValueIndex(chunkSuit4CPV); // TODO
+
chunkSuit4CPV.getPageReader().updateTP_withValueIndex(chunkSuit4CPV); //
// check if empty
if (chunkSuit4CPV.statistics.getCount() == 0) {
currentChunkList.remove(chunkSuit4CPV);
@@ -633,8 +633,8 @@ public class LocalGroupByExecutor4CPV implements
GroupByExecutor {
// because statistics of ChunkSuit4CPV is updated, while statistics of
// ChunkSuit4CPV.ChunkMetadata
// is fixed.
- long candidateTimestamp = candidate.getStatistics().getTopTimestamp();
// TODO check
- Object candidateValue = candidate.getStatistics().getMaxValue(); //
TODO check
+ long candidateTimestamp = candidate.getStatistics().getTopTimestamp();
// check
+ Object candidateValue = candidate.getStatistics().getMaxValue(); //
check
// verify if this candidate point is deleted
boolean isDeletedItself = false;
@@ -649,9 +649,9 @@ public class LocalGroupByExecutor4CPV implements
GroupByExecutor {
// the candidate point is deleted, then label the chunk as already
lazy loaded, and back
// to loop 2
nonLazyLoad.remove(candidate);
- // TODO check this can really remove the element
- // TODO check whether nonLazyLoad remove affects candidateSet
- // TODO check nonLazyLoad sorted by version number from high to low
+ // check this can really remove the element
+ // check whether nonLazyLoad remove affects candidateSet
+ // check nonLazyLoad sorted by version number from high to low
continue; // back to loop 2
} else { // not deleted
@@ -694,7 +694,7 @@ public class LocalGroupByExecutor4CPV implements
GroupByExecutor {
PageReader pageReader =
FileLoaderUtils.loadPageReaderList4CPV(
chunkSuit4CPV.getChunkMetadata(), this.timeFilter);
- // TODO ATTENTION: YOU HAVE TO ENSURE THAT THERE IS ONLY ONE
PAGE IN A CHUNK,
+ // ATTENTION: YOU HAVE TO ENSURE THAT THERE IS ONLY ONE PAGE
IN A CHUNK,
// BECAUSE THE WHOLE IMPLEMENTATION IS BASED ON THIS
ASSUMPTION.
// OTHERWISE, PAGEREADER IS FOR THE FIRST PAGE IN THE CHUNK
WHILE
// STEPREGRESS IS FOR THE LAST PAGE IN THE CHUNK (THE MERGE
OF STEPREGRESS IS
@@ -726,12 +726,12 @@ public class LocalGroupByExecutor4CPV implements
GroupByExecutor {
} else {
candidate
.getChunkMetadata()
- .insertIntoSortedDeletions(candidateTimestamp,
candidateTimestamp); // TODO check
+ .insertIntoSortedDeletions(candidateTimestamp,
candidateTimestamp); // check
}
nonLazyLoad.remove(candidate);
- // TODO check this can really remove the element
- // TODO check whether nonLazyLoad remove affects candidateSet
- // TODO check nonLazyLoad sorted by version number from high to low
+ // check this can really remove the element
+ // check whether nonLazyLoad remove affects candidateSet
+ // check nonLazyLoad sorted by version number from high to low
continue; // back to loop 2
}
}
@@ -750,7 +750,7 @@ public class LocalGroupByExecutor4CPV implements
GroupByExecutor {
while (currentChunkList.size() > 0) { // loop 1
// sorted by startTime and version, find FP candidate
currentChunkList.sort(
- new Comparator<ChunkSuit4CPV>() { // TODO double check the sort
order logic for different
+ new Comparator<ChunkSuit4CPV>() { // double check the sort order
logic for different
// aggregations
public int compare(ChunkSuit4CPV o1, ChunkSuit4CPV o2) {
// NOTE here get statistics from ChunkSuit4CPV, not from
ChunkSuit4CPV.ChunkMetadata
@@ -780,7 +780,7 @@ public class LocalGroupByExecutor4CPV implements
GroupByExecutor {
PageReader pageReader =
FileLoaderUtils.loadPageReaderList4CPV(
susp_candidate.getChunkMetadata(), this.timeFilter);
- // TODO ATTENTION: YOU HAVE TO ENSURE THAT THERE IS ONLY ONE PAGE IN
A CHUNK,
+ // ATTENTION: YOU HAVE TO ENSURE THAT THERE IS ONLY ONE PAGE IN A
CHUNK,
// BECAUSE THE WHOLE IMPLEMENTATION IS BASED ON THIS ASSUMPTION.
// OTHERWISE, PAGEREADER IS FOR THE FIRST PAGE IN THE CHUNK WHILE
// STEPREGRESS IS FOR THE LAST PAGE IN THE CHUNK (THE MERGE OF
STEPREGRESS IS ASSIGN
@@ -797,8 +797,8 @@ public class LocalGroupByExecutor4CPV implements
GroupByExecutor {
// Note the higher versions of deletes are guaranteed by
// QueryUtils.modifyChunkMetaData(chunkMetadataList,pathModifications)
// NOTE here get statistics from ChunkSuit4CPV, not from
ChunkSuit4CPV.ChunkMetadata
- long candidateTimestamp =
susp_candidate.getStatistics().getStartTime(); // TODO check
- Object candidateValue =
susp_candidate.getStatistics().getFirstValue(); // TODO check
+ long candidateTimestamp =
susp_candidate.getStatistics().getStartTime(); // check
+ Object candidateValue =
susp_candidate.getStatistics().getFirstValue(); // check
boolean isDeletedItself = false;
long deleteEndTime = -1;
@@ -830,7 +830,7 @@ public class LocalGroupByExecutor4CPV implements
GroupByExecutor {
// update chunkStartTime without loading data, and back to loop 1
susp_candidate.setLazyLoad(true);
// NOTE here get statistics from ChunkSuit4CPV, not from
ChunkSuit4CPV.ChunkMetadata
- susp_candidate.getStatistics().setStartTime(deleteEndTime + 1); //
TODO check
+ susp_candidate.getStatistics().setStartTime(deleteEndTime + 1); //
check
// +1 is because delete is closed interval
}
continue; // back to loop 1
@@ -861,7 +861,7 @@ public class LocalGroupByExecutor4CPV implements
GroupByExecutor {
while (currentChunkList.size() > 0) { // loop 1
// sorted by endTime and version, find LP candidate
currentChunkList.sort(
- new Comparator<ChunkSuit4CPV>() { // TODO double check the sort
order logic for different
+ new Comparator<ChunkSuit4CPV>() {
// aggregations
public int compare(ChunkSuit4CPV o1, ChunkSuit4CPV o2) {
int res =
@@ -891,29 +891,29 @@ public class LocalGroupByExecutor4CPV implements
GroupByExecutor {
PageReader pageReader =
FileLoaderUtils.loadPageReaderList4CPV(
susp_candidate.getChunkMetadata(), this.timeFilter);
- // TODO ATTENTION: YOU HAVE TO ENSURE THAT THERE IS ONLY ONE PAGE IN
A CHUNK,
+ // ATTENTION: YOU HAVE TO ENSURE THAT THERE IS ONLY ONE PAGE IN A
CHUNK,
// BECAUSE THE WHOLE IMPLEMENTATION IS BASED ON THIS ASSUMPTION.
// OTHERWISE, PAGEREADER IS FOR THE FIRST PAGE IN THE CHUNK WHILE
// STEPREGRESS IS FOR THE LAST PAGE IN THE CHUNK (THE MERGE OF
STEPREGRESS IS ASSIGN
// DIRECTLY), WHICH WILL INTRODUCE BUGS!
susp_candidate.setPageReader(pageReader);
}
- // TODO update LP equal to or before statistics.getEndTime
+ // update LP equal to or before statistics.getEndTime
// (b) get the closest data point after or before a timestamp
susp_candidate.updateLPwithTheClosetPointEqualOrBefore(
- susp_candidate.getStatistics().getEndTime()); // TODO DEBUG
- susp_candidate.setLazyLoad(false); // TODO DO NOT FORGET THIS!!!
+ susp_candidate.getStatistics().getEndTime()); // DEBUG
+ susp_candidate.setLazyLoad(false); // DO NOT FORGET THIS!!!
continue; // back to loop 1
} else {
// the chunk has not been lazy loaded, then verify whether the
candidate point is deleted
// Note the higher versions of deletes are guaranteed by
// QueryUtils.modifyChunkMetaData(chunkMetadataList,pathModifications)
- // TODO NOTE here get statistics from ChunkSuit4CPV, not from
ChunkSuit4CPV.ChunkMetadata
- long candidateTimestamp = susp_candidate.getStatistics().getEndTime();
// TODO check
- Object candidateValue = susp_candidate.getStatistics().getLastValue();
// TODO check
+ // NOTE here get statistics from ChunkSuit4CPV, not from
ChunkSuit4CPV.ChunkMetadata
+ long candidateTimestamp = susp_candidate.getStatistics().getEndTime();
// check
+ Object candidateValue = susp_candidate.getStatistics().getLastValue();
// check
boolean isDeletedItself = false;
- long deleteStartTime = Long.MAX_VALUE; // TODO check
+ long deleteStartTime = Long.MAX_VALUE; // check
List<TimeRange> deleteIntervalList =
susp_candidate.getChunkMetadata().getDeleteIntervalList();
if (deleteIntervalList != null) {
@@ -939,11 +939,11 @@ public class LocalGroupByExecutor4CPV implements
GroupByExecutor {
currentChunkList.remove(susp_candidate);
} else {
susp_candidate.setLazyLoad(true);
- // TODO NOTE here get statistics from ChunkSuit4CPV, not from
+ // NOTE here get statistics from ChunkSuit4CPV, not from
// ChunkSuit4CPV.ChunkMetadata
susp_candidate.getStatistics().setEndTime(deleteStartTime - 1);
// -1 is because delete is closed interval
- // TODO check
+ // check
}
continue; // back to loop 1
} else {
diff --git
a/tsfile/src/main/java/org/apache/iotdb/tsfile/file/metadata/statistics/StepRegress.java
b/tsfile/src/main/java/org/apache/iotdb/tsfile/file/metadata/statistics/StepRegress.java
index 9898d145e6f..4d973cb2663 100644
---
a/tsfile/src/main/java/org/apache/iotdb/tsfile/file/metadata/statistics/StepRegress.java
+++
b/tsfile/src/main/java/org/apache/iotdb/tsfile/file/metadata/statistics/StepRegress.java
@@ -46,7 +46,7 @@ public class StepRegress {
// fix that the first segment [t1,t2) is always tilt,
// so t2=t1 in fact means that the first status is level
private DoubleArrayList segmentKeys = new DoubleArrayList(); // t1,t2,...,tm
- // TODO deal with the last key tm
+ // deal with the last key tm
private LongArrayList timestamps = new LongArrayList(); // Pi.t
private LongArrayList intervals = new LongArrayList(); // Pi+1.t-Pi.t
@@ -169,11 +169,10 @@ public class StepRegress {
// intersect
segmentKeys.add((intercept - segmentIntercepts.getLast()) / slope);
// x2i=(b2i-b2i-1)/K
// then add intercept to segmentIntercepts, do not change the order
of codes here
- segmentIntercepts.add(
- intercept); // TODO debug if the first status is actually level
works
+ segmentIntercepts.add(intercept); // debug if the first status is
actually level works
}
// deal with the last interval to make sure the last point is hit
- // TODO create examples to debug this
+ // create examples to debug this
if (i == intervals.size() - 1) {
// 3) to determine the intercept, let the level function run through
// (timestamps.getLast(),timestamps.size())
@@ -185,7 +184,7 @@ public class StepRegress {
segmentKeys.set(
segmentKeys.size() - 1,
(intercept - segmentIntercepts.get(segmentIntercepts.size() - 2))
- / slope); // x2i=(b2i-b2i-1)/K TODO debug here not getLast!
+ / slope); // x2i=(b2i-b2i-1)/K debug here not getLast!
// then add intercept to segmentIntercepts, do not change the order
of codes here
// Note that here is rewrite instead of add.
segmentIntercepts.set(segmentIntercepts.size() - 1, intercept);
@@ -206,7 +205,7 @@ public class StepRegress {
tiltLatestSegmentID += 2;
}
// deal with the last interval to make sure the last point is hit
- // TODO create examples to debug this
+ // create examples to debug this
if (i == intervals.size() - 1) {
if (segmentIntercepts.size() == 1) { // all TTTTTT, only one segment
info
// remove all segment info, and directly connect the first and the
last point
@@ -226,7 +225,7 @@ public class StepRegress {
segmentKeys.set(
segmentKeys.size() - 1,
(segmentIntercepts.get(segmentIntercepts.size() - 2) -
intercept)
- / slope); // x2i+1=(b2i-b2i+1)/K TODO debug here not
getLast!
+ / slope); // x2i+1=(b2i-b2i+1)/K debug here not getLast!
// then add intercept to segmentIntercepts, do not change the
order of codes here
// Note that here is rewrite instead of add.
segmentIntercepts.set(segmentIntercepts.size() - 1, intercept);
@@ -234,8 +233,8 @@ public class StepRegress {
// now check to remove possible disorders
// search from back to front to find the first tilt intercept that
is equal to or larger
// than the current intercept
- int start = segmentIntercepts.size() - 3; // TODO debug
- // TODO consider only one T
+ int start = segmentIntercepts.size() - 3; // debug
+ // consider only one T
boolean equals = false;
for (; start >= 0; start -= 2) {
// note the step is 2, only tilt intercept, no level intercept
@@ -248,7 +247,7 @@ public class StepRegress {
break;
}
}
- if (start < 0) { // TODO bug consider when start<0, i.e., not
found: connecting directly
+ if (start < 0) { // bug consider when start<0, i.e., not found:
connecting directly
// remove all segment info, and directly connect the first and
the last point
this.slope =
(timestamps.size() - 1.0) / (timestamps.getLast() -
timestamps.getFirst());
@@ -288,7 +287,7 @@ public class StepRegress {
segmentKeys =
DoubleArrayList.newListWith(
Arrays.copyOfRange(segmentKeys.toArray(), 0, start +
1));
- // TODO debug the first status is level, b1
+ // debug the first status is level, b1
}
}
// otherwise start==segmentIntercepts.size()-3 && equal=false,
@@ -425,7 +424,7 @@ public class StepRegress {
* in the chunk. Pay attention that f(t) starts from (startTime,1), ends
at (endTime,count).
*/
public double infer_internal(double t) throws IOException {
- if (segmentKeys.size() == 1) { // TODO DEBUG
+ if (segmentKeys.size() == 1) { // DEBUG
return 1;
}
diff --git
a/tsfile/src/main/java/org/apache/iotdb/tsfile/file/metadata/statistics/ValueIndex.java
b/tsfile/src/main/java/org/apache/iotdb/tsfile/file/metadata/statistics/ValueIndex.java
index cfe34498d97..24fadfe5ecf 100644
---
a/tsfile/src/main/java/org/apache/iotdb/tsfile/file/metadata/statistics/ValueIndex.java
+++
b/tsfile/src/main/java/org/apache/iotdb/tsfile/file/metadata/statistics/ValueIndex.java
@@ -107,11 +107,6 @@ public class ValueIndex {
}
}
}
- // // add the last point except the first point
- // if (values.size() >= 2) { // means there is last point except the
first point
- // idxEncoder.encode(pos, idxOut);
- // valueEncoder.encode(values.getLast(), valueOut);
- // }
if (hasDataToFlush) {
// otherwise no need flush, because GorillaV2 encoding will output NaN
even if