This is an automated email from the ASF dual-hosted git repository.
jackietien pushed a commit to branch TyLRU
in repository https://gitbox.apache.org/repos/asf/incubator-iotdb.git
The following commit(s) were added to refs/heads/TyLRU by this push:
new 4d63322 refactor lru
4d63322 is described below
commit 4d63322f4df893d1bdfef0e4a3fb29094a361725
Author: JackieTien97 <[email protected]>
AuthorDate: Thu May 14 14:37:08 2020 +0800
refactor lru
---
.../iotdb/db/engine/cache/AccountableString.java | 20 +++++++++++-
.../apache/iotdb/db/engine/cache/ChunkCache.java | 36 +++++++++++++---------
.../iotdb/db/engine/cache/ChunkMetadataCache.java | 31 +++++++++++--------
.../iotdb/db/engine/cache/LRULinkedHashMap.java | 3 +-
.../db/engine/cache/TimeSeriesMetadataCache.java | 25 ++++++++-------
.../db/query/executor/fill/LastPointReader.java | 1 -
.../iotdb/tsfile/file/metadata/ChunkMetadata.java | 2 +-
.../tsfile/file/metadata/TimeseriesMetadata.java | 13 +++++++-
8 files changed, 88 insertions(+), 43 deletions(-)
diff --git a/server/src/main/java/org/apache/iotdb/db/engine/cache/AccountableString.java b/server/src/main/java/org/apache/iotdb/db/engine/cache/AccountableString.java
index 7551552..e969562 100644
--- a/server/src/main/java/org/apache/iotdb/db/engine/cache/AccountableString.java
+++ b/server/src/main/java/org/apache/iotdb/db/engine/cache/AccountableString.java
@@ -1,10 +1,11 @@
package org.apache.iotdb.db.engine.cache;
+import java.util.Objects;
import org.apache.iotdb.tsfile.common.cache.Accountable;
public class AccountableString implements Accountable {
- private String string;
+ private final String string;
private long RAMSize;
public AccountableString(String string) {
@@ -24,4 +25,21 @@ public class AccountableString implements Accountable {
public long getRAMSize() {
return RAMSize;
}
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) {
+ return true;
+ }
+ if (o == null || getClass() != o.getClass()) {
+ return false;
+ }
+ AccountableString that = (AccountableString) o;
+ return Objects.equals(string, that.string);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(string);
+ }
}
diff --git a/server/src/main/java/org/apache/iotdb/db/engine/cache/ChunkCache.java b/server/src/main/java/org/apache/iotdb/db/engine/cache/ChunkCache.java
index 325131d..b2365fe 100644
--- a/server/src/main/java/org/apache/iotdb/db/engine/cache/ChunkCache.java
+++ b/server/src/main/java/org/apache/iotdb/db/engine/cache/ChunkCache.java
@@ -34,20 +34,21 @@ import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
/**
- * This class is used to cache <code>Chunk</code> of <code>ChunkMetaData</code> in IoTDB. The caching
- * strategy is LRU.
+ * This class is used to cache <code>Chunk</code> of <code>ChunkMetaData</code> in IoTDB. The
+ * caching strategy is LRU.
*/
public class ChunkCache {
private static final Logger logger = LoggerFactory.getLogger(ChunkCache.class);
private static final IoTDBConfig config = IoTDBDescriptor.getInstance().getConfig();
- private static final long MEMORY_THRESHOLD_IN_CHUNK_CACHE = config.getAllocateMemoryForChunkCache();
- private static boolean cacheEnable = config.isMetaDataCacheEnable();
+ private static final long MEMORY_THRESHOLD_IN_CHUNK_CACHE = config
+ .getAllocateMemoryForChunkCache();
+ private static final boolean cacheEnable = config.isMetaDataCacheEnable();
private final LRULinkedHashMap<ChunkMetadata, Chunk> lruCache;
- private AtomicLong cacheHitNum = new AtomicLong();
- private AtomicLong cacheRequestNum = new AtomicLong();
+ private final AtomicLong cacheHitNum = new AtomicLong();
+ private final AtomicLong cacheRequestNum = new AtomicLong();
private final ReadWriteLock lock = new ReentrantReadWriteLock();
@@ -62,13 +63,14 @@ public class ChunkCache {
protected long calEntrySize(ChunkMetadata key, Chunk value) {
long currentSize;
if (count < 10) {
- currentSize = RamUsageEstimator.shallowSizeOf(key) + RamUsageEstimator.sizeOf(value);
+ currentSize = RamUsageEstimator.NUM_BYTES_OBJECT_REF + RamUsageEstimator
+ .shallowSizeOf(key.getChunkLoader()) + RamUsageEstimator.sizeOf(value);
averageSize = ((averageSize * count) + currentSize) / (++count);
} else if (count < 100000) {
count++;
currentSize = averageSize;
} else {
- averageSize = RamUsageEstimator.shallowSizeOf(key) + RamUsageEstimator.sizeOf(value);
+ averageSize = RamUsageEstimator.NUM_BYTES_OBJECT_REF + RamUsageEstimator.sizeOf(value);
count = 1;
currentSize = averageSize;
}
@@ -85,7 +87,8 @@ public class ChunkCache {
public Chunk get(ChunkMetadata chunkMetaData, TsFileSequenceReader reader) throws IOException {
if (!cacheEnable) {
Chunk chunk = reader.readMemChunk(chunkMetaData);
- return new Chunk(chunk.getHeader(), chunk.getData().duplicate(), chunk.getDeletedAt(), reader.getEndianType());
+ return new Chunk(chunk.getHeader(), chunk.getData().duplicate(), chunk.getDeletedAt(),
+ reader.getEndianType());
}
cacheRequestNum.incrementAndGet();
@@ -96,7 +99,8 @@ public class ChunkCache {
cacheHitNum.incrementAndGet();
printCacheLog(true);
Chunk chunk = lruCache.get(chunkMetaData);
- return new Chunk(chunk.getHeader(), chunk.getData().duplicate(), chunk.getDeletedAt(), reader.getEndianType());
+ return new Chunk(chunk.getHeader(), chunk.getData().duplicate(), chunk.getDeletedAt(),
+ reader.getEndianType());
}
} finally {
lock.readLock().unlock();
@@ -115,12 +119,14 @@ public class ChunkCache {
cacheHitNum.incrementAndGet();
printCacheLog(true);
Chunk chunk = lruCache.get(chunkMetaData);
- return new Chunk(chunk.getHeader(), chunk.getData().duplicate(), chunk.getDeletedAt(), reader.getEndianType());
+ return new Chunk(chunk.getHeader(), chunk.getData().duplicate(), chunk.getDeletedAt(),
+ reader.getEndianType());
}
printCacheLog(false);
Chunk chunk = reader.readMemChunk(chunkMetaData);
lruCache.put(chunkMetaData, chunk);
- return new Chunk(chunk.getHeader(), chunk.getData().duplicate(), chunk.getDeletedAt(), reader.getEndianType());
+ return new Chunk(chunk.getHeader(), chunk.getData().duplicate(), chunk.getDeletedAt(),
+ reader.getEndianType());
} catch (IOException e) {
logger.error("something wrong happened while reading {}", reader.getFileName());
throw e;
@@ -135,9 +141,9 @@ public class ChunkCache {
return;
}
logger.debug(
- "[ChunkMetaData cache {}hit] The number of requests for cache is {}, hit rate is {}.",
- isHit ? "" : "didn't ", cacheRequestNum.get(),
- cacheHitNum.get() * 1.0 / cacheRequestNum.get());
+ "[ChunkMetaData cache {}hit] The number of requests for cache is {}, hit rate is {}.",
+ isHit ? "" : "didn't ", cacheRequestNum.get(),
+ cacheHitNum.get() * 1.0 / cacheRequestNum.get());
}
public double calculateChunkHitRatio() {
diff --git a/server/src/main/java/org/apache/iotdb/db/engine/cache/ChunkMetadataCache.java b/server/src/main/java/org/apache/iotdb/db/engine/cache/ChunkMetadataCache.java
index 3740fe3..80846c0 100644
--- a/server/src/main/java/org/apache/iotdb/db/engine/cache/ChunkMetadataCache.java
+++ b/server/src/main/java/org/apache/iotdb/db/engine/cache/ChunkMetadataCache.java
@@ -47,7 +47,7 @@ public class ChunkMetadataCache {
private static final Logger logger = LoggerFactory.getLogger(ChunkMetadataCache.class);
private static final IoTDBConfig config = IoTDBDescriptor.getInstance().getConfig();
private static final long MEMORY_THRESHOLD_IN_B = config.getAllocateMemoryForChunkMetaDataCache();
- private static boolean cacheEnable = config.isMetaDataCacheEnable();
+ private static final boolean cacheEnable = config.isMetaDataCacheEnable();
/**
* key: file path dot deviceId dot sensorId.
* <p>
@@ -57,8 +57,8 @@ public class ChunkMetadataCache {
private final ReadWriteLock lock = new ReentrantReadWriteLock();
- private AtomicLong cacheHitNum = new AtomicLong();
- private AtomicLong cacheRequestNum = new AtomicLong();
+ private final AtomicLong cacheHitNum = new AtomicLong();
+ private final AtomicLong cacheRequestNum = new AtomicLong();
private ChunkMetadataCache(long memoryThreshold) {
@@ -73,19 +73,26 @@ public class ChunkMetadataCache {
}
long entrySize;
if (count < 10) {
- long currentSize = RamUsageEstimator.shallowSizeOf(value.get(0))
- + RamUsageEstimator.shallowSizeOf(value.get(0).getStatistics());
+ long currentSize = RamUsageEstimator.shallowSizeOf(value.get(0)) + RamUsageEstimator
+ .shallowSizeOf(value.get(0).getStatistics()) + RamUsageEstimator
+ .sizeOf(value.get(0).getMeasurementUid()) + RamUsageEstimator
+ .shallowSizeOf(value.get(0).getChunkLoader());
averageSize = ((averageSize * count) + currentSize) / (++count);
IoTDBConfigDynamicAdapter.setChunkMetadataSizeInByte(averageSize);
- entrySize = RamUsageEstimator.sizeOf(key) + currentSize * value.size();
+ entrySize = RamUsageEstimator.sizeOf(key) + currentSize * value.size() + RamUsageEstimator
+ .shallowSizeOf(value);
} else if (count < 100000) {
count++;
- entrySize = RamUsageEstimator.sizeOf(key) + averageSize * value.size();
+ entrySize = RamUsageEstimator.sizeOf(key) + averageSize * value.size() + RamUsageEstimator
+ .shallowSizeOf(value);
} else {
averageSize = RamUsageEstimator.shallowSizeOf(value.get(0)) + RamUsageEstimator
- .shallowSizeOf(value.get(0).getStatistics());
+ .shallowSizeOf(value.get(0).getStatistics()) + RamUsageEstimator
+ .sizeOf(value.get(0).getMeasurementUid()) + RamUsageEstimator
+ .shallowSizeOf(value.get(0).getChunkLoader());
count = 1;
- entrySize = RamUsageEstimator.sizeOf(key) + averageSize * value.size();
+ entrySize = RamUsageEstimator.sizeOf(key) + averageSize * value.size() + RamUsageEstimator
+ .shallowSizeOf(value);
}
key.setRAMSize(entrySize);
return entrySize;
@@ -118,8 +125,8 @@ public class ChunkMetadataCache {
return tsFileReader.getChunkMetadataList(seriesPath);
}
- AccountableString key = new AccountableString((filePath + IoTDBConstant.PATH_SEPARATOR
- + seriesPath.getDevice() + seriesPath.getMeasurement()).intern());
+ AccountableString key = new AccountableString(filePath + IoTDBConstant.PATH_SEPARATOR
+ + seriesPath.getDevice() + seriesPath.getMeasurement());
cacheRequestNum.incrementAndGet();
@@ -151,7 +158,7 @@ public class ChunkMetadataCache {
List<ChunkMetadata> chunkMetaDataList = FileLoaderUtils
.getChunkMetadataList(seriesPath, filePath);
lruCache.put(key, chunkMetaDataList);
- return chunkMetaDataList;
+ return new ArrayList<>(chunkMetaDataList);
} finally {
lock.writeLock().unlock();
}
diff --git a/server/src/main/java/org/apache/iotdb/db/engine/cache/LRULinkedHashMap.java b/server/src/main/java/org/apache/iotdb/db/engine/cache/LRULinkedHashMap.java
index 5c38fb7..c13fc07 100644
--- a/server/src/main/java/org/apache/iotdb/db/engine/cache/LRULinkedHashMap.java
+++ b/server/src/main/java/org/apache/iotdb/db/engine/cache/LRULinkedHashMap.java
@@ -33,6 +33,7 @@ public abstract class LRULinkedHashMap<K extends Accountable, V> {
private static final float LOAD_FACTOR_MAP = 0.75f;
private static final int INITIAL_CAPACITY = 128;
private static final float RETAIN_PERCENT = 0.9f;
+ private static final int MAP_ENTRY_SIZE = 40;
private final LinkedHashMap<K, V> linkedHashMap;
@@ -60,7 +61,7 @@ public abstract class LRULinkedHashMap<K extends Accountable, V> {
}
public V put(K key, V value) {
- long size = calEntrySize(key, value);
+ long size = calEntrySize(key, value) + MAP_ENTRY_SIZE;
key.setRAMSize(size);
usedMemory += size;
V v = linkedHashMap.put(key, value);
diff --git a/server/src/main/java/org/apache/iotdb/db/engine/cache/TimeSeriesMetadataCache.java b/server/src/main/java/org/apache/iotdb/db/engine/cache/TimeSeriesMetadataCache.java
index d1189d3..33cf9f3 100644
--- a/server/src/main/java/org/apache/iotdb/db/engine/cache/TimeSeriesMetadataCache.java
+++ b/server/src/main/java/org/apache/iotdb/db/engine/cache/TimeSeriesMetadataCache.java
@@ -48,12 +48,12 @@ public class TimeSeriesMetadataCache {
private static final IoTDBConfig config = IoTDBDescriptor.getInstance().getConfig();
private static final long MEMORY_THRESHOLD_IN_TIME_SERIES_METADATA_CACHE =
config
.getAllocateMemoryForTimeSeriesMetaDataCache();
- private static boolean cacheEnable = config.isMetaDataCacheEnable();
+ private static final boolean cacheEnable = config.isMetaDataCacheEnable();
private final LRULinkedHashMap<TimeSeriesMetadataCacheKey,
TimeseriesMetadata> lruCache;
- private AtomicLong cacheHitNum = new AtomicLong();
- private AtomicLong cacheRequestNum = new AtomicLong();
+ private final AtomicLong cacheHitNum = new AtomicLong();
+ private final AtomicLong cacheRequestNum = new AtomicLong();
private final ReadWriteLock lock = new ReentrantReadWriteLock();
@@ -70,7 +70,10 @@ public class TimeSeriesMetadataCache {
protected long calEntrySize(TimeSeriesMetadataCacheKey key, TimeseriesMetadata value) {
long currentSize;
if (count < 10) {
- currentSize = RamUsageEstimator.shallowSizeOf(key) + RamUsageEstimator.sizeOf(value);
+ currentSize = RamUsageEstimator.shallowSizeOf(key) + RamUsageEstimator.sizeOf(key.device)
+ + RamUsageEstimator.sizeOf(key.measurement) + RamUsageEstimator.shallowSizeOf(value)
+ + RamUsageEstimator.sizeOf(value.getMeasurementId()) + RamUsageEstimator
+ .shallowSizeOf(value.getStatistics());
averageSize = ((averageSize * count) + currentSize) / (++count);
} else if (count < 100000) {
count++;
@@ -110,7 +113,7 @@ public class TimeSeriesMetadataCache {
if (lruCache.containsKey(key)) {
cacheHitNum.incrementAndGet();
printCacheLog(true);
- return lruCache.get(key);
+ return new TimeseriesMetadata(lruCache.get(key));
}
} finally {
lock.readLock().unlock();
@@ -121,7 +124,7 @@ public class TimeSeriesMetadataCache {
if (lruCache.containsKey(key)) {
cacheHitNum.incrementAndGet();
printCacheLog(true);
- return lruCache.get(key);
+ return new TimeseriesMetadata(lruCache.get(key));
}
printCacheLog(false);
// bloom filter part
@@ -137,7 +140,7 @@ public class TimeSeriesMetadataCache {
timeSeriesMetadataList.forEach(timeseriesMetadata ->
lruCache.put(new TimeSeriesMetadataCacheKey(key.filePath, key.device,
timeseriesMetadata.getMeasurementId()), timeseriesMetadata));
- return lruCache.get(key);
+ return new TimeseriesMetadata(lruCache.get(key));
} catch (IOException e) {
logger.error("something wrong happened while reading {}", key.filePath);
throw e;
@@ -203,9 +206,9 @@ public class TimeSeriesMetadataCache {
public static class TimeSeriesMetadataCacheKey implements Accountable {
- private String filePath;
- private String device;
- private String measurement;
+ private final String filePath;
+ private final String device;
+ private final String measurement;
private long RAMSize;
@@ -241,7 +244,7 @@ public class TimeSeriesMetadataCache {
@Override
public long getRAMSize() {
- return 0;
+ return RAMSize;
}
}
diff --git a/server/src/main/java/org/apache/iotdb/db/query/executor/fill/LastPointReader.java b/server/src/main/java/org/apache/iotdb/db/query/executor/fill/LastPointReader.java
index 2e59c31..ac80a16 100644
--- a/server/src/main/java/org/apache/iotdb/db/query/executor/fill/LastPointReader.java
+++ b/server/src/main/java/org/apache/iotdb/db/query/executor/fill/LastPointReader.java
@@ -103,7 +103,6 @@ public class LastPointReader {
dataType);
} else {
List<ChunkMetadata> seqChunkMetadataList = timeseriesMetadata.loadChunkMetadataList();
-
for (int i = seqChunkMetadataList.size() - 1; i >= 0; i--) {
lastPoint = getChunkLastPoint(seqChunkMetadataList.get(i));
// last point of this sequence chunk is valid, quit the loop
diff --git a/tsfile/src/main/java/org/apache/iotdb/tsfile/file/metadata/ChunkMetadata.java b/tsfile/src/main/java/org/apache/iotdb/tsfile/file/metadata/ChunkMetadata.java
index a45ee7c..58afa42 100644
--- a/tsfile/src/main/java/org/apache/iotdb/tsfile/file/metadata/ChunkMetadata.java
+++ b/tsfile/src/main/java/org/apache/iotdb/tsfile/file/metadata/ChunkMetadata.java
@@ -218,6 +218,6 @@ public class ChunkMetadata implements Accountable {
@Override
public long getRAMSize() {
- return 0;
+ return RAMSize;
}
}
\ No newline at end of file
diff --git a/tsfile/src/main/java/org/apache/iotdb/tsfile/file/metadata/TimeseriesMetadata.java b/tsfile/src/main/java/org/apache/iotdb/tsfile/file/metadata/TimeseriesMetadata.java
index 3696851..44ce1f5 100644
--- a/tsfile/src/main/java/org/apache/iotdb/tsfile/file/metadata/TimeseriesMetadata.java
+++ b/tsfile/src/main/java/org/apache/iotdb/tsfile/file/metadata/TimeseriesMetadata.java
@@ -46,6 +46,17 @@ public class TimeseriesMetadata implements Accountable {
private long RAMSize;
+ public TimeseriesMetadata() {
+ }
+
+ public TimeseriesMetadata(TimeseriesMetadata timeseriesMetadata) {
+ this.startOffsetOfChunkMetaDataList = timeseriesMetadata.startOffsetOfChunkMetaDataList;
+ this.chunkMetaDataListDataSize = timeseriesMetadata.chunkMetaDataListDataSize;
+ this.measurementId = timeseriesMetadata.measurementId;
+ this.tsDataType = timeseriesMetadata.tsDataType;
+ this.statistics = timeseriesMetadata.statistics;
+ this.modified = timeseriesMetadata.modified;
+ }
public static TimeseriesMetadata deserializeFrom(ByteBuffer buffer) {
TimeseriesMetadata timeseriesMetaData = new TimeseriesMetadata();
@@ -136,6 +147,6 @@ public class TimeseriesMetadata implements Accountable {
@Override
public long getRAMSize() {
- return 0;
+ return RAMSize;
}
}