I noticed that the max-points-per-block option from the [data] section of the config appears to be ignored by the tsm1 engine in several places, which compare against the hard-coded default of 1000 points per block (tsdb.DefaultMaxPointsPerBlock) instead. I'm not sure whether this is a bug or intentional.
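
For context, this is the option and default I mean. The sketch below is from memory and the Config field name is my assumption of how the toml key maps, so take it as illustrative only:

// tsdb/config.go (approximate sketch, not a verbatim quote)
const DefaultMaxPointsPerBlock = 1000

type Config struct {
	// ... other [data] options ...

	// Assumed field backing the [data] max-points-per-block toml key.
	MaxPointsPerBlock int `toml:"max-points-per-block"`
}
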
file: tsdb/engine/tsm1/compact.go
289	// Skip the file if it's over the max size and contains a full block and it does not have any tombstones
290	if group.size() > uint64(maxTSMFileSize) && c.FileStore.BlockCount(group.files[0].Path, 1) == tsdb.DefaultMaxPointsPerBlock && !group.hasTombstones() {
291		skip = true
292	}

357	// Skip the file if it's over the max size and contains a full block or the generation is split
358	// over multiple files. In the latter case, that would mean the data in the file spilled over
359	// the 2GB limit.
360	if g.size() > uint64(maxTSMFileSize) && c.FileStore.BlockCount(g.files[0].Path, 1) == tsdb.DefaultMaxPointsPerBlock || g.count() > 1 {
361		start = i + 1
362	}

403	// Skip the file if it's over the max size and it contains a full block
404	if gen.size() >= uint64(maxTSMFileSize) && c.FileStore.BlockCount(gen.files[0].Path, 1) == tsdb.DefaultMaxPointsPerBlock && !gen.hasTombstones() {
405		startIndex++
406		continue
407	}

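If I'm reading these three checks right, on a shard written with a non-default max-points-per-block value the equality against tsdb.DefaultMaxPointsPerBlock can never hold, so a file over the 2GB limit is never treated as "full" and keeps being selected for compaction. A minimal standalone illustration of the comparison (4096 is just a hypothetical setting of mine, not anything from the code):

package main

import "fmt"

// Mirrors tsdb.DefaultMaxPointsPerBlock.
const defaultMaxPointsPerBlock = 1000

func main() {
	// Hypothetical: this shard was written with max-points-per-block = 4096,
	// so BlockCount(path, 1) reports 4096 for a completely full block.
	blockCount := 4096

	// The planner checks above compare against the hard-coded default, so a
	// genuinely full block never looks full and the oversized file is not skipped.
	fmt.Println(blockCount == defaultMaxPointsPerBlock) // false
}
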
520	// WriteSnapshot will write a Cache snapshot to a new TSM files.
521	func (c *Compactor) WriteSnapshot(cache *Cache) ([]string, error) {
522		c.mu.RLock()
523		opened := c.opened
524		c.mu.RUnlock()
525
526		if !opened {
527			return nil, errSnapshotsDisabled
528		}
529
530		iter := NewCacheKeyIterator(cache, tsdb.DefaultMaxPointsPerBlock)

545	// Compact will write multiple smaller TSM files into 1 or more larger files
546	func (c *Compactor) compact(fast bool, tsmFiles []string) ([]string, error) {
547		size := c.Size
548		if size <= 0 {
549			size = tsdb.DefaultMaxPointsPerBlock
550		}
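
compact() already falls back from a configurable c.Size to the default, so I would have expected the other call sites to do the same. Purely as a sketch (the helper name is made up, and I'm assuming the configured [data] max-points-per-block value is what ends up in c.Size):

// Hypothetical helper, not actual code from compact.go.
func (c *Compactor) maxPointsPerBlock() int {
	if c.Size > 0 {
		return c.Size
	}
	return tsdb.DefaultMaxPointsPerBlock
}

WriteSnapshot could then pass c.maxPointsPerBlock() to NewCacheKeyIterator, and the planner checks above could compare BlockCount against the same configured value rather than the constant.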