Repository: cassandra Updated Branches: refs/heads/trunk 43ff27fc7 -> db68ac9fd
Print sensible units for all log messages patch by Giampaolo Trapasso; reviewed by Joel Knighton for CASSANDRA-9692 Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/db68ac9f Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/db68ac9f Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/db68ac9f Branch: refs/heads/trunk Commit: db68ac9fd791d777df0241ef9bd381eca46bbd20 Parents: 43ff27f Author: Giampaolo Trapasso <[email protected]> Authored: Thu Feb 18 16:37:33 2016 +0100 Committer: Aleksey Yeschenko <[email protected]> Committed: Wed Mar 30 20:42:15 2016 +0100 ---------------------------------------------------------------------- CHANGES.txt | 1 + .../cassandra/cache/SerializingCache.java | 3 +- .../cassandra/config/DatabaseDescriptor.java | 4 +-- .../apache/cassandra/cql3/QueryProcessor.java | 4 +-- .../cql3/statements/BatchStatement.java | 10 ++++-- .../apache/cassandra/db/ColumnFamilyStore.java | 16 ++++++++-- .../cassandra/db/commitlog/CommitLog.java | 6 ++-- .../db/compaction/CompactionManager.java | 5 +-- .../cassandra/db/compaction/CompactionTask.java | 24 ++++++++++---- .../db/compaction/LeveledManifest.java | 8 +++-- .../cassandra/db/compaction/Scrubber.java | 5 +-- .../cassandra/db/compaction/Verifier.java | 4 +-- .../writers/CompactionAwareWriter.java | 9 ++++-- .../apache/cassandra/gms/TokenSerializer.java | 4 ++- .../index/sasi/disk/OnDiskIndexBuilder.java | 4 ++- .../index/sasi/disk/PerSSTableIndexWriter.java | 10 +++--- .../index/sasi/memory/IndexMemtable.java | 5 +-- .../index/sasi/memory/TrieMemIndex.java | 7 +++-- .../cassandra/index/sasi/plan/Expression.java | 5 +-- .../io/sstable/IndexSummaryRedistribution.java | 16 +++++----- .../sstable/format/RangeAwareSSTableWriter.java | 4 ++- .../io/sstable/format/SSTableReader.java | 6 ++-- .../io/sstable/format/big/BigTableWriter.java | 2 +- .../cassandra/io/util/DiskAwareRunnable.java | 4 ++- .../org/apache/cassandra/io/util/FileUtils.java | 8 ++--- .../apache/cassandra/repair/LocalSyncTask.java | 6 ++-- .../cassandra/service/CassandraDaemon.java | 4 ++- .../cassandra/streaming/StreamReader.java | 4 +-- .../cassandra/streaming/StreamResultFuture.java | 8 +++-- .../cassandra/streaming/StreamWriter.java | 3 +- .../compress/CompressedStreamReader.java | 3 +- .../compress/CompressedStreamWriter.java | 3 +- .../org/apache/cassandra/tools/BulkLoader.java | 33 ++++++++++---------- .../tools/nodetool/CompactionStats.java | 2 +- .../cassandra/tools/nodetool/NetStats.java | 2 +- .../cassandra/tools/nodetool/TableStats.java | 2 +- .../org/apache/cassandra/utils/FBUtilities.java | 20 ++++++++++++ .../cassandra/utils/memory/BufferPool.java | 16 ++++++---- 38 files changed, 184 insertions(+), 96 deletions(-) ---------------------------------------------------------------------- http://git-wip-us.apache.org/repos/asf/cassandra/blob/db68ac9f/CHANGES.txt ---------------------------------------------------------------------- diff --git a/CHANGES.txt b/CHANGES.txt index 8269992..4650cf6 100644 --- a/CHANGES.txt +++ b/CHANGES.txt @@ -1,4 +1,5 @@ 3.6 + * Print sensible units for all log messages (CASSANDRA-9692) * Upgrade Netty to version 4.0.34 (CASSANDRA-11096) * Break the CQL grammar into separate Parser and Lexer (CASSANDRA-11372) * Compress only inter-dc traffic by default (CASSANDRA-8888) http://git-wip-us.apache.org/repos/asf/cassandra/blob/db68ac9f/src/java/org/apache/cassandra/cache/SerializingCache.java 
---------------------------------------------------------------------- diff --git a/src/java/org/apache/cassandra/cache/SerializingCache.java b/src/java/org/apache/cassandra/cache/SerializingCache.java index 3651a0c..0ece686 100644 --- a/src/java/org/apache/cassandra/cache/SerializingCache.java +++ b/src/java/org/apache/cassandra/cache/SerializingCache.java @@ -31,6 +31,7 @@ import org.apache.cassandra.io.ISerializer; import org.apache.cassandra.io.util.MemoryInputStream; import org.apache.cassandra.io.util.MemoryOutputStream; import org.apache.cassandra.io.util.WrappedDataOutputStreamPlus; +import org.apache.cassandra.utils.FBUtilities; /** * Serializes cache values off-heap. @@ -99,7 +100,7 @@ public class SerializingCache<K, V> implements ICache<K, V> { long serializedSize = serializer.serializedSize(value); if (serializedSize > Integer.MAX_VALUE) - throw new IllegalArgumentException("Unable to allocate " + serializedSize + " bytes"); + throw new IllegalArgumentException(String.format("Unable to allocate %s", FBUtilities.prettyPrintMemory(serializedSize))); RefCountedMemory freeableMemory; try http://git-wip-us.apache.org/repos/asf/cassandra/blob/db68ac9f/src/java/org/apache/cassandra/config/DatabaseDescriptor.java ---------------------------------------------------------------------- diff --git a/src/java/org/apache/cassandra/config/DatabaseDescriptor.java b/src/java/org/apache/cassandra/config/DatabaseDescriptor.java index 89ac324..6a3a4c5 100644 --- a/src/java/org/apache/cassandra/config/DatabaseDescriptor.java +++ b/src/java/org/apache/cassandra/config/DatabaseDescriptor.java @@ -593,8 +593,8 @@ public class DatabaseDescriptor } } if (dataFreeBytes < 64L * 1024 * 1048576) // 64 GB - logger.warn("Only {} MB free across all data volumes. Consider adding more capacity to your cluster or removing obsolete snapshots", - dataFreeBytes / 1048576); + logger.warn("Only {} free across all data volumes. 
Consider adding more capacity to your cluster or removing obsolete snapshots", + FBUtilities.prettyPrintMemory(dataFreeBytes)); if (conf.commitlog_directory.equals(conf.saved_caches_directory)) http://git-wip-us.apache.org/repos/asf/cassandra/blob/db68ac9f/src/java/org/apache/cassandra/cql3/QueryProcessor.java ---------------------------------------------------------------------- diff --git a/src/java/org/apache/cassandra/cql3/QueryProcessor.java b/src/java/org/apache/cassandra/cql3/QueryProcessor.java index 9ee3e17..9801b41 100644 --- a/src/java/org/apache/cassandra/cql3/QueryProcessor.java +++ b/src/java/org/apache/cassandra/cql3/QueryProcessor.java @@ -131,9 +131,9 @@ public class QueryProcessor implements QueryHandler { long count = lastMinuteEvictionsCount.getAndSet(0); if (count > 0) - logger.info("{} prepared statements discarded in the last minute because cache limit reached ({} bytes)", + logger.info("{} prepared statements discarded in the last minute because cache limit reached ({})", count, - MAX_CACHE_PREPARED_MEMORY); + FBUtilities.prettyPrintMemory(MAX_CACHE_PREPARED_MEMORY)); } }, 1, 1, TimeUnit.MINUTES); } http://git-wip-us.apache.org/repos/asf/cassandra/blob/db68ac9f/src/java/org/apache/cassandra/cql3/statements/BatchStatement.java ---------------------------------------------------------------------- diff --git a/src/java/org/apache/cassandra/cql3/statements/BatchStatement.java b/src/java/org/apache/cassandra/cql3/statements/BatchStatement.java index 058969b..eb69025 100644 --- a/src/java/org/apache/cassandra/cql3/statements/BatchStatement.java +++ b/src/java/org/apache/cassandra/cql3/statements/BatchStatement.java @@ -40,6 +40,7 @@ import org.apache.cassandra.exceptions.*; import org.apache.cassandra.service.*; import org.apache.cassandra.tracing.Tracing; import org.apache.cassandra.transport.messages.ResultMessage; +import org.apache.cassandra.utils.FBUtilities; import org.apache.cassandra.utils.NoSpamLogger; import org.apache.cassandra.utils.Pair; @@ -290,13 +291,16 @@ public class BatchStatement implements CQLStatement String format = "Batch for {} is of size {}, exceeding specified threshold of {} by {}.{}"; if (size > failThreshold) { - Tracing.trace(format, tableNames, size, failThreshold, size - failThreshold, " (see batch_size_fail_threshold_in_kb)"); - logger.error(format, tableNames, size, failThreshold, size - failThreshold, " (see batch_size_fail_threshold_in_kb)"); + Tracing.trace(format, tableNames, FBUtilities.prettyPrintMemory(size), FBUtilities.prettyPrintMemory(failThreshold), + FBUtilities.prettyPrintMemory(size - failThreshold), " (see batch_size_fail_threshold_in_kb)"); + logger.error(format, tableNames, FBUtilities.prettyPrintMemory(size), FBUtilities.prettyPrintMemory(failThreshold), + FBUtilities.prettyPrintMemory(size - failThreshold), " (see batch_size_fail_threshold_in_kb)"); throw new InvalidRequestException("Batch too large"); } else if (logger.isWarnEnabled()) { - logger.warn(format, tableNames, size, warnThreshold, size - warnThreshold, ""); + logger.warn(format, tableNames, FBUtilities.prettyPrintMemory(size), FBUtilities.prettyPrintMemory(warnThreshold), + FBUtilities.prettyPrintMemory(size - warnThreshold), ""); } ClientWarn.instance.warn(MessageFormatter.arrayFormat(format, new Object[] {tableNames, size, warnThreshold, size - warnThreshold, ""}).getMessage()); } http://git-wip-us.apache.org/repos/asf/cassandra/blob/db68ac9f/src/java/org/apache/cassandra/db/ColumnFamilyStore.java 
---------------------------------------------------------------------- diff --git a/src/java/org/apache/cassandra/db/ColumnFamilyStore.java b/src/java/org/apache/cassandra/db/ColumnFamilyStore.java index 31f3528..a6645b5 100644 --- a/src/java/org/apache/cassandra/db/ColumnFamilyStore.java +++ b/src/java/org/apache/cassandra/db/ColumnFamilyStore.java @@ -851,8 +851,13 @@ public class ColumnFamilyStore implements ColumnFamilyStoreMBean offHeapTotal += allocator.offHeap().owns(); } - logger.debug("Enqueuing flush of {}: {}", name, String.format("%d (%.0f%%) on-heap, %d (%.0f%%) off-heap", - onHeapTotal, onHeapRatio * 100, offHeapTotal, offHeapRatio * 100)); + logger.debug("Enqueuing flush of {}: {}", + name, + String.format("%s (%.0f%%) on-heap, %s (%.0f%%) off-heap", + FBUtilities.prettyPrintMemory(onHeapTotal), + onHeapRatio * 100, + FBUtilities.prettyPrintMemory(offHeapTotal), + offHeapRatio * 100)); } @@ -1125,7 +1130,12 @@ public class ColumnFamilyStore implements ColumnFamilyStoreMBean } memtable.cfs.replaceFlushed(memtable, sstables); reclaim(memtable); - logger.debug("Flushed to {} ({} sstables, {} bytes), biggest {} bytes, smallest {} bytes", sstables, sstables.size(), totalBytesOnDisk, maxBytesOnDisk, minBytesOnDisk); + logger.debug("Flushed to {} ({} sstables, {}), biggest {}, smallest {}", + sstables, + sstables.size(), + FBUtilities.prettyPrintMemory(totalBytesOnDisk), + FBUtilities.prettyPrintMemory(maxBytesOnDisk), + FBUtilities.prettyPrintMemory(minBytesOnDisk)); } // signal the post-flush we've done our work postFlush.latch.countDown(); http://git-wip-us.apache.org/repos/asf/cassandra/blob/db68ac9f/src/java/org/apache/cassandra/db/commitlog/CommitLog.java ---------------------------------------------------------------------- diff --git a/src/java/org/apache/cassandra/db/commitlog/CommitLog.java b/src/java/org/apache/cassandra/db/commitlog/CommitLog.java index 0c6a6cb..fc917d0 100644 --- a/src/java/org/apache/cassandra/db/commitlog/CommitLog.java +++ b/src/java/org/apache/cassandra/db/commitlog/CommitLog.java @@ -46,6 +46,7 @@ import org.apache.cassandra.metrics.CommitLogMetrics; import org.apache.cassandra.net.MessagingService; import org.apache.cassandra.security.EncryptionContext; import org.apache.cassandra.service.StorageService; +import org.apache.cassandra.utils.FBUtilities; import org.apache.cassandra.utils.JVMStabilityInspector; import static org.apache.cassandra.db.commitlog.CommitLogSegment.*; @@ -264,8 +265,9 @@ public class CommitLog implements CommitLogMBean int totalSize = size + ENTRY_OVERHEAD_SIZE; if (totalSize > MAX_MUTATION_SIZE) { - throw new IllegalArgumentException(String.format("Mutation of %s bytes is too large for the maxiumum size of %s", - totalSize, MAX_MUTATION_SIZE)); + throw new IllegalArgumentException(String.format("Mutation of %s is too large for the maxiumum size of %s", + FBUtilities.prettyPrintMemory(totalSize), + FBUtilities.prettyPrintMemory(MAX_MUTATION_SIZE))); } Allocation alloc = allocator.allocate(mutation, (int) totalSize); http://git-wip-us.apache.org/repos/asf/cassandra/blob/db68ac9f/src/java/org/apache/cassandra/db/compaction/CompactionManager.java ---------------------------------------------------------------------- diff --git a/src/java/org/apache/cassandra/db/compaction/CompactionManager.java b/src/java/org/apache/cassandra/db/compaction/CompactionManager.java index f9b0997..ebf66cf 100644 --- a/src/java/org/apache/cassandra/db/compaction/CompactionManager.java +++ 
b/src/java/org/apache/cassandra/db/compaction/CompactionManager.java @@ -1020,14 +1020,15 @@ public class CompactionManager implements CompactionManagerMBean if (!finished.isEmpty()) { - String format = "Cleaned up to %s. %,d to %,d (~%d%% of original) bytes for %,d keys. Time: %,dms."; + String format = "Cleaned up to %s. %s to %s (~%d%% of original) for %,d keys. Time: %,dms."; long dTime = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start); long startsize = sstable.onDiskLength(); long endsize = 0; for (SSTableReader newSstable : finished) endsize += newSstable.onDiskLength(); double ratio = (double) endsize / (double) startsize; - logger.info(String.format(format, finished.get(0).getFilename(), startsize, endsize, (int) (ratio * 100), totalkeysWritten, dTime)); + logger.info(String.format(format, finished.get(0).getFilename(), FBUtilities.prettyPrintMemory(startsize), + FBUtilities.prettyPrintMemory(endsize), (int) (ratio * 100), totalkeysWritten, dTime)); } } http://git-wip-us.apache.org/repos/asf/cassandra/blob/db68ac9f/src/java/org/apache/cassandra/db/compaction/CompactionTask.java ---------------------------------------------------------------------- diff --git a/src/java/org/apache/cassandra/db/compaction/CompactionTask.java b/src/java/org/apache/cassandra/db/compaction/CompactionTask.java index 4cbfc28..1465ba4 100644 --- a/src/java/org/apache/cassandra/db/compaction/CompactionTask.java +++ b/src/java/org/apache/cassandra/db/compaction/CompactionTask.java @@ -88,7 +88,7 @@ public class CompactionTask extends AbstractCompactionTask if (partialCompactionsAcceptable() && transaction.originals().size() > 1) { // Try again w/o the largest one. - logger.warn("insufficient space to compact all requested files {}", StringUtils.join(transaction.originals(), ", ")); + logger.warn("Insufficient space to compact all requested files {}", StringUtils.join(transaction.originals(), ", ")); // Note that we have removed files that are still marked as compacting. // This suboptimal but ok since the caller will unmark all the sstables at the end. SSTableReader removedSSTable = cfs.getMaxSizeFile(transaction.originals()); @@ -206,7 +206,9 @@ public class CompactionTask extends AbstractCompactionTask } // log a bunch of statistics about the result and save to system table compaction_history - long dTime = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start); + + long durationInNano = System.nanoTime() - start; + long dTime = TimeUnit.NANOSECONDS.toMillis(durationInNano); long startsize = SSTableReader.getTotalBytes(transaction.originals()); long endsize = SSTableReader.getTotalBytes(newSStables); double ratio = (double) endsize / (double) startsize; @@ -215,12 +217,22 @@ public class CompactionTask extends AbstractCompactionTask for (SSTableReader reader : newSStables) newSSTableNames.append(reader.descriptor.baseFilename()).append(","); - double mbps = dTime > 0 ? (double) endsize / (1024 * 1024) / ((double) dTime / 1000) : 0; long totalSourceRows = 0; String mergeSummary = updateCompactionHistory(cfs.keyspace.getName(), cfs.getColumnFamilyName(), mergedRowCounts, startsize, endsize); - logger.debug(String.format("Compacted (%s) %d sstables to [%s] to level=%d. %,d bytes to %,d (~%d%% of original) in %,dms = %fMB/s. %,d total partitions merged to %,d. 
Partition merge counts were {%s}", - taskId, transaction.originals().size(), newSSTableNames.toString(), getLevel(), startsize, endsize, (int) (ratio * 100), dTime, mbps, totalSourceRows, totalKeysWritten, mergeSummary)); - logger.trace(String.format("CF Total Bytes Compacted: %,d", CompactionTask.addToTotalBytesCompacted(endsize))); + logger.debug(String.format("Compacted (%s) %d sstables to [%s] to level=%d. %s to %s (~%d%% of original) in %,dms. Throughput = %s. %,d total partitions merged to %,d. Partition merge counts were {%s}", + taskId, + transaction.originals().size(), + newSSTableNames.toString(), + getLevel(), + FBUtilities.prettyPrintMemory(startsize), + FBUtilities.prettyPrintMemory(endsize), + (int) (ratio * 100), + dTime, + FBUtilities.prettyPrintMemoryPerSecond(endsize, durationInNano), + totalSourceRows, + totalKeysWritten, + mergeSummary)); + logger.trace(String.format("CF Total Bytes Compacted: %s", FBUtilities.prettyPrintMemory(CompactionTask.addToTotalBytesCompacted(endsize)))); logger.trace("Actual #keys: {}, Estimated #keys:{}, Err%: {}", totalKeysWritten, estimatedKeys, ((double)(totalKeysWritten - estimatedKeys)/totalKeysWritten)); // update the metrics http://git-wip-us.apache.org/repos/asf/cassandra/blob/db68ac9f/src/java/org/apache/cassandra/db/compaction/LeveledManifest.java ---------------------------------------------------------------------- diff --git a/src/java/org/apache/cassandra/db/compaction/LeveledManifest.java b/src/java/org/apache/cassandra/db/compaction/LeveledManifest.java index 3246f20..3a1f475 100644 --- a/src/java/org/apache/cassandra/db/compaction/LeveledManifest.java +++ b/src/java/org/apache/cassandra/db/compaction/LeveledManifest.java @@ -39,6 +39,7 @@ import org.apache.cassandra.dht.Bounds; import org.apache.cassandra.dht.Range; import org.apache.cassandra.dht.Token; import org.apache.cassandra.service.StorageService; +import org.apache.cassandra.utils.FBUtilities; import org.apache.cassandra.utils.Pair; public class LeveledManifest @@ -471,8 +472,11 @@ public class LeveledManifest { if (!getLevel(i).isEmpty()) { - logger.trace("L{} contains {} SSTables ({} bytes) in {}", - i, getLevel(i).size(), SSTableReader.getTotalBytes(getLevel(i)), this); + logger.trace("L{} contains {} SSTables ({}) in {}", + i, + getLevel(i).size(), + FBUtilities.prettyPrintMemory(SSTableReader.getTotalBytes(getLevel(i))), + this); } } } http://git-wip-us.apache.org/repos/asf/cassandra/blob/db68ac9f/src/java/org/apache/cassandra/db/compaction/Scrubber.java ---------------------------------------------------------------------- diff --git a/src/java/org/apache/cassandra/db/compaction/Scrubber.java b/src/java/org/apache/cassandra/db/compaction/Scrubber.java index 5109036..187caa3 100644 --- a/src/java/org/apache/cassandra/db/compaction/Scrubber.java +++ b/src/java/org/apache/cassandra/db/compaction/Scrubber.java @@ -35,6 +35,7 @@ import org.apache.cassandra.io.util.FileUtils; import org.apache.cassandra.io.util.RandomAccessReader; import org.apache.cassandra.service.ActiveRepairService; import org.apache.cassandra.utils.ByteBufferUtil; +import org.apache.cassandra.utils.FBUtilities; import org.apache.cassandra.utils.JVMStabilityInspector; import org.apache.cassandra.utils.OutputHandler; import org.apache.cassandra.utils.UUIDGen; @@ -143,7 +144,7 @@ public class Scrubber implements Closeable { List<SSTableReader> finished = new ArrayList<>(); boolean completed = false; - outputHandler.output(String.format("Scrubbing %s (%s bytes)", sstable, dataFile.length())); + 
outputHandler.output(String.format("Scrubbing %s (%s)", sstable, FBUtilities.prettyPrintMemory(dataFile.length()))); try (SSTableRewriter writer = SSTableRewriter.constructKeepingOriginals(transaction, false, sstable.maxDataAge)) { nextIndexKey = indexAvailable() ? ByteBufferUtil.readWithShortLength(indexFile) : null; @@ -191,7 +192,7 @@ public class Scrubber implements Closeable // avoid an NPE if key is null String keyName = key == null ? "(unreadable key)" : ByteBufferUtil.bytesToHex(key.getKey()); - outputHandler.debug(String.format("row %s is %s bytes", keyName, dataSizeFromIndex)); + outputHandler.debug(String.format("row %s is %s", keyName, FBUtilities.prettyPrintMemory(dataSizeFromIndex))); assert currentIndexKey != null || !indexAvailable(); http://git-wip-us.apache.org/repos/asf/cassandra/blob/db68ac9f/src/java/org/apache/cassandra/db/compaction/Verifier.java ---------------------------------------------------------------------- diff --git a/src/java/org/apache/cassandra/db/compaction/Verifier.java b/src/java/org/apache/cassandra/db/compaction/Verifier.java index ce04ad3..227b209 100644 --- a/src/java/org/apache/cassandra/db/compaction/Verifier.java +++ b/src/java/org/apache/cassandra/db/compaction/Verifier.java @@ -87,7 +87,7 @@ public class Verifier implements Closeable { long rowStart = 0; - outputHandler.output(String.format("Verifying %s (%s bytes)", sstable, dataFile.length())); + outputHandler.output(String.format("Verifying %s (%s)", sstable, FBUtilities.prettyPrintMemory(dataFile.length()))); outputHandler.output(String.format("Checking computed hash of %s ", sstable)); @@ -177,7 +177,7 @@ public class Verifier implements Closeable long dataSize = nextRowPositionFromIndex - dataStartFromIndex; // avoid an NPE if key is null String keyName = key == null ? 
"(unreadable key)" : ByteBufferUtil.bytesToHex(key.getKey()); - outputHandler.debug(String.format("row %s is %s bytes", keyName, dataSize)); + outputHandler.debug(String.format("row %s is %s", keyName, FBUtilities.prettyPrintMemory(dataSize))); assert currentIndexKey != null || indexFile.isEOF(); http://git-wip-us.apache.org/repos/asf/cassandra/blob/db68ac9f/src/java/org/apache/cassandra/db/compaction/writers/CompactionAwareWriter.java ---------------------------------------------------------------------- diff --git a/src/java/org/apache/cassandra/db/compaction/writers/CompactionAwareWriter.java b/src/java/org/apache/cassandra/db/compaction/writers/CompactionAwareWriter.java index 13d76a8..89f3140 100644 --- a/src/java/org/apache/cassandra/db/compaction/writers/CompactionAwareWriter.java +++ b/src/java/org/apache/cassandra/db/compaction/writers/CompactionAwareWriter.java @@ -35,6 +35,7 @@ import org.apache.cassandra.db.compaction.CompactionTask; import org.apache.cassandra.db.lifecycle.LifecycleTransaction; import org.apache.cassandra.io.sstable.SSTableRewriter; import org.apache.cassandra.io.sstable.format.SSTableReader; +import org.apache.cassandra.utils.FBUtilities; import org.apache.cassandra.utils.concurrent.Transactional; import org.apache.cassandra.db.compaction.OperationType; import org.apache.cassandra.service.StorageService; @@ -213,13 +214,17 @@ public abstract class CompactionAwareWriter extends Transactional.AbstractTransa if (d != null) { if (d.getAvailableSpace() < estimatedWriteSize) - throw new RuntimeException(String.format("Not enough space to write %d bytes to %s (%d bytes available)", estimatedWriteSize, d.location, d.getAvailableSpace())); + throw new RuntimeException(String.format("Not enough space to write %s to %s (%s available)", + FBUtilities.prettyPrintMemory(estimatedWriteSize), + d.location, + FBUtilities.prettyPrintMemory(d.getAvailableSpace()))); logger.trace("putting compaction results in {}", directory); return d; } d = getDirectories().getWriteableLocation(estimatedWriteSize); if (d == null) - throw new RuntimeException("Not enough disk space to store "+estimatedWriteSize+" bytes"); + throw new RuntimeException(String.format("Not enough disk space to store %s", + FBUtilities.prettyPrintMemory(estimatedWriteSize))); return d; } http://git-wip-us.apache.org/repos/asf/cassandra/blob/db68ac9f/src/java/org/apache/cassandra/gms/TokenSerializer.java ---------------------------------------------------------------------- diff --git a/src/java/org/apache/cassandra/gms/TokenSerializer.java b/src/java/org/apache/cassandra/gms/TokenSerializer.java index da4ee7d..41bd821 100644 --- a/src/java/org/apache/cassandra/gms/TokenSerializer.java +++ b/src/java/org/apache/cassandra/gms/TokenSerializer.java @@ -20,6 +20,8 @@ package org.apache.cassandra.gms; import org.apache.cassandra.dht.IPartitioner; import org.apache.cassandra.dht.Token; import org.apache.cassandra.utils.ByteBufferUtil; +import org.apache.cassandra.utils.FBUtilities; + import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -52,7 +54,7 @@ public class TokenSerializer int size = in.readInt(); if (size < 1) break; - logger.trace("Reading token of {} bytes", size); + logger.trace("Reading token of {}", FBUtilities.prettyPrintMemory(size)); byte[] bintoken = new byte[size]; in.readFully(bintoken); tokens.add(partitioner.getTokenFactory().fromByteArray(ByteBuffer.wrap(bintoken))); 
http://git-wip-us.apache.org/repos/asf/cassandra/blob/db68ac9f/src/java/org/apache/cassandra/index/sasi/disk/OnDiskIndexBuilder.java ---------------------------------------------------------------------- diff --git a/src/java/org/apache/cassandra/index/sasi/disk/OnDiskIndexBuilder.java b/src/java/org/apache/cassandra/index/sasi/disk/OnDiskIndexBuilder.java index 528cd62..888dc63 100644 --- a/src/java/org/apache/cassandra/index/sasi/disk/OnDiskIndexBuilder.java +++ b/src/java/org/apache/cassandra/index/sasi/disk/OnDiskIndexBuilder.java @@ -155,7 +155,9 @@ public class OnDiskIndexBuilder { if (term.remaining() >= MAX_TERM_SIZE) { - logger.error("Rejecting value (value size {}, maximum size {} bytes).", term.remaining(), Short.MAX_VALUE); + logger.error("Rejecting value (value size {}, maximum size {}).", + FBUtilities.prettyPrintMemory(term.remaining()), + FBUtilities.prettyPrintMemory(Short.MAX_VALUE)); return this; } http://git-wip-us.apache.org/repos/asf/cassandra/blob/db68ac9f/src/java/org/apache/cassandra/index/sasi/disk/PerSSTableIndexWriter.java ---------------------------------------------------------------------- diff --git a/src/java/org/apache/cassandra/index/sasi/disk/PerSSTableIndexWriter.java b/src/java/org/apache/cassandra/index/sasi/disk/PerSSTableIndexWriter.java index e7be57c..6a46338 100644 --- a/src/java/org/apache/cassandra/index/sasi/disk/PerSSTableIndexWriter.java +++ b/src/java/org/apache/cassandra/index/sasi/disk/PerSSTableIndexWriter.java @@ -212,9 +212,9 @@ public class PerSSTableIndexWriter implements SSTableFlushObserver if (token.remaining() >= OnDiskIndexBuilder.MAX_TERM_SIZE) { - logger.info("Rejecting value (size {}, maximum {} bytes) for column {} (analyzed {}) at {} SSTable.", - term.remaining(), - OnDiskIndexBuilder.MAX_TERM_SIZE, + logger.info("Rejecting value (size {}, maximum {}) for column {} (analyzed {}) at {} SSTable.", + FBUtilities.prettyPrintMemory(term.remaining()), + FBUtilities.prettyPrintMemory(OnDiskIndexBuilder.MAX_TERM_SIZE), columnIndex.getColumnName(), columnIndex.getMode().isAnalyzed, descriptor); @@ -225,11 +225,11 @@ public class PerSSTableIndexWriter implements SSTableFlushObserver { if ((token = TypeUtil.tryUpcast(token, columnIndex.getValidator())) == null) { - logger.info("({}) Failed to add {} to index for key: {}, value size was {} bytes, validator is {}.", + logger.info("({}) Failed to add {} to index for key: {}, value size was {}, validator is {}.", outputFile, columnIndex.getColumnName(), keyValidator.getString(key.getKey()), - size, + FBUtilities.prettyPrintMemory(size), columnIndex.getValidator()); continue; } http://git-wip-us.apache.org/repos/asf/cassandra/blob/db68ac9f/src/java/org/apache/cassandra/index/sasi/memory/IndexMemtable.java ---------------------------------------------------------------------- diff --git a/src/java/org/apache/cassandra/index/sasi/memory/IndexMemtable.java b/src/java/org/apache/cassandra/index/sasi/memory/IndexMemtable.java index cf7f3a5..e55a806 100644 --- a/src/java/org/apache/cassandra/index/sasi/memory/IndexMemtable.java +++ b/src/java/org/apache/cassandra/index/sasi/memory/IndexMemtable.java @@ -26,6 +26,7 @@ import org.apache.cassandra.index.sasi.disk.Token; import org.apache.cassandra.index.sasi.plan.Expression; import org.apache.cassandra.index.sasi.utils.RangeIterator; import org.apache.cassandra.index.sasi.utils.TypeUtil; +import org.apache.cassandra.utils.FBUtilities; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -52,10 +53,10 @@ public class IndexMemtable int size = 
value.remaining(); if ((value = TypeUtil.tryUpcast(value, validator)) == null) { - logger.error("Can't add column {} to index for key: {}, value size {} bytes, validator: {}.", + logger.error("Can't add column {} to index for key: {}, value size {}, validator: {}.", index.columnIndex.getColumnName(), index.columnIndex.keyValidator().getString(key.getKey()), - size, + FBUtilities.prettyPrintMemory(size), validator); return 0; } http://git-wip-us.apache.org/repos/asf/cassandra/blob/db68ac9f/src/java/org/apache/cassandra/index/sasi/memory/TrieMemIndex.java ---------------------------------------------------------------------- diff --git a/src/java/org/apache/cassandra/index/sasi/memory/TrieMemIndex.java b/src/java/org/apache/cassandra/index/sasi/memory/TrieMemIndex.java index 0da65c7..7c72bb7 100644 --- a/src/java/org/apache/cassandra/index/sasi/memory/TrieMemIndex.java +++ b/src/java/org/apache/cassandra/index/sasi/memory/TrieMemIndex.java @@ -38,6 +38,7 @@ import com.googlecode.concurrenttrees.radix.ConcurrentRadixTree; import com.googlecode.concurrenttrees.suffix.ConcurrentSuffixTree; import com.googlecode.concurrenttrees.radix.node.concrete.SmartArrayBasedNodeFactory; import com.googlecode.concurrenttrees.radix.node.Node; +import org.apache.cassandra.utils.FBUtilities; import org.slf4j.Logger; @@ -82,11 +83,11 @@ public class TrieMemIndex extends MemIndex if (term.remaining() >= OnDiskIndexBuilder.MAX_TERM_SIZE) { - logger.info("Can't add term of column {} to index for key: {}, term size {} bytes, max allowed size {} bytes, use analyzed = true (if not yet set) for that column.", + logger.info("Can't add term of column {} to index for key: {}, term size {}, max allowed size {}, use analyzed = true (if not yet set) for that column.", columnIndex.getColumnName(), keyValidator.getString(key.getKey()), - term.remaining(), - OnDiskIndexBuilder.MAX_TERM_SIZE); + FBUtilities.prettyPrintMemory(term.remaining()), + FBUtilities.prettyPrintMemory(OnDiskIndexBuilder.MAX_TERM_SIZE)); continue; } http://git-wip-us.apache.org/repos/asf/cassandra/blob/db68ac9f/src/java/org/apache/cassandra/index/sasi/plan/Expression.java ---------------------------------------------------------------------- diff --git a/src/java/org/apache/cassandra/index/sasi/plan/Expression.java b/src/java/org/apache/cassandra/index/sasi/plan/Expression.java index 679d866..f2fd02a 100644 --- a/src/java/org/apache/cassandra/index/sasi/plan/Expression.java +++ b/src/java/org/apache/cassandra/index/sasi/plan/Expression.java @@ -31,6 +31,7 @@ import org.apache.cassandra.index.sasi.utils.TypeUtil; import org.apache.cassandra.db.marshal.AbstractType; import org.apache.cassandra.db.marshal.UTF8Type; import org.apache.cassandra.utils.ByteBufferUtil; +import org.apache.cassandra.utils.FBUtilities; import org.apache.commons.lang3.builder.HashCodeBuilder; @@ -195,10 +196,10 @@ public class Expression int size = value.remaining(); if ((value = TypeUtil.tryUpcast(value, validator)) == null) { - logger.error("Can't cast value for {} to size accepted by {}, value size is {} bytes.", + logger.error("Can't cast value for {} to size accepted by {}, value size is {}.", index.getColumnName(), validator, - size); + FBUtilities.prettyPrintMemory(size)); return false; } } http://git-wip-us.apache.org/repos/asf/cassandra/blob/db68ac9f/src/java/org/apache/cassandra/io/sstable/IndexSummaryRedistribution.java ---------------------------------------------------------------------- diff --git a/src/java/org/apache/cassandra/io/sstable/IndexSummaryRedistribution.java 
b/src/java/org/apache/cassandra/io/sstable/IndexSummaryRedistribution.java index b4eae31..8fb4835 100644 --- a/src/java/org/apache/cassandra/io/sstable/IndexSummaryRedistribution.java +++ b/src/java/org/apache/cassandra/io/sstable/IndexSummaryRedistribution.java @@ -40,6 +40,7 @@ import org.apache.cassandra.db.compaction.CompactionInterruptedException; import org.apache.cassandra.db.compaction.OperationType; import org.apache.cassandra.db.lifecycle.LifecycleTransaction; import org.apache.cassandra.io.sstable.format.SSTableReader; +import org.apache.cassandra.utils.FBUtilities; import org.apache.cassandra.utils.Pair; import static org.apache.cassandra.io.sstable.Downsampling.BASE_SAMPLING_LEVEL; @@ -131,8 +132,8 @@ public class IndexSummaryRedistribution extends CompactionInfo.Holder total = 0; for (SSTableReader sstable : Iterables.concat(compacting, oldFormatSSTables, newSSTables)) total += sstable.getIndexSummaryOffHeapSize(); - logger.trace("Completed resizing of index summaries; current approximate memory used: {} MB", - total / 1024.0 / 1024.0); + logger.trace("Completed resizing of index summaries; current approximate memory used: {}", + FBUtilities.prettyPrintMemory(total)); return newSSTables; } @@ -183,11 +184,12 @@ public class IndexSummaryRedistribution extends CompactionInfo.Holder int numEntriesAtNewSamplingLevel = IndexSummaryBuilder.entriesAtSamplingLevel(newSamplingLevel, maxSummarySize); double effectiveIndexInterval = sstable.getEffectiveIndexInterval(); - logger.trace("{} has {} reads/sec; ideal space for index summary: {} bytes ({} entries); considering moving " + - "from level {} ({} entries, {} bytes) to level {} ({} entries, {} bytes)", - sstable.getFilename(), readsPerSec, idealSpace, targetNumEntries, currentSamplingLevel, currentNumEntries, - currentNumEntries * avgEntrySize, newSamplingLevel, numEntriesAtNewSamplingLevel, - numEntriesAtNewSamplingLevel * avgEntrySize); + logger.trace("{} has {} reads/sec; ideal space for index summary: {} ({} entries); considering moving " + + "from level {} ({} entries, {}) " + + "to level {} ({} entries, {})", + sstable.getFilename(), readsPerSec, FBUtilities.prettyPrintMemory(idealSpace), targetNumEntries, + currentSamplingLevel, currentNumEntries, FBUtilities.prettyPrintMemory((long) (currentNumEntries * avgEntrySize)), + newSamplingLevel, numEntriesAtNewSamplingLevel, FBUtilities.prettyPrintMemory((long) (numEntriesAtNewSamplingLevel * avgEntrySize))); if (effectiveIndexInterval < minIndexInterval) { http://git-wip-us.apache.org/repos/asf/cassandra/blob/db68ac9f/src/java/org/apache/cassandra/io/sstable/format/RangeAwareSSTableWriter.java ---------------------------------------------------------------------- diff --git a/src/java/org/apache/cassandra/io/sstable/format/RangeAwareSSTableWriter.java b/src/java/org/apache/cassandra/io/sstable/format/RangeAwareSSTableWriter.java index 9fcdfa4..3665da7 100644 --- a/src/java/org/apache/cassandra/io/sstable/format/RangeAwareSSTableWriter.java +++ b/src/java/org/apache/cassandra/io/sstable/format/RangeAwareSSTableWriter.java @@ -33,6 +33,7 @@ import org.apache.cassandra.db.rows.UnfilteredRowIterator; import org.apache.cassandra.io.sstable.Descriptor; import org.apache.cassandra.io.sstable.SSTableMultiWriter; import org.apache.cassandra.service.StorageService; +import org.apache.cassandra.utils.FBUtilities; public class RangeAwareSSTableWriter implements SSTableMultiWriter { @@ -65,7 +66,8 @@ public class RangeAwareSSTableWriter implements SSTableMultiWriter { Directories.DataDirectory 
localDir = cfs.getDirectories().getWriteableLocation(totalSize); if (localDir == null) - throw new IOException("Insufficient disk space to store " + totalSize + " bytes"); + throw new IOException(String.format("Insufficient disk space to store %s", + FBUtilities.prettyPrintMemory(totalSize))); Descriptor desc = Descriptor.fromFilename(cfs.getSSTablePath(cfs.getDirectories().getLocationForDisk(localDir), format)); currentWriter = cfs.createSSTableMultiWriter(desc, estimatedKeys, repairedAt, sstableLevel, header, txn); } http://git-wip-us.apache.org/repos/asf/cassandra/blob/db68ac9f/src/java/org/apache/cassandra/io/sstable/format/SSTableReader.java ---------------------------------------------------------------------- diff --git a/src/java/org/apache/cassandra/io/sstable/format/SSTableReader.java b/src/java/org/apache/cassandra/io/sstable/format/SSTableReader.java index ccd20f5..99adb8d 100644 --- a/src/java/org/apache/cassandra/io/sstable/format/SSTableReader.java +++ b/src/java/org/apache/cassandra/io/sstable/format/SSTableReader.java @@ -421,7 +421,8 @@ public abstract class SSTableReader extends SSTable implements SelfRefCounted<SS System.exit(1); } - logger.debug("Opening {} ({} bytes)", descriptor, new File(descriptor.filenameFor(Component.DATA)).length()); + long fileLength = new File(descriptor.filenameFor(Component.DATA)).length(); + logger.debug("Opening {} ({})", descriptor, FBUtilities.prettyPrintMemory(fileLength)); SSTableReader sstable = internalOpen(descriptor, components, metadata, @@ -477,7 +478,8 @@ public abstract class SSTableReader extends SSTable implements SelfRefCounted<SS System.exit(1); } - logger.debug("Opening {} ({} bytes)", descriptor, new File(descriptor.filenameFor(Component.DATA)).length()); + long fileLength = new File(descriptor.filenameFor(Component.DATA)).length(); + logger.debug("Opening {} ({})", descriptor, FBUtilities.prettyPrintMemory(fileLength)); SSTableReader sstable = internalOpen(descriptor, components, metadata, http://git-wip-us.apache.org/repos/asf/cassandra/blob/db68ac9f/src/java/org/apache/cassandra/io/sstable/format/big/BigTableWriter.java ---------------------------------------------------------------------- diff --git a/src/java/org/apache/cassandra/io/sstable/format/big/BigTableWriter.java b/src/java/org/apache/cassandra/io/sstable/format/big/BigTableWriter.java index 194801c..42f923a 100644 --- a/src/java/org/apache/cassandra/io/sstable/format/big/BigTableWriter.java +++ b/src/java/org/apache/cassandra/io/sstable/format/big/BigTableWriter.java @@ -172,7 +172,7 @@ public class BigTableWriter extends SSTableWriter if (rowSize > DatabaseDescriptor.getCompactionLargePartitionWarningThreshold()) { String keyString = metadata.getKeyValidator().getString(key.getKey()); - logger.warn("Writing large partition {}/{}:{} ({} bytes)", metadata.ksName, metadata.cfName, keyString, rowSize); + logger.warn("Writing large partition {}/{}:{} ({})", metadata.ksName, metadata.cfName, keyString, FBUtilities.prettyPrintMemory(rowSize)); } } http://git-wip-us.apache.org/repos/asf/cassandra/blob/db68ac9f/src/java/org/apache/cassandra/io/util/DiskAwareRunnable.java ---------------------------------------------------------------------- diff --git a/src/java/org/apache/cassandra/io/util/DiskAwareRunnable.java b/src/java/org/apache/cassandra/io/util/DiskAwareRunnable.java index a083218..1fb4885 100644 --- a/src/java/org/apache/cassandra/io/util/DiskAwareRunnable.java +++ b/src/java/org/apache/cassandra/io/util/DiskAwareRunnable.java @@ -18,6 +18,7 @@ package 
org.apache.cassandra.io.util; import org.apache.cassandra.db.Directories; +import org.apache.cassandra.utils.FBUtilities; import org.apache.cassandra.utils.WrappedRunnable; public abstract class DiskAwareRunnable extends WrappedRunnable @@ -31,7 +32,8 @@ public abstract class DiskAwareRunnable extends WrappedRunnable directory = getDirectories().getWriteableLocation(writeSize); if (directory == null) - throw new RuntimeException("Insufficient disk space to write " + writeSize + " bytes"); + throw new RuntimeException(String.format("Insufficient disk space to write %s", + FBUtilities.prettyPrintMemory(writeSize))); return directory; } http://git-wip-us.apache.org/repos/asf/cassandra/blob/db68ac9f/src/java/org/apache/cassandra/io/util/FileUtils.java ---------------------------------------------------------------------- diff --git a/src/java/org/apache/cassandra/io/util/FileUtils.java b/src/java/org/apache/cassandra/io/util/FileUtils.java index 75a6762..a076bbd 100644 --- a/src/java/org/apache/cassandra/io/util/FileUtils.java +++ b/src/java/org/apache/cassandra/io/util/FileUtils.java @@ -399,25 +399,25 @@ public class FileUtils { d = value / TB; String val = df.format(d); - return val + " TB"; + return val + " TiB"; } else if ( value >= GB ) { d = value / GB; String val = df.format(d); - return val + " GB"; + return val + " GiB"; } else if ( value >= MB ) { d = value / MB; String val = df.format(d); - return val + " MB"; + return val + " MiB"; } else if ( value >= KB ) { d = value / KB; String val = df.format(d); - return val + " KB"; + return val + " KiB"; } else { http://git-wip-us.apache.org/repos/asf/cassandra/blob/db68ac9f/src/java/org/apache/cassandra/repair/LocalSyncTask.java ---------------------------------------------------------------------- diff --git a/src/java/org/apache/cassandra/repair/LocalSyncTask.java b/src/java/org/apache/cassandra/repair/LocalSyncTask.java index e1da497..a92708f 100644 --- a/src/java/org/apache/cassandra/repair/LocalSyncTask.java +++ b/src/java/org/apache/cassandra/repair/LocalSyncTask.java @@ -98,9 +98,9 @@ public class LocalSyncTask extends SyncTask implements StreamEventHandler break; case FILE_PROGRESS: ProgressInfo pi = ((StreamEvent.ProgressEvent) event).progress; - state.trace("{}/{} bytes ({}%) {} idx:{}{}", - new Object[] { pi.currentBytes, - pi.totalBytes, + state.trace("{}/{} ({}%) {} idx:{}{}", + new Object[] { FBUtilities.prettyPrintMemory(pi.currentBytes), + FBUtilities.prettyPrintMemory(pi.totalBytes), pi.currentBytes * 100 / pi.totalBytes, pi.direction == ProgressInfo.Direction.OUT ? 
"sent to" : "received from", pi.sessionIndex, http://git-wip-us.apache.org/repos/asf/cassandra/blob/db68ac9f/src/java/org/apache/cassandra/service/CassandraDaemon.java ---------------------------------------------------------------------- diff --git a/src/java/org/apache/cassandra/service/CassandraDaemon.java b/src/java/org/apache/cassandra/service/CassandraDaemon.java index b84a5e3..f0f790a 100644 --- a/src/java/org/apache/cassandra/service/CassandraDaemon.java +++ b/src/java/org/apache/cassandra/service/CassandraDaemon.java @@ -431,7 +431,9 @@ public class CassandraDaemon } logger.info("JVM vendor/version: {}/{}", System.getProperty("java.vm.name"), System.getProperty("java.version")); - logger.info("Heap size: {}/{}", Runtime.getRuntime().totalMemory(), Runtime.getRuntime().maxMemory()); + logger.info("Heap size: {}/{}", + FBUtilities.prettyPrintMemory(Runtime.getRuntime().totalMemory()), + FBUtilities.prettyPrintMemory(Runtime.getRuntime().maxMemory())); for(MemoryPoolMXBean pool: ManagementFactory.getMemoryPoolMXBeans()) logger.info("{} {}: {}", pool.getName(), pool.getType(), pool.getPeakUsage()); http://git-wip-us.apache.org/repos/asf/cassandra/blob/db68ac9f/src/java/org/apache/cassandra/streaming/StreamReader.java ---------------------------------------------------------------------- diff --git a/src/java/org/apache/cassandra/streaming/StreamReader.java b/src/java/org/apache/cassandra/streaming/StreamReader.java index 7d7cf8a..0cd6329 100644 --- a/src/java/org/apache/cassandra/streaming/StreamReader.java +++ b/src/java/org/apache/cassandra/streaming/StreamReader.java @@ -120,7 +120,7 @@ public class StreamReader session.progress(writer.getFilename(), ProgressInfo.Direction.IN, in.getBytesRead(), totalSize); } logger.debug("[Stream #{}] Finished receiving file #{} from {} readBytes = {}, totalSize = {}", - session.planId(), fileSeqNum, session.peer, in.getBytesRead(), totalSize); + session.planId(), fileSeqNum, session.peer, FBUtilities.prettyPrintMemory(in.getBytesRead()), FBUtilities.prettyPrintMemory(totalSize)); return writer; } catch (Throwable e) @@ -154,7 +154,7 @@ public class StreamReader { Directories.DataDirectory localDir = cfs.getDirectories().getWriteableLocation(totalSize); if (localDir == null) - throw new IOException("Insufficient disk space to store " + totalSize + " bytes"); + throw new IOException(String.format("Insufficient disk space to store %s", FBUtilities.prettyPrintMemory(totalSize))); RangeAwareSSTableWriter writer = new RangeAwareSSTableWriter(cfs, estimatedKeys, repairedAt, format, sstableLevel, totalSize, session.getTransaction(cfId), getHeader(cfs.metadata)); StreamHook.instance.reportIncomingFile(cfs, writer, session, fileSeqNum); http://git-wip-us.apache.org/repos/asf/cassandra/blob/db68ac9f/src/java/org/apache/cassandra/streaming/StreamResultFuture.java ---------------------------------------------------------------------- diff --git a/src/java/org/apache/cassandra/streaming/StreamResultFuture.java b/src/java/org/apache/cassandra/streaming/StreamResultFuture.java index 2297c83..ab297ed 100644 --- a/src/java/org/apache/cassandra/streaming/StreamResultFuture.java +++ b/src/java/org/apache/cassandra/streaming/StreamResultFuture.java @@ -28,6 +28,8 @@ import com.google.common.util.concurrent.Futures; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.cassandra.utils.FBUtilities; + /** * A future on the result ({@link StreamState}) of a streaming plan. 
* @@ -168,13 +170,13 @@ public final class StreamResultFuture extends AbstractFuture<StreamState> void handleSessionPrepared(StreamSession session) { SessionInfo sessionInfo = session.getSessionInfo(); - logger.info("[Stream #{} ID#{}] Prepare completed. Receiving {} files({} bytes), sending {} files({} bytes)", + logger.info("[Stream #{} ID#{}] Prepare completed. Receiving {} files({}), sending {} files({})", session.planId(), session.sessionIndex(), sessionInfo.getTotalFilesToReceive(), - sessionInfo.getTotalSizeToReceive(), + FBUtilities.prettyPrintMemory(sessionInfo.getTotalSizeToReceive()), sessionInfo.getTotalFilesToSend(), - sessionInfo.getTotalSizeToSend()); + FBUtilities.prettyPrintMemory(sessionInfo.getTotalSizeToSend())); StreamEvent.SessionPreparedEvent event = new StreamEvent.SessionPreparedEvent(planId, sessionInfo); coordinator.addSessionInfo(sessionInfo); fireStreamEvent(event); http://git-wip-us.apache.org/repos/asf/cassandra/blob/db68ac9f/src/java/org/apache/cassandra/streaming/StreamWriter.java ---------------------------------------------------------------------- diff --git a/src/java/org/apache/cassandra/streaming/StreamWriter.java b/src/java/org/apache/cassandra/streaming/StreamWriter.java index 0cedade..1d30419 100644 --- a/src/java/org/apache/cassandra/streaming/StreamWriter.java +++ b/src/java/org/apache/cassandra/streaming/StreamWriter.java @@ -35,6 +35,7 @@ import org.apache.cassandra.io.util.DataOutputStreamPlus; import org.apache.cassandra.io.util.FileUtils; import org.apache.cassandra.io.util.RandomAccessReader; import org.apache.cassandra.streaming.StreamManager.StreamRateLimiter; +import org.apache.cassandra.utils.FBUtilities; import org.apache.cassandra.utils.Pair; /** @@ -116,7 +117,7 @@ public class StreamWriter compressedOutput.flush(); } logger.debug("[Stream #{}] Finished streaming file {} to {}, bytesTransferred = {}, totalSize = {}", - session.planId(), sstable.getFilename(), session.peer, progress, totalSize); + session.planId(), sstable.getFilename(), session.peer, FBUtilities.prettyPrintMemory(progress), FBUtilities.prettyPrintMemory(totalSize)); } } http://git-wip-us.apache.org/repos/asf/cassandra/blob/db68ac9f/src/java/org/apache/cassandra/streaming/compress/CompressedStreamReader.java ---------------------------------------------------------------------- diff --git a/src/java/org/apache/cassandra/streaming/compress/CompressedStreamReader.java b/src/java/org/apache/cassandra/streaming/compress/CompressedStreamReader.java index 318484f..5aa393e 100644 --- a/src/java/org/apache/cassandra/streaming/compress/CompressedStreamReader.java +++ b/src/java/org/apache/cassandra/streaming/compress/CompressedStreamReader.java @@ -39,6 +39,7 @@ import org.apache.cassandra.streaming.StreamReader; import org.apache.cassandra.streaming.StreamSession; import org.apache.cassandra.streaming.messages.FileMessageHeader; import org.apache.cassandra.io.util.TrackedInputStream; +import org.apache.cassandra.utils.FBUtilities; import org.apache.cassandra.utils.Pair; /** @@ -110,7 +111,7 @@ public class CompressedStreamReader extends StreamReader } } logger.debug("[Stream #{}] Finished receiving file #{} from {} readBytes = {}, totalSize = {}", session.planId(), fileSeqNum, - session.peer, cis.getTotalCompressedBytesRead(), totalSize); + session.peer, FBUtilities.prettyPrintMemory(cis.getTotalCompressedBytesRead()), FBUtilities.prettyPrintMemory(totalSize)); return writer; } catch (Throwable e) 
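A note on the FileUtils.stringifyFileSize hunk above: the only change there is the unit label (KB/MB/GB/TB becomes KiB/MiB/GiB/TiB), because the divisors are binary. The constants themselves are not shown in the hunk, so the values below are assumptions, but the relabelling to IEC units only makes sense if they are the 1024-based sizes:

    // Assumed values of the constants stringifyFileSize divides by (not shown in the hunk):
    private static final double KB = 1024d;
    private static final double MB = KB * 1024d;
    private static final double GB = MB * 1024d;
    private static final double TB = GB * 1024d;

With those divisors, 1,572,864 bytes comes out as something like "1.5 MiB" after this patch, where the old "MB" label wrongly suggested a decimal megabyte.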
http://git-wip-us.apache.org/repos/asf/cassandra/blob/db68ac9f/src/java/org/apache/cassandra/streaming/compress/CompressedStreamWriter.java ---------------------------------------------------------------------- diff --git a/src/java/org/apache/cassandra/streaming/compress/CompressedStreamWriter.java b/src/java/org/apache/cassandra/streaming/compress/CompressedStreamWriter.java index 657da88..900c1ad 100644 --- a/src/java/org/apache/cassandra/streaming/compress/CompressedStreamWriter.java +++ b/src/java/org/apache/cassandra/streaming/compress/CompressedStreamWriter.java @@ -38,6 +38,7 @@ import org.apache.cassandra.io.util.RandomAccessReader; import org.apache.cassandra.streaming.ProgressInfo; import org.apache.cassandra.streaming.StreamSession; import org.apache.cassandra.streaming.StreamWriter; +import org.apache.cassandra.utils.FBUtilities; import org.apache.cassandra.utils.Pair; /** @@ -93,7 +94,7 @@ public class CompressedStreamWriter extends StreamWriter } } logger.debug("[Stream #{}] Finished streaming file {} to {}, bytesTransferred = {}, totalSize = {}", - session.planId(), sstable.getFilename(), session.peer, progress, totalSize); + session.planId(), sstable.getFilename(), session.peer, FBUtilities.prettyPrintMemory(progress), FBUtilities.prettyPrintMemory(totalSize)); } } http://git-wip-us.apache.org/repos/asf/cassandra/blob/db68ac9f/src/java/org/apache/cassandra/tools/BulkLoader.java ---------------------------------------------------------------------- diff --git a/src/java/org/apache/cassandra/tools/BulkLoader.java b/src/java/org/apache/cassandra/tools/BulkLoader.java index f19924e..984cae4 100644 --- a/src/java/org/apache/cassandra/tools/BulkLoader.java +++ b/src/java/org/apache/cassandra/tools/BulkLoader.java @@ -36,6 +36,7 @@ import org.apache.cassandra.config.EncryptionOptions; import org.apache.cassandra.io.sstable.SSTableLoader; import org.apache.cassandra.security.SSLFactory; import org.apache.cassandra.streaming.*; +import org.apache.cassandra.utils.FBUtilities; import org.apache.cassandra.utils.JVMStabilityInspector; import org.apache.cassandra.utils.NativeSSTableLoaderClient; import org.apache.cassandra.utils.OutputHandler; @@ -122,7 +123,7 @@ public class BulkLoader private long lastProgress; private long lastTime; - private int peak = 0; + private long peak = 0; private int totalFiles = 0; private final Multimap<InetAddress, SessionInfo> sessionsByHost = HashMultimap.create(); @@ -208,37 +209,37 @@ public class BulkLoader lastProgress = totalProgress; sb.append("total: ").append(totalSize == 0 ? 100L : totalProgress * 100L / totalSize).append("% "); - sb.append(String.format("%-3d", mbPerSec(deltaProgress, deltaTime))).append("MB/s"); - int average = mbPerSec(totalProgress, time - start); + sb.append(FBUtilities.prettyPrintMemoryPerSecond(deltaProgress, deltaTime)); + long average = bytesPerSecond(totalProgress, time - start); + if (average > peak) { peak = average; } - sb.append("(avg: ").append(average).append(" MB/s)"); + sb.append(" (avg: ").append(FBUtilities.prettyPrintMemoryPerSecond(totalProgress, time - start)).append(")"); - System.out.print(sb.toString()); + System.out.println(sb.toString()); } } - private int mbPerSec(long bytes, long timeInNano) + private long bytesPerSecond(long bytes, long timeInNano) { - double bytesPerNano = (double)bytes / timeInNano; - return (int)(bytesPerNano * 1000 * 1000 * 1000 / (1024 * 1024)); + return timeInNano != 0 ? 
(long) (((double) bytes / timeInNano) * 1000 * 1000 * 1000) : 0; } private void printSummary(int connectionsPerHost) { long end = System.nanoTime(); - long durationMS = (end - start) / 1000000; - int average = mbPerSec(lastProgress, end - start); + long durationMS = ((end - start) / (1000000)); + StringBuilder sb = new StringBuilder(); sb.append("\nSummary statistics: \n"); - sb.append(String.format(" %-30s: %-10d%n", "Connections per host: ", connectionsPerHost)); - sb.append(String.format(" %-30s: %-10d%n", "Total files transferred: ", totalFiles)); - sb.append(String.format(" %-30s: %-10d%n", "Total bytes transferred: ", lastProgress)); - sb.append(String.format(" %-30s: %-10d%n", "Total duration (ms): ", durationMS)); - sb.append(String.format(" %-30s: %-10d%n", "Average transfer rate (MB/s): ", + average)); - sb.append(String.format(" %-30s: %-10d%n", "Peak transfer rate (MB/s): ", + peak)); + sb.append(String.format(" %-24s: %-10d%n", "Connections per host ", connectionsPerHost)); + sb.append(String.format(" %-24s: %-10d%n", "Total files transferred ", totalFiles)); + sb.append(String.format(" %-24s: %-10s%n", "Total bytes transferred ", FBUtilities.prettyPrintMemory(lastProgress))); + sb.append(String.format(" %-24s: %-10s%n", "Total duration ", durationMS + " ms")); + sb.append(String.format(" %-24s: %-10s%n", "Average transfer rate ", FBUtilities.prettyPrintMemoryPerSecond(lastProgress, end - start))); + sb.append(String.format(" %-24s: %-10s%n", "Peak transfer rate ", FBUtilities.prettyPrintMemoryPerSecond(peak))); System.out.println(sb.toString()); } } http://git-wip-us.apache.org/repos/asf/cassandra/blob/db68ac9f/src/java/org/apache/cassandra/tools/nodetool/CompactionStats.java ---------------------------------------------------------------------- diff --git a/src/java/org/apache/cassandra/tools/nodetool/CompactionStats.java b/src/java/org/apache/cassandra/tools/nodetool/CompactionStats.java index bc1f85c..69fcbab 100644 --- a/src/java/org/apache/cassandra/tools/nodetool/CompactionStats.java +++ b/src/java/org/apache/cassandra/tools/nodetool/CompactionStats.java @@ -39,7 +39,7 @@ public class CompactionStats extends NodeToolCmd { @Option(title = "human_readable", name = {"-H", "--human-readable"}, - description = "Display bytes in human readable form, i.e. KB, MB, GB, TB") + description = "Display bytes in human readable form, i.e. KiB, MiB, GiB, TiB") private boolean humanReadable = false; @Override http://git-wip-us.apache.org/repos/asf/cassandra/blob/db68ac9f/src/java/org/apache/cassandra/tools/nodetool/NetStats.java ---------------------------------------------------------------------- diff --git a/src/java/org/apache/cassandra/tools/nodetool/NetStats.java b/src/java/org/apache/cassandra/tools/nodetool/NetStats.java index 3e06ca0..0d26e79 100644 --- a/src/java/org/apache/cassandra/tools/nodetool/NetStats.java +++ b/src/java/org/apache/cassandra/tools/nodetool/NetStats.java @@ -35,7 +35,7 @@ public class NetStats extends NodeToolCmd { @Option(title = "human_readable", name = {"-H", "--human-readable"}, - description = "Display bytes in human readable form, i.e. KB, MB, GB, TB") + description = "Display bytes in human readable form, i.e. 
KiB, MiB, GiB, TiB") private boolean humanReadable = false; @Override http://git-wip-us.apache.org/repos/asf/cassandra/blob/db68ac9f/src/java/org/apache/cassandra/tools/nodetool/TableStats.java ---------------------------------------------------------------------- diff --git a/src/java/org/apache/cassandra/tools/nodetool/TableStats.java b/src/java/org/apache/cassandra/tools/nodetool/TableStats.java index f082466..3188c7e 100644 --- a/src/java/org/apache/cassandra/tools/nodetool/TableStats.java +++ b/src/java/org/apache/cassandra/tools/nodetool/TableStats.java @@ -43,7 +43,7 @@ public class TableStats extends NodeToolCmd @Option(title = "human_readable", name = {"-H", "--human-readable"}, - description = "Display bytes in human readable form, i.e. KB, MB, GB, TB") + description = "Display bytes in human readable form, i.e. KiB, MiB, GiB, TiB") private boolean humanReadable = false; @Override http://git-wip-us.apache.org/repos/asf/cassandra/blob/db68ac9f/src/java/org/apache/cassandra/utils/FBUtilities.java ---------------------------------------------------------------------- diff --git a/src/java/org/apache/cassandra/utils/FBUtilities.java b/src/java/org/apache/cassandra/utils/FBUtilities.java index ea41ebf..f8c82c3 100644 --- a/src/java/org/apache/cassandra/utils/FBUtilities.java +++ b/src/java/org/apache/cassandra/utils/FBUtilities.java @@ -595,6 +595,26 @@ public class FBUtilities return String.format("%.3fKiB", size / (double) (1 << 10)); } + public static String prettyPrintMemoryPerSecond(long rate) + { + if (rate >= 1 << 30) + return String.format("%.3fGiB/s", rate / (double) (1 << 30)); + if (rate >= 1 << 20) + return String.format("%.3fMiB/s", rate / (double) (1 << 20)); + return String.format("%.3fKiB/s", rate / (double) (1 << 10)); + } + + public static String prettyPrintMemoryPerSecond(long bytes, long timeInNano) + { + // We can't sanely calculate a rate over 0 nanoseconds + if (timeInNano == 0) + return "NaN KiB/s"; + + long rate = (long) (((double) bytes / timeInNano) * 1000 * 1000 * 1000); + + return prettyPrintMemoryPerSecond(rate); + } + /** * Starts and waits for the given @param pb to finish. 
* @throws java.io.IOException on non-zero exit code http://git-wip-us.apache.org/repos/asf/cassandra/blob/db68ac9f/src/java/org/apache/cassandra/utils/memory/BufferPool.java ---------------------------------------------------------------------- diff --git a/src/java/org/apache/cassandra/utils/memory/BufferPool.java b/src/java/org/apache/cassandra/utils/memory/BufferPool.java index f972059..38c008d 100644 --- a/src/java/org/apache/cassandra/utils/memory/BufferPool.java +++ b/src/java/org/apache/cassandra/utils/memory/BufferPool.java @@ -29,6 +29,7 @@ import java.util.concurrent.atomic.AtomicLongFieldUpdater; import org.apache.cassandra.concurrent.NamedThreadFactory; import org.apache.cassandra.io.compress.BufferType; import org.apache.cassandra.io.util.FileUtils; +import org.apache.cassandra.utils.FBUtilities; import org.apache.cassandra.utils.NoSpamLogger; import com.google.common.annotations.VisibleForTesting; @@ -115,7 +116,7 @@ public class BufferPool return ret; if (logger.isTraceEnabled()) - logger.trace("Requested buffer size {} has been allocated directly due to lack of capacity", size); + logger.trace("Requested buffer size {} has been allocated directly due to lack of capacity", FBUtilities.prettyPrintMemory(size)); return localPool.get().allocate(size, allocateOnHeapWhenExhausted); } @@ -131,7 +132,9 @@ public class BufferPool if (size > CHUNK_SIZE) { if (logger.isTraceEnabled()) - logger.trace("Requested buffer size {} is bigger than {}, allocating directly", size, CHUNK_SIZE); + logger.trace("Requested buffer size {} is bigger than {}, allocating directly", + FBUtilities.prettyPrintMemory(size), + FBUtilities.prettyPrintMemory(CHUNK_SIZE)); return localPool.get().allocate(size, allocateOnHeapWhenExhausted); } @@ -223,8 +226,8 @@ public class BufferPool if (DISABLED) logger.info("Global buffer pool is disabled, allocating {}", ALLOCATE_ON_HEAP_WHEN_EXAHUSTED ? "on heap" : "off heap"); else - logger.info("Global buffer pool is enabled, when pool is exahusted (max is {} mb) it will allocate {}", - MEMORY_USAGE_THRESHOLD / (1024L * 1024L), + logger.info("Global buffer pool is enabled, when pool is exahusted (max is {}) it will allocate {}", + FBUtilities.prettyPrintMemory(MEMORY_USAGE_THRESHOLD), ALLOCATE_ON_HEAP_WHEN_EXAHUSTED ? "on heap" : "off heap"); } @@ -260,8 +263,9 @@ public class BufferPool long cur = memoryUsage.get(); if (cur + MACRO_CHUNK_SIZE > MEMORY_USAGE_THRESHOLD) { - noSpamLogger.info("Maximum memory usage reached ({} bytes), cannot allocate chunk of {} bytes", - MEMORY_USAGE_THRESHOLD, MACRO_CHUNK_SIZE); + noSpamLogger.info("Maximum memory usage reached ({}), cannot allocate chunk of {}", + FBUtilities.prettyPrintMemory(MEMORY_USAGE_THRESHOLD), + FBUtilities.prettyPrintMemory(MACRO_CHUNK_SIZE)); return false; } if (memoryUsage.compareAndSet(cur, cur + MACRO_CHUNK_SIZE))
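The two prettyPrintMemoryPerSecond overloads added to FBUtilities (and the matching bytesPerSecond change in BulkLoader) compose as follows. A minimal usage sketch with made-up figures, purely to illustrate the new helpers:

    import org.apache.cassandra.utils.FBUtilities;

    public class RateLoggingExample
    {
        public static void main(String[] args)
        {
            long bytes = 150L * 1024 * 1024;          // 150 MiB transferred (hypothetical)
            long elapsedNanos = 30L * 1_000_000_000L; // over 30 seconds (hypothetical)

            // bytes / nanos * 1e9 = 5 MiB/s, so this prints "5.000MiB/s"
            System.out.println(FBUtilities.prettyPrintMemoryPerSecond(bytes, elapsedNanos));

            // The zero-duration guard added in this patch avoids a divide by zero
            // and reports "NaN KiB/s" instead.
            System.out.println(FBUtilities.prettyPrintMemoryPerSecond(bytes, 0));
        }
    }

BulkLoader's progress line now uses the same helpers for the instantaneous and average rates, and tracks the peak as a bytes-per-second long rather than a truncated MB/s int.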
