Author: jbellis
Date: Mon Sep 7 14:45:09 2009
New Revision: 812162
URL: http://svn.apache.org/viewvc?rev=812162&view=rev
Log:
Remove sstableLock. Re-order a few ops so that we can never "lose" data
temporarily: always remove old sstable references _after_ adding the new
ones. At worst, a few read ops will merge data from an sstable that is
obsolete; that is fine, and better than stop-the-world locking.
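
For readers following along, the ordering invariant is the whole trick:
publish the compacted sstable into the (now lock-free) map first, and only
then drop the references to the compaction inputs, so a concurrent read sees,
at worst, both generations and merges a little extra data. Below is a minimal
sketch of that pattern; it uses java.util.concurrent.ConcurrentHashMap as a
stand-in for the NonBlockingHashMap referenced in the diff, and the
SSTableTracker/SSTableHandle names are illustrative placeholders, not the
ColumnFamilyStore API.

    import java.util.ArrayList;
    import java.util.List;
    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;

    // Illustrative sketch only: a concurrent map plus "add new before removing
    // old" ordering replaces the read/write lock.
    class SSTableHandle
    {
        private final String filename;
        SSTableHandle(String filename) { this.filename = filename; }
        String getFilename() { return filename; }
    }

    class SSTableTracker
    {
        // Lock-free view of live sstables; readers iterate values() without blocking.
        private final Map<String, SSTableHandle> live = new ConcurrentHashMap<String, SSTableHandle>();

        void add(SSTableHandle sstable)
        {
            live.put(sstable.getFilename(), sstable);
        }

        // Readers may briefly see both the old and the new sstables (some extra
        // merging), but never a state where the data is missing from both.
        void replaceCompacted(List<String> oldFiles, List<SSTableHandle> newSSTables)
        {
            for (SSTableHandle newSSTable : newSSTables)
                live.put(newSSTable.getFilename(), newSSTable); // publish new data first
            for (String oldFile : oldFiles)
                live.remove(oldFile);                           // then retire the old files
        }

        List<SSTableHandle> currentView()
        {
            // Snapshot for a read: weakly consistent, but always covers all live data.
            return new ArrayList<SSTableHandle>(live.values());
        }
    }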
Modified:
incubator/cassandra/trunk/src/java/org/apache/cassandra/db/ColumnFamilyStore.java
Modified: incubator/cassandra/trunk/src/java/org/apache/cassandra/db/ColumnFamilyStore.java
URL: http://svn.apache.org/viewvc/incubator/cassandra/trunk/src/java/org/apache/cassandra/db/ColumnFamilyStore.java?rev=812162&r1=812161&r2=812162&view=diff
==============================================================================
--- incubator/cassandra/trunk/src/java/org/apache/cassandra/db/ColumnFamilyStore.java (original)
+++ incubator/cassandra/trunk/src/java/org/apache/cassandra/db/ColumnFamilyStore.java Mon Sep 7 14:45:09 2009
@@ -79,10 +79,7 @@
     private AtomicReference<BinaryMemtable> binaryMemtable_;
 
     /* SSTables on disk for this column family */
-    private SortedMap<String, SSTableReader> ssTables_ = new TreeMap<String, SSTableReader>(new FileNameComparator(FileNameComparator.Descending));
-
-    /* Modification lock used for protecting reads from compactions. */
-    private ReentrantReadWriteLock sstableLock_ = new ReentrantReadWriteLock(true);
+    private Map<String, SSTableReader> ssTables_ = new NonBlockingHashMap<String, SSTableReader>();
 
     private TimedStatsDeque readStats_ = new TimedStatsDeque(60000);
     private TimedStatsDeque writeStats_ = new TimedStatsDeque(60000);
@@ -581,16 +578,8 @@
     void addSSTable(SSTableReader sstable)
     {
         int ssTableCount;
-        sstableLock_.writeLock().lock();
-        try
-        {
-            ssTables_.put(sstable.getFilename(), sstable);
-            ssTableCount = ssTables_.size();
-        }
-        finally
-        {
-            sstableLock_.writeLock().unlock();
-        }
+        ssTables_.put(sstable.getFilename(), sstable);
+        ssTableCount = ssTables_.size();
         /* it's ok if compaction gets submitted multiple times while one is already in process.
            worst that happens is, compactor will count the sstable files and decide there are
@@ -800,24 +789,16 @@
         doFileAntiCompaction(files, myRanges, null, newFiles);
         if (logger_.isDebugEnabled())
             logger_.debug("Original file : " + file + " of size " + new File(file).length());
-        sstableLock_.writeLock().lock();
-        try
-        {
-            ssTables_.remove(file);
-            for (String newfile : newFiles)
-            {
-                if (logger_.isDebugEnabled())
-                    logger_.debug("New file : " + newfile + " of size " + new File(newfile).length());
-                assert newfile != null;
-                // TODO convert this to SSTableWriter.renameAndOpen
-                ssTables_.put(newfile, SSTableReader.open(newfile));
-            }
-            SSTableReader.get(file).delete();
-        }
-        finally
+        for (String newfile : newFiles)
         {
-            sstableLock_.writeLock().unlock();
+            if (logger_.isDebugEnabled())
+                logger_.debug("New file : " + newfile + " of size " + new File(newfile).length());
+            assert newfile != null;
+            // TODO can we convert this to SSTableWriter.renameAndOpen?
+            ssTables_.put(newfile, SSTableReader.open(newfile));
         }
+        ssTables_.remove(file);
+        SSTableReader.get(file).delete();
     }
 
     /**
@@ -1121,26 +1102,15 @@
             ssTable = writer.closeAndOpenReader();
             newfile = writer.getFilename();
         }
-        sstableLock_.writeLock().lock();
-        try
+        if (newfile != null)
         {
-            for (String file : files)
-            {
-                ssTables_.remove(file);
-            }
-            if (newfile != null)
-            {
-                ssTables_.put(newfile, ssTable);
-                totalBytesWritten += (new File(newfile)).length();
-            }
-            for (String file : files)
-            {
-                SSTableReader.get(file).delete();
-            }
+            ssTables_.put(newfile, ssTable);
+            totalBytesWritten += (new File(newfile)).length();
         }
-        finally
+        for (String file : files)
         {
-            sstableLock_.writeLock().unlock();
+            ssTables_.remove(file);
+            SSTableReader.get(file).delete();
         }
 
         String format = "Compacted to %s. %d/%d bytes for %d/%d keys read/written. Time: %dms.";
@@ -1274,11 +1244,6 @@
         return Collections.unmodifiableCollection(ssTables_.values());
     }
 
-    public ReentrantReadWriteLock.ReadLock getReadLock()
-    {
-        return sstableLock_.readLock();
-    }
-
     public int getReadCount()
     {
         return readStats_.size();
@@ -1347,7 +1312,6 @@
         }
 
         // we are querying top-level columns, do a merging fetch with indexes.
-        sstableLock_.readLock().lock();
         List<ColumnIterator> iterators = new ArrayList<ColumnIterator>();
         try
         {
@@ -1413,7 +1377,6 @@
             }
             readStats_.add(System.currentTimeMillis() - start);
-            sstableLock_.readLock().unlock();
         }
     }
@@ -1426,19 +1389,6 @@
     public RangeReply getKeyRange(final String startWith, final String stopAt, int maxResults)
     throws IOException, ExecutionException, InterruptedException
     {
-        getReadLock().lock();
-        try
-        {
-            return getKeyRangeUnsafe(startWith, stopAt, maxResults);
-        }
-        finally
-        {
-            getReadLock().unlock();
-        }
-    }
-
-    private RangeReply getKeyRangeUnsafe(final String startWith, final String stopAt, int maxResults) throws IOException, ExecutionException, InterruptedException
-    {
         // (OPP key decoration is a no-op so using the "decorated" comparator against raw keys is fine)
         final Comparator<String> comparator = StorageService.getPartitioner().getDecoratedKeyComparator();
@@ -1540,37 +1490,29 @@
*/
public void snapshot(String snapshotName) throws IOException
{
- sstableLock_.readLock().lock();
- try
+ for (SSTableReader ssTable : ssTables_.values())
{
- for (SSTableReader ssTable : new
ArrayList<SSTableReader>(ssTables_.values()))
- {
- // mkdir
- File sourceFile = new File(ssTable.getFilename());
- File dataDirectory =
sourceFile.getParentFile().getParentFile();
- String snapshotDirectoryPath =
Table.getSnapshotPath(dataDirectory.getAbsolutePath(), table_, snapshotName);
- FileUtils.createDirectory(snapshotDirectoryPath);
-
- // hard links
- File targetLink = new File(snapshotDirectoryPath,
sourceFile.getName());
- FileUtils.createHardLink(sourceFile, targetLink);
-
- sourceFile = new File(ssTable.indexFilename());
- targetLink = new File(snapshotDirectoryPath,
sourceFile.getName());
- FileUtils.createHardLink(sourceFile, targetLink);
-
- sourceFile = new File(ssTable.filterFilename());
- targetLink = new File(snapshotDirectoryPath,
sourceFile.getName());
- FileUtils.createHardLink(sourceFile, targetLink);
-
- if (logger_.isDebugEnabled())
- logger_.debug("Snapshot for " + table_ + " table data file
" + sourceFile.getAbsolutePath() +
- " created as " + targetLink.getAbsolutePath());
- }
- }
- finally
- {
- sstableLock_.readLock().unlock();
+ // mkdir
+ File sourceFile = new File(ssTable.getFilename());
+ File dataDirectory = sourceFile.getParentFile().getParentFile();
+ String snapshotDirectoryPath =
Table.getSnapshotPath(dataDirectory.getAbsolutePath(), table_, snapshotName);
+ FileUtils.createDirectory(snapshotDirectoryPath);
+
+ // hard links
+ File targetLink = new File(snapshotDirectoryPath,
sourceFile.getName());
+ FileUtils.createHardLink(sourceFile, targetLink);
+
+ sourceFile = new File(ssTable.indexFilename());
+ targetLink = new File(snapshotDirectoryPath, sourceFile.getName());
+ FileUtils.createHardLink(sourceFile, targetLink);
+
+ sourceFile = new File(ssTable.filterFilename());
+ targetLink = new File(snapshotDirectoryPath, sourceFile.getName());
+ FileUtils.createHardLink(sourceFile, targetLink);
+
+ if (logger_.isDebugEnabled())
+ logger_.debug("Snapshot for " + table_ + " table data file " +
sourceFile.getAbsolutePath() +
+ " created as " + targetLink.getAbsolutePath());
}
}
@@ -1579,14 +1521,6 @@
      */
     void clearUnsafe()
     {
-        sstableLock_.writeLock().lock();
-        try
-        {
-            memtable_.clearUnsafe();
-        }
-        finally
-        {
-            sstableLock_.writeLock().unlock();
-        }
+        memtable_.clearUnsafe();
     }
 }
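
One more observation for reviewers: dropping the read lock in the read and
snapshot paths is safe only because the new map's values()/keySet() views are
weakly consistent: iterating them while other threads add and remove entries
never throws ConcurrentModificationException, it just reflects some (possibly
slightly stale) set of live sstables. A tiny standalone check of that
property, again using ConcurrentHashMap as a stand-in for NonBlockingHashMap
and hypothetical file names:

    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;

    public class WeaklyConsistentIterationDemo
    {
        public static void main(String[] args) throws InterruptedException
        {
            final Map<String, String> sstables = new ConcurrentHashMap<String, String>();
            for (int i = 0; i < 1000; i++)
                sstables.put("Data-" + i + ".db", "reader-" + i);

            // "Compaction": add replacements first, then remove the originals.
            Thread mutator = new Thread(new Runnable()
            {
                public void run()
                {
                    for (int i = 0; i < 1000; i++)
                    {
                        sstables.put("Compacted-" + i + ".db", "reader");
                        sstables.remove("Data-" + i + ".db");
                    }
                }
            });
            mutator.start();

            // "Reader": iterate concurrently; no locking, no ConcurrentModificationException.
            int seen = 0;
            for (String filename : sstables.keySet())
                seen++;
            mutator.join();
            System.out.println("reader saw " + seen + " sstables during concurrent compaction");
        }
    }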