Author: j16sdiz
Date: 2008-09-02 15:32:21 +0000 (Tue, 02 Sep 2008)
New Revision: 22351
Added:
trunk/freenet/src/freenet/store/saltedhash/
trunk/freenet/src/freenet/store/saltedhash/CipherManager.java
trunk/freenet/src/freenet/store/saltedhash/LockManager.java
trunk/freenet/src/freenet/store/saltedhash/SaltedHashFreenetStore.java
trunk/freenet/src/freenet/support/BinaryBloomFilter.java
trunk/freenet/src/freenet/support/BloomFilter.java
trunk/freenet/src/freenet/support/CountingBloomFilter.java
trunk/freenet/src/freenet/support/NullBloomFilter.java
trunk/freenet/test/freenet/support/BloomFilterTest.java
Modified:
trunk/freenet/src/freenet/l10n/freenet.l10n.en.properties
trunk/freenet/src/freenet/node/Node.java
trunk/freenet/src/freenet/store/BerkeleyDBFreenetStore.java
trunk/freenet/src/freenet/store/FreenetStore.java
trunk/freenet/src/freenet/store/PubkeyStore.java
trunk/freenet/src/freenet/store/RAMFreenetStore.java
trunk/freenet/src/freenet/store/StoreCallback.java
Log:
Merge branch 'saltedhashstore'
Modified: trunk/freenet/src/freenet/l10n/freenet.l10n.en.properties
===================================================================
--- trunk/freenet/src/freenet/l10n/freenet.l10n.en.properties 2008-09-02
15:28:42 UTC (rev 22350)
+++ trunk/freenet/src/freenet/l10n/freenet.l10n.en.properties 2008-09-02
15:32:21 UTC (rev 22351)
@@ -708,7 +708,11 @@
Node.storeSize=Store size in bytes
Node.storeSizeLong=Store size in bytes
Node.storeType=Store type (LEAVE THIS ALONE)
-Node.storeTypeLong=Datastore type. Currently this can be bdb-index (use a
BerkeleyDBFreenetStore to store the index, and keep the data in files on disk),
or ram (keep the index and the data in RAM). Only use ram if you know what you
are doing and have enough RAM to store all your data (and note it will not be
saved on shutdown)!
+Node.storeTypeLong=Datastore type. Currently this can be salt-hash (use a
salted on-disk hashtable with bloom filter), bdb-index (use a
BerkeleyDBFreenetStore to store the index, and keep the data in files on disk),
or ram (keep the index and the data in RAM). Only use ram if you know what you
are doing and have enough RAM to store all your data (and note it will not be
saved on shutdown)! Changes will not take effect until Freenet has been
restarted.
+Node.storeBloomFilterSize=Bloom filter size (total) in bytes
+Node.storeBloomFilterSizeLong=Bloom filter size (total) in bytes. Usually
1/1500 of the data store size is more than enough. Set this to zero to disable the
bloom filter.
+Node.storeBloomFilterCounting=Use counting bloom filter?
+Node.storeBloomFilterCountingLong=Use 2-bit counting bloom filter? (don't
touch this unless you know what you are doing)
Node.swapRInterval=Swap request send interval (ms)
Node.swapRIntervalLong=Interval between swap attempting to send swap requests
in milliseconds. Leave this alone!
Node.throttleLocalTraffic=Throttle local traffic?
@@ -923,6 +927,11 @@
PproxyToadlet.unloadPluginWithName=Are you sure you wish to unload ${name}?
PproxyToadlet.unloadPurge=Remove plugin from cache
PproxyToadlet.versionTitle=Version
+SaltedHashFreenetStore.shortResizeProgress=Datastore(${name}) resize in
progress: ${processed}/${total}
+SaltedHashFreenetStore.shortRebuildProgress=Datastore(${name}) maintenance in
progress: ${processed}/${total}
+SaltedHashFreenetStore.longResizeProgress=Datastore(${name}) resize in
progress: ${processed}/${total}. The node may be a little bit slower than usual
during the process. Avoid restarting the node during this.
+SaltedHashFreenetStore.longRebuildProgress=Datastore(${name}) maintenance in
progress: ${processed}/${total}. The node may be a little bit slower than usual
during the process. Avoid restarting the node during this.
+SaltedHashFreenetStore.cleanerAlertTitle=Datastore maintenance task running
QueueToadlet.DUinProgress=Directory uploads in progress (${size})
QueueToadlet.DinProgress=Downloads in progress (${size})
QueueToadlet.UinProgress=Uploads in progress (${size})
Modified: trunk/freenet/src/freenet/node/Node.java
===================================================================
--- trunk/freenet/src/freenet/node/Node.java 2008-09-02 15:28:42 UTC (rev
22350)
+++ trunk/freenet/src/freenet/node/Node.java 2008-09-02 15:32:21 UTC (rev
22351)
@@ -95,6 +95,7 @@
import freenet.store.PubkeyStore;
import freenet.store.RAMFreenetStore;
import freenet.store.SSKStore;
+import freenet.store.saltedhash.SaltedHashFreenetStore;
import freenet.support.DoubleTokenBucket;
import freenet.support.Executor;
import freenet.support.Fields;
@@ -196,7 +197,7 @@
}
public String[] getPossibleValues() {
- return new String[] { "bdb-index", "ram" };
+ return new String[] { "bdb-index", "salt-hash", "ram" };
}
public void setPossibleValues(String[] val) {
@@ -296,7 +297,10 @@
/** Datastore directory */
private final File storeDir;
+ /** Datastore properties */
private final String storeType;
+ private final int storeBloomFilterSize;
+ private final boolean storeBloomFilterCounting;
/** The number of bytes per key total in all the different datastores.
All the datastores
* are always the same size in number of keys. */
@@ -1299,8 +1303,7 @@
new NodeNameCallback(this));
myName = nodeConfig.getString("name");
- // Datastore
-
+ // Datastore
nodeConfig.register("storeForceBigShrinks", false, sortOrder++,
true, false, "Node.forceBigShrink", "Node.forceBigShrinkLong",
new BooleanCallback() {
@@ -1317,6 +1320,50 @@
}
});
+
+ nodeConfig.register("storeBloomFilterSize", 0x3600000,
sortOrder++, true, false, "Node.storeBloomFilterSize",
+ "Node.storeBloomFilterSizeLong", new IntCallback() {
+ private Integer cachedBloomFilterSize;
+
+ public Integer get() {
+ if (cachedBloomFilterSize == null)
+ cachedBloomFilterSize =
storeBloomFilterSize;
+ return cachedBloomFilterSize;
+ }
+
+ public void set(Integer val) throws
InvalidConfigValueException, NodeNeedRestartException {
+ cachedBloomFilterSize = val;
+ throw new
NodeNeedRestartException("Store bloom filter size cannot be changed on the
fly");
+ }
+
+ public boolean isReadOnly() {
+ return !("salt-hash".equals(storeType));
+ }
+ });
+
+ storeBloomFilterSize =
nodeConfig.getInt("storeBloomFilterSize");
+
+ nodeConfig.register("storeBloomFilterCounting", true,
sortOrder++, true, false,
+ "Node.storeBloomFilterCounting",
"Node.storeBloomFilterCountingLong", new BooleanCallback() {
+ private Boolean cachedBloomFilterCounting;
+
+ public Boolean get() {
+ if (cachedBloomFilterCounting == null)
+ cachedBloomFilterCounting =
storeBloomFilterCounting;
+ return cachedBloomFilterCounting;
+ }
+
+ public void set(Boolean val) throws
InvalidConfigValueException, NodeNeedRestartException {
+ cachedBloomFilterCounting = val;
+ throw new
NodeNeedRestartException("Store bloom filter type cannot be changed on the
fly");
+ }
+
+ public boolean isReadOnly() {
+ return !("salt-hash".equals(storeType));
+ }
+ });
+
+ storeBloomFilterCounting =
nodeConfig.getBoolean("storeBloomFilterCounting");
nodeConfig.register("storeType", "bdb-index", sortOrder++,
true, false, "Node.storeType", "Node.storeTypeLong", new StoreTypeCallback());
@@ -1398,7 +1445,80 @@
maxStoreKeys = maxTotalKeys / 2;
maxCacheKeys = maxTotalKeys - maxStoreKeys;
- if(storeType.equals("bdb-index")) {
+ if (storeType.equals("salt-hash")) {
+ storeEnvironment = null;
+ envMutableConfig = null;
+ try {
+ int bloomFilterSizeInM =
storeBloomFilterCounting ? storeBloomFilterSize / 6 * 4
+ : (storeBloomFilterSize + 6) / 6 * 8;
// + 6 to make size different, trigger rebuild
+
+ Logger.normal(this, "Initializing CHK
Datastore");
+ System.out.println("Initializing CHK Datastore
(" + maxStoreKeys + " keys)");
+ chkDatastore = new CHKStore();
+ SaltedHashFreenetStore chkDataFS =
SaltedHashFreenetStore.construct(storeDir, "CHK-store",
+ chkDatastore, random, maxStoreKeys,
bloomFilterSizeInM, storeBloomFilterCounting,
+ shutdownHook);
+ Logger.normal(this, "Initializing CHK
Datacache");
+ System.out.println("Initializing CHK Datacache
(" + maxCacheKeys + ':' + maxCacheKeys + " keys)");
+ chkDatacache = new CHKStore();
+ SaltedHashFreenetStore chkCacheFS =
SaltedHashFreenetStore.construct(storeDir, "CHK-cache",
+ chkDatacache, random, maxCacheKeys,
bloomFilterSizeInM, storeBloomFilterCounting,
+ shutdownHook);
+ Logger.normal(this, "Initializing pubKey
Datastore");
+ System.out.println("Initializing pubKey
Datastore");
+ pubKeyDatastore = new PubkeyStore();
+ SaltedHashFreenetStore pubkeyDataFS =
SaltedHashFreenetStore.construct(storeDir, "PUBKEY-store",
+ pubKeyDatastore, random, maxStoreKeys,
bloomFilterSizeInM, storeBloomFilterCounting,
+ shutdownHook);
+ Logger.normal(this, "Initializing pubKey
Datacache");
+ System.out.println("Initializing pubKey
Datacache (" + maxCacheKeys + " keys)");
+ pubKeyDatacache = new PubkeyStore();
+ SaltedHashFreenetStore pubkeyCacheFS =
SaltedHashFreenetStore.construct(storeDir, "PUBKEY-cache",
+ pubKeyDatacache, random, maxCacheKeys,
bloomFilterSizeInM, storeBloomFilterCounting,
+ shutdownHook);
+ Logger.normal(this, "Initializing SSK
Datastore");
+ System.out.println("Initializing SSK
Datastore");
+ sskDatastore = new SSKStore(this);
+ SaltedHashFreenetStore sskDataFS =
SaltedHashFreenetStore.construct(storeDir, "SSK-store",
+ sskDatastore, random, maxStoreKeys,
bloomFilterSizeInM, storeBloomFilterCounting,
+ shutdownHook);
+ Logger.normal(this, "Initializing SSK
Datacache");
+ System.out.println("Initializing SSK Datacache
(" + maxCacheKeys + " keys)");
+ sskDatacache = new SSKStore(this);
+ SaltedHashFreenetStore sskCacheFS =
SaltedHashFreenetStore.construct(storeDir, "SSK-cache",
+ sskDatacache, random, maxCacheKeys,
bloomFilterSizeInM, storeBloomFilterCounting,
+ shutdownHook);
+
+ File migrationFile = new File(storeDir,
"migrated");
+ if (!migrationFile.exists()) {
+ chkDataFS.migrationFrom(//
+ new File(storeDir, "chk" +
suffix + ".store"), //
+ new File(storeDir, "chk" +
suffix + ".store.keys"));
+ chkCacheFS.migrationFrom(//
+ new File(storeDir, "chk" +
suffix + ".cache"), //
+ new File(storeDir, "chk" +
suffix + ".cache.keys"));
+
+ pubkeyDataFS.migrationFrom(//
+ new File(storeDir, "pubkey" +
suffix + ".store"), //
+ new File(storeDir, "pubkey" +
suffix + ".store.keys"));
+ pubkeyCacheFS.migrationFrom(//
+ new File(storeDir, "pubkey" +
suffix + ".cache"), //
+ new File(storeDir, "pubkey" +
suffix + ".cache.keys"));
+
+ sskDataFS.migrationFrom(//
+ new File(storeDir, "ssk" +
suffix + ".store"), //
+ new File(storeDir, "ssk" +
suffix + ".store.keys"));
+ sskCacheFS.migrationFrom(//
+ new File(storeDir, "ssk" +
suffix + ".cache"), //
+ new File(storeDir, "ssk" +
suffix + ".cache.keys"));
+ migrationFile.createNewFile();
+ }
+ } catch (IOException e) {
+ System.err.println("Could not open store: " +
e);
+ e.printStackTrace();
+ throw new
NodeInitException(NodeInitException.EXIT_STORE_OTHER, e.getMessage());
+ }
+ } else if (storeType.equals("bdb-index")) {
// Setup datastores
EnvironmentConfig envConfig =
BerkeleyDBFreenetStore.getBDBConfig();
@@ -1603,7 +1723,16 @@
clientCore = new NodeClientCore(this, config, nodeConfig,
nodeDir, getDarknetPortNumber(), sortOrder, oldConfig, fproxyConfig, toadlets);
netid = new NetworkIDManager(this);
-
+
+ if (storeType.equals("salt-hash")) {
+ ((SaltedHashFreenetStore)
chkDatastore.getStore()).setUserAlertManager(clientCore.alerts);
+ ((SaltedHashFreenetStore)
chkDatacache.getStore()).setUserAlertManager(clientCore.alerts);
+ ((SaltedHashFreenetStore)
pubKeyDatastore.getStore()).setUserAlertManager(clientCore.alerts);
+ ((SaltedHashFreenetStore)
pubKeyDatacache.getStore()).setUserAlertManager(clientCore.alerts);
+ ((SaltedHashFreenetStore)
sskDatastore.getStore()).setUserAlertManager(clientCore.alerts);
+ ((SaltedHashFreenetStore)
sskDatacache.getStore()).setUserAlertManager(clientCore.alerts);
+ }
+
nodeConfig.register("disableHangCheckers", false, sortOrder++,
true, false, "Node.disableHangCheckers", "Node.disableHangCheckersLong", new
BooleanCallback() {
public Boolean get() {
Modified: trunk/freenet/src/freenet/store/BerkeleyDBFreenetStore.java
===================================================================
--- trunk/freenet/src/freenet/store/BerkeleyDBFreenetStore.java 2008-09-02
15:28:42 UTC (rev 22350)
+++ trunk/freenet/src/freenet/store/BerkeleyDBFreenetStore.java 2008-09-02
15:32:21 UTC (rev 22351)
@@ -1773,14 +1773,16 @@
*/
private class StoreBlockTupleBinding extends TupleBinding {
- public void objectToEntry(Object object, TupleOutput to) {
+ @Override
+ public void objectToEntry(Object object, TupleOutput to) {
StoreBlock myData = (StoreBlock)object;
to.writeLong(myData.getOffset());
to.writeLong(myData.getRecentlyUsed());
}
- public Object entryToObject(TupleInput ti) {
+ @Override
+ public Object entryToObject(TupleInput ti) {
long offset = ti.readLong();
long lastAccessed = ti.readLong();
@@ -1830,7 +1832,8 @@
}
private class ShutdownHook extends Thread {
- public void run() {
+ @Override
+ public void run() {
System.err.println("Closing database due to shutdown.");
close(true);
}
@@ -2303,6 +2306,10 @@
return envConfig;
}
+ public long getBloomFalsePositive() {
+ return -1;
+ }
+
public boolean probablyInStore(byte[] routingKey) {
DatabaseEntry routingkeyDBE = new DatabaseEntry(routingKey);
DatabaseEntry blockDBE = new DatabaseEntry();
Modified: trunk/freenet/src/freenet/store/FreenetStore.java
===================================================================
--- trunk/freenet/src/freenet/store/FreenetStore.java 2008-09-02 15:28:42 UTC
(rev 22350)
+++ trunk/freenet/src/freenet/store/FreenetStore.java 2008-09-02 15:32:21 UTC
(rev 22351)
@@ -53,6 +53,8 @@
public long writes();
public long keyCount();
+
+ public long getBloomFalsePositive();
/**
* Check if a routing key probably
Modified: trunk/freenet/src/freenet/store/PubkeyStore.java
===================================================================
--- trunk/freenet/src/freenet/store/PubkeyStore.java 2008-09-02 15:28:42 UTC
(rev 22350)
+++ trunk/freenet/src/freenet/store/PubkeyStore.java 2008-09-02 15:32:21 UTC
(rev 22351)
@@ -14,7 +14,7 @@
return false;
}
- StorableBlock construct(byte[] data, byte[] headers, byte[] routingKey,
+ public StorableBlock construct(byte[] data, byte[] headers, byte[]
routingKey,
byte[] fullKey) throws KeyVerifyException {
if(data == null) throw new PubkeyVerifyException("Need data to
construct pubkey");
try {
Modified: trunk/freenet/src/freenet/store/RAMFreenetStore.java
===================================================================
--- trunk/freenet/src/freenet/store/RAMFreenetStore.java 2008-09-02
15:28:42 UTC (rev 22350)
+++ trunk/freenet/src/freenet/store/RAMFreenetStore.java 2008-09-02
15:32:21 UTC (rev 22351)
@@ -57,7 +57,7 @@
}
public synchronized long getMaxKeys() {
- return (long) maxKeys;
+ return maxKeys;
}
public synchronized long hits() {
@@ -122,6 +122,10 @@
return writes;
}
+ public long getBloomFalsePositive() {
+ return -1;
+ }
+
public boolean probablyInStore(byte[] routingKey) {
ByteArrayWrapper key = new ByteArrayWrapper(routingKey);
return blocksByRoutingKey.get(key) != null;
Modified: trunk/freenet/src/freenet/store/StoreCallback.java
===================================================================
--- trunk/freenet/src/freenet/store/StoreCallback.java 2008-09-02 15:28:42 UTC
(rev 22350)
+++ trunk/freenet/src/freenet/store/StoreCallback.java 2008-09-02 15:32:21 UTC
(rev 22351)
@@ -38,17 +38,22 @@
protected FreenetStore store;
/** Called once when first connecting to a FreenetStore. Package-local.
*/
- void setStore(FreenetStore store) {
+ public void setStore(FreenetStore store) {
this.store = store;
}
+ public FreenetStore getStore() {
+ return store;
+ }
+
// Reconstruction
/** Construct a StorableBlock from the data, headers, and optionally
routing key or full key.
* IMPORTANT: Using the full key or routing key is OPTIONAL, and if we
don't use them, WE DON'T
* CHECK THEM EITHER! Caller MUST check that the key is the one
expected.
* @throws KeyVerifyException */
- abstract StorableBlock construct(byte[] data, byte[] headers, byte[]
routingKey, byte[] fullKey) throws KeyVerifyException;
+ public abstract StorableBlock construct(byte[] data, byte[] headers,
byte[] routingKey, byte[] fullKey)
+ throws KeyVerifyException;
public void setMaxKeys(long maxStoreKeys, boolean shrinkNow) throws
DatabaseException, IOException {
store.setMaxKeys(maxStoreKeys, shrinkNow);
@@ -73,6 +78,10 @@
public long keyCount() {
return store.keyCount();
}
+
+ public long getBloomFalsePositive() {
+ return store.getBloomFalsePositive();
+ }
/** Generate a routing key from a full key */
public abstract byte[] routingKeyFromFullKey(byte[] keyBuf);
Added: trunk/freenet/src/freenet/store/saltedhash/CipherManager.java
===================================================================
--- trunk/freenet/src/freenet/store/saltedhash/CipherManager.java
(rev 0)
+++ trunk/freenet/src/freenet/store/saltedhash/CipherManager.java
2008-09-02 15:32:21 UTC (rev 22351)
@@ -0,0 +1,166 @@
+/* This code is part of Freenet. It is distributed under the GNU General
+ * Public License, version 2 (or at your option any later version). See
+ * http://www.gnu.org/ for further details of the GPL. */
+package freenet.store.saltedhash;
+
+import java.security.MessageDigest;
+import java.util.Arrays;
+import java.util.LinkedHashMap;
+import java.util.Map;
+import java.util.Random;
+
+import freenet.crypt.BlockCipher;
+import freenet.crypt.PCFBMode;
+import freenet.crypt.SHA256;
+import freenet.crypt.UnsupportedCipherException;
+import freenet.crypt.ciphers.Rijndael;
+import freenet.support.ByteArrayWrapper;
+import freenet.support.Logger;
+
+/**
+ * Cipher Manager
+ *
+ * Manage all kind of digestion and encryption in store
+ *
+ * @author sdiz
+ */
+public class CipherManager {
+ /**
+ * <tt>0x10</tt> bytes of salt for better digestion, not too salty.
+ */
+ private byte[] salt;
+
+ CipherManager(byte[] salt) {
+ assert salt.length == 0x10;
+ this.salt = salt;
+ }
+
+ /**
+ * Get salt
+ *
+ * @return salt
+ */
+ byte[] getSalt() {
+ return salt;
+ }
+
+ /**
+ * Cache for digested keys
+ */
+ private Map<ByteArrayWrapper, byte[]> digestRoutingKeyCache = new
LinkedHashMap<ByteArrayWrapper, byte[]>() {
+ @Override
+ protected boolean removeEldestEntry(Map.Entry<ByteArrayWrapper,
byte[]> eldest) {
+ return size() > 128;
+ }
+ };
+
+ /**
+ * Get digested routing key
+ *
+ * @param plainKey
+ * @return
+ */
+ byte[] getDigestedKey(byte[] plainKey) {
+ ByteArrayWrapper key = new ByteArrayWrapper(plainKey);
+ synchronized (digestRoutingKeyCache) {
+ byte[] dk = digestRoutingKeyCache.get(key);
+ if (dk != null)
+ return dk;
+ }
+
+ MessageDigest digest = SHA256.getMessageDigest();
+ try {
+ digest.update(plainKey);
+ digest.update(salt);
+
+ byte[] hashedRoutingKey = digest.digest();
+ assert hashedRoutingKey.length == 0x20;
+
+ synchronized (digestRoutingKeyCache) {
+ digestRoutingKeyCache.put(key,
hashedRoutingKey);
+ }
+
+ return hashedRoutingKey;
+ } finally {
+ SHA256.returnMessageDigest(digest);
+ }
+ }
+
+ /**
+ * Encrypt this entry
+ */
+ void encrypt(SaltedHashFreenetStore.Entry entry, Random random) {
+ if (entry.isEncrypted)
+ return;
+
+ entry.dataEncryptIV = new byte[16];
+ random.nextBytes(entry.dataEncryptIV);
+
+ PCFBMode cipher = makeCipher(entry.dataEncryptIV,
entry.plainRoutingKey);
+ entry.header = cipher.blockEncipher(entry.header, 0,
entry.header.length);
+ entry.data = cipher.blockEncipher(entry.data, 0,
entry.data.length);
+
+ entry.getDigestedRoutingKey();
+ entry.isEncrypted = true;
+ }
+
+ /**
+ * Verify and decrypt this entry
+ *
+ * @param routingKey
+ * @return <code>true</code> if the <code>routeKey</code> match and the
entry is decrypted.
+ */
+ boolean decrypt(SaltedHashFreenetStore.Entry entry, byte[] routingKey) {
+ assert entry.header != null;
+ assert entry.data != null;
+
+ if (!entry.isEncrypted) {
+ // Already decrypted
+ if (Arrays.equals(entry.plainRoutingKey, routingKey))
+ return true;
+ else
+ return false;
+ }
+
+ if (entry.plainRoutingKey != null) {
+ // we knew the key
+ if (!Arrays.equals(entry.plainRoutingKey, routingKey)) {
+ return false;
+ }
+ } else {
+ // we do not know the plain key, let's check the digest
+ if (!Arrays.equals(entry.digestedRoutingKey,
getDigestedKey(routingKey)))
+ return false;
+ }
+
+ entry.plainRoutingKey = routingKey;
+
+ PCFBMode cipher = makeCipher(entry.dataEncryptIV,
entry.plainRoutingKey);
+ entry.header = cipher.blockDecipher(entry.header, 0,
entry.header.length);
+ entry.data = cipher.blockDecipher(entry.data, 0,
entry.data.length);
+
+ entry.isEncrypted = false;
+
+ return true;
+ }
+
+ /**
+ * Create PCFBMode object for this key
+ */
+ PCFBMode makeCipher(byte[] iv, byte[] key) {
+ byte[] iv2 = new byte[0x20]; // 256 bits
+
+ System.arraycopy(salt, 0, iv2, 0, 0x10);
+ System.arraycopy(iv, 0, iv2, 0x10, 0x10);
+
+ try {
+ BlockCipher aes = new Rijndael(256, 256);
+ aes.initialize(key);
+
+ return PCFBMode.create(aes, iv2);
+ } catch (UnsupportedCipherException e) {
+ Logger.error(this, "Rijndael not supported!", e);
+ throw new Error("Rijndael not supported!", e);
+ }
+ }
+}
Added: trunk/freenet/src/freenet/store/saltedhash/LockManager.java
===================================================================
--- trunk/freenet/src/freenet/store/saltedhash/LockManager.java
(rev 0)
+++ trunk/freenet/src/freenet/store/saltedhash/LockManager.java 2008-09-02
15:32:21 UTC (rev 22351)
@@ -0,0 +1,103 @@
+/* This code is part of Freenet. It is distributed under the GNU General
+ * Public License, version 2 (or at your option any later version). See
+ * http://www.gnu.org/ for further details of the GPL. */
+package freenet.store.saltedhash;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.locks.Condition;
+import java.util.concurrent.locks.Lock;
+import java.util.concurrent.locks.ReentrantLock;
+
+import freenet.support.Logger;
+
+/**
+ * Lock Manager
+ *
+ * Handle locking/unlocking of individual offsets.
+ *
+ * @author sdiz
+ */
+public class LockManager {
+ private static boolean logDEBUG;
+ private volatile boolean shutdown;
+ private Lock entryLock = new ReentrantLock();
+ private Map<Long, Condition> lockMap = new HashMap<Long, Condition>();
+
+ LockManager() {
+ logDEBUG = Logger.shouldLog(Logger.DEBUG, this);
+ }
+
+ /**
+ * Lock the entry
+ *
+	 * This lock is <strong>not</strong> re-entrant. No threads except
Cleaner should hold more
+	 * than one lock at a time (or deadlock may occur).
+ */
+ Condition lockEntry(long offset) {
+ if (logDEBUG)
+ Logger.debug(this, "try locking " + offset, new
Exception());
+
+ Condition condition;
+ try {
+ entryLock.lock();
+ try {
+ do {
+ if (shutdown)
+ return null;
+
+ Condition lockCond =
lockMap.get(offset);
+ if (lockCond != null)
+ lockCond.await(10,
TimeUnit.SECONDS); // 10s for checking shutdown
+ else
+ break;
+ } while (true);
+ condition = entryLock.newCondition();
+ lockMap.put(offset, condition);
+ } finally {
+ entryLock.unlock();
+ }
+ } catch (InterruptedException e) {
+ Logger.error(this, "lock interrupted", e);
+ return null;
+ }
+
+ if (logDEBUG)
+ Logger.debug(this, "locked " + offset, new Exception());
+ return condition;
+ }
+
+ /**
+ * Unlock the entry
+ */
+ void unlockEntry(long offset, Condition condition) {
+ if (logDEBUG)
+ Logger.debug(this, "unlocking " + offset, new
Exception("debug"));
+
+ entryLock.lock();
+ try {
+ Condition cond = lockMap.remove(offset);
+ assert cond == condition;
+ cond.signal();
+ } finally {
+ entryLock.unlock();
+ }
+ }
+
+ /**
+ * Shutdown and wait for all entries unlocked
+ */
+ void shutdown() {
+ shutdown = true;
+ entryLock.lock();
+ try {
+ while (!lockMap.isEmpty()) {
+ Condition cond =
lockMap.values().iterator().next();
+ cond.awaitUninterruptibly();
+ }
+ } finally {
+ entryLock.unlock();
+ }
+ }
+}
Added: trunk/freenet/src/freenet/store/saltedhash/SaltedHashFreenetStore.java
===================================================================
--- trunk/freenet/src/freenet/store/saltedhash/SaltedHashFreenetStore.java
(rev 0)
+++ trunk/freenet/src/freenet/store/saltedhash/SaltedHashFreenetStore.java
2008-09-02 15:32:21 UTC (rev 22351)
@@ -0,0 +1,1685 @@
+/* This code is part of Freenet. It is distributed under the GNU General
+ * Public License, version 2 (or at your option any later version). See
+ * http://www.gnu.org/ for further details of the GPL. */
+package freenet.store.saltedhash;
+
+import java.io.EOFException;
+import java.io.File;
+import java.io.IOException;
+import java.io.RandomAccessFile;
+import java.nio.ByteBuffer;
+import java.nio.channels.FileChannel;
+import java.util.Arrays;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.ListIterator;
+import java.util.Map;
+import java.util.Random;
+import java.util.SortedSet;
+import java.util.TreeMap;
+import java.util.TreeSet;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.locks.Condition;
+import java.util.concurrent.locks.Lock;
+import java.util.concurrent.locks.ReadWriteLock;
+import java.util.concurrent.locks.ReentrantLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+
+import org.tanukisoftware.wrapper.WrapperManager;
+
+import freenet.keys.KeyVerifyException;
+import freenet.l10n.L10n;
+import freenet.node.SemiOrderedShutdownHook;
+import freenet.node.useralerts.UserAlert;
+import freenet.node.useralerts.UserAlertManager;
+import freenet.store.FreenetStore;
+import freenet.store.KeyCollisionException;
+import freenet.store.StorableBlock;
+import freenet.store.StoreCallback;
+import freenet.support.BloomFilter;
+import freenet.support.Fields;
+import freenet.support.HTMLNode;
+import freenet.support.HexUtil;
+import freenet.support.Logger;
+import freenet.support.io.FileUtil;
+import freenet.support.io.NativeThread;
+
+/**
+ * Index-less data store based on salted hash
+ *
+ * @author sdiz
+ */
+public class SaltedHashFreenetStore implements FreenetStore {
+ /** Option for saving plainkey */
+ private static final boolean OPTION_SAVE_PLAINKEY = true;
+ private static final int OPTION_MAX_PROBE = 4;
+
+ private static final byte FLAG_DIRTY = 0x1;
+ private static final byte FLAG_REBUILD_BLOOM = 0x2;
+
+ private boolean checkBloom = true;
+ private int bloomFilterSize;
+ private int bloomFilterK;
+ private final BloomFilter bloomFilter;
+
+ private static boolean logMINOR;
+ private static boolean logDEBUG;
+
+ private final File baseDir;
+ private final String name;
+ private final StoreCallback callback;
+ private final boolean collisionPossible;
+ private final int headerBlockLength;
+ private final int routeKeyLength;
+ private final int fullKeyLength;
+ private final int dataBlockLength;
+ private final Random random;
+ private UserAlertManager userAlertManager;
+
+ private long storeSize;
+ private int generation;
+ private int flags;
+
+ public static SaltedHashFreenetStore construct(File baseDir, String
name, StoreCallback callback, Random random,
+ long maxKeys, int bloomFilterSize, boolean bloomCounting,
SemiOrderedShutdownHook shutdownHook)
+ throws IOException {
+ return new SaltedHashFreenetStore(baseDir, name, callback,
random, maxKeys, bloomFilterSize, bloomCounting,
+ shutdownHook);
+ }
+
+ private SaltedHashFreenetStore(File baseDir, String name, StoreCallback
callback, Random random, long maxKeys,
+ int bloomFilterSize, boolean bloomCounting,
SemiOrderedShutdownHook shutdownHook) throws IOException {
+ logMINOR = Logger.shouldLog(Logger.MINOR, this);
+ logDEBUG = Logger.shouldLog(Logger.DEBUG, this);
+
+ this.baseDir = baseDir;
+ this.name = name;
+
+ this.callback = callback;
+ collisionPossible = callback.collisionPossible();
+ routeKeyLength = callback.routingKeyLength();
+ headerBlockLength = callback.headerLength();
+ fullKeyLength = callback.fullKeyLength();
+ dataBlockLength = callback.dataLength();
+
+ this.random = random;
+ storeSize = maxKeys;
+ this.bloomFilterSize = bloomFilterSize;
+
+ lockManager = new LockManager();
+
+		// Create the directory if it does not exist
+ this.baseDir.mkdirs();
+
+ configFile = new File(this.baseDir, name + ".config");
+ boolean newStore = loadConfigFile();
+
+ newStore |= openStoreFiles(baseDir, name);
+
+ File bloomFile = new File(this.baseDir, name + ".bloom");
+ bloomFilter = BloomFilter.createFilter(bloomFile,
bloomFilterSize, bloomFilterK, bloomCounting);
+
+ if ((flags & FLAG_DIRTY) != 0)
+ System.err.println("Datastore(" + name + ") is dirty.");
+
+ flags |= FLAG_DIRTY; // datastore is now dirty until
flushAndClose()
+ writeConfigFile();
+
+ if (maxKeys != storeSize) {
+ if (prevStoreSize != 0) {
+ storeSize = Math.max(prevStoreSize, storeSize);
+ prevStoreSize = 0;
+ }
+ setMaxKeys(maxKeys, true);
+ }
+
+ callback.setStore(this);
+ shutdownHook.addEarlyJob(new Thread(new ShutdownDB()));
+
+ cleanerThread = new Cleaner();
+ cleanerStatusUserAlert = new
CleanerStatusUserAlert(cleanerThread);
+
+ // finish all resizing before continue
+ if (prevStoreSize != 0 && cleanerGlobalLock.tryLock()) {
+ System.out.println("Resizing datastore (" + name + ")");
+ try {
+ cleanerThread.resizeStore(prevStoreSize, false);
+ } finally {
+ cleanerGlobalLock.unlock();
+ }
+ writeConfigFile();
+ } else if (bloomFilter.needRebuild() && !newStore) {
+ // Bloom filter resized?
+ flags |= FLAG_REBUILD_BLOOM;
+ checkBloom = false;
+
+ if (cleanerGlobalLock.tryLock()) {
+ System.out.println("Bloom filter for datastore
(" + name + ") missing/mismatch, rebuilding.");
+ try {
+ cleanerThread.rebuildBloom(false);
+ } finally {
+ cleanerGlobalLock.unlock();
+ }
+ writeConfigFile();
+ }
+ }
+
+ cleanerThread.start();
+ }
+
+ public StorableBlock fetch(byte[] routingKey, byte[] fullKey, boolean
dontPromote) throws IOException {
+ if (logMINOR)
+ Logger.minor(this, "Fetch " +
HexUtil.bytesToHex(routingKey) + " for " + callback);
+
+ configLock.readLock().lock();
+ try {
+ Map<Long, Condition> lockMap = lockPlainKey(routingKey,
true);
+ if (lockMap == null) {
+ if (logDEBUG)
+ Logger.debug(this, "cannot lock key: "
+ HexUtil.bytesToHex(routingKey) + ", shutting down?");
+ return null;
+ }
+ try {
+ Entry entry = probeEntry(routingKey, true);
+
+ if (entry == null) {
+ misses.incrementAndGet();
+ return null;
+ }
+
+ try {
+ StorableBlock block =
entry.getStorableBlock(routingKey, fullKey);
+ if (block == null) {
+ misses.incrementAndGet();
+ return null;
+ }
+ hits.incrementAndGet();
+ return block;
+ } catch (KeyVerifyException e) {
+ Logger.minor(this, "key verification
exception", e);
+ misses.incrementAndGet();
+ return null;
+ }
+ } finally {
+ unlockPlainKey(routingKey, true, lockMap);
+ }
+ } finally {
+ configLock.readLock().unlock();
+ }
+ }
+
+ /**
+	 * Find an entry with a specific routing key. This function
would <strong>not</strong>
+ * lock the entries.
+ *
+ * @param routingKey
+ * @param withData
+ * @return <code>Entry</code> object
+ * @throws IOException
+ */
+ private Entry probeEntry(byte[] routingKey, boolean withData) throws
IOException {
+ if (checkBloom)
+ if
(!bloomFilter.checkFilter(cipherManager.getDigestedKey(routingKey)))
+ return null;
+
+ Entry entry = probeEntry0(routingKey, storeSize, withData);
+
+ if (entry == null && prevStoreSize != 0)
+ entry = probeEntry0(routingKey, prevStoreSize,
withData);
+ if (checkBloom && entry == null)
+ bloomFalsePos.incrementAndGet();
+
+ return entry;
+ }
+
+ private Entry probeEntry0(byte[] routingKey, long probeStoreSize,
boolean withData) throws IOException {
+ Entry entry = null;
+ long[] offset = getOffsetFromPlainKey(routingKey,
probeStoreSize);
+
+ for (int i = 0; i < offset.length; i++) {
+ if (logDEBUG)
+ Logger.debug(this, "probing for i=" + i + ",
offset=" + offset[i]);
+
+ try {
+ entry = readEntry(offset[i], routingKey,
withData);
+ if (entry != null)
+ return entry;
+ } catch (EOFException e) {
+ if (prevStoreSize != 0) // may occur on store
shrinking
+ Logger.error(this, "EOFException on
probeEntry", e);
+ continue;
+ }
+ }
+ return null;
+ }
+
+ public void put(StorableBlock block, byte[] routingKey, byte[] fullKey,
byte[] data, byte[] header,
+ boolean overwrite) throws IOException, KeyCollisionException {
+ if (logMINOR)
+ Logger.minor(this, "Putting " +
HexUtil.bytesToHex(routingKey) + " (" + name + ")");
+
+ configLock.readLock().lock();
+ try {
+ Map<Long, Condition> lockMap = lockPlainKey(routingKey,
false);
+ if (lockMap == null) {
+ if (logDEBUG)
+ Logger.debug(this, "cannot lock key: "
+ HexUtil.bytesToHex(routingKey) + ", shutting down?");
+ return;
+ }
+ try {
+ /*
+ * Use lazy loading here. This may lost data if
digestedRoutingKey collide but
+ * collisionPossible is false. Should be very
rare as digestedRoutingKey is a
+ * SHA-256 hash.
+ */
+ Entry oldEntry = probeEntry(routingKey, false);
+ if (oldEntry != null && !oldEntry.isFree()) {
+ long oldOffset = oldEntry.curOffset;
+ try {
+ if (!collisionPossible)
+ return;
+
oldEntry.setData(readHeader(oldOffset), readData(oldOffset)); // read from disk
+ StorableBlock oldBlock =
oldEntry.getStorableBlock(routingKey, fullKey);
+ if (block.equals(oldBlock)) {
+ return; // already in
store
+ } else if (!overwrite) {
+ throw new
KeyCollisionException();
+ }
+ } catch (KeyVerifyException e) {
+ // ignore
+ }
+
+ // Overwrite old offset with same key
+ Entry entry = new Entry(routingKey,
header, data);
+ writeEntry(entry, oldOffset);
+ writes.incrementAndGet();
+ if (oldEntry.generation != generation)
+ keyCount.incrementAndGet();
+ return;
+ }
+
+ Entry entry = new Entry(routingKey, header,
data);
+ long[] offset = entry.getOffset();
+
+ for (int i = 0; i < offset.length; i++) {
+ if (isFree(offset[i])) {
+ // write to free block
+ if (logDEBUG)
+ Logger.debug(this,
"probing, write to i=" + i + ", offset=" + offset[i]);
+
bloomFilter.addKey(cipherManager.getDigestedKey(routingKey));
+ writeEntry(entry, offset[i]);
+ writes.incrementAndGet();
+ keyCount.incrementAndGet();
+
+ return;
+ }
+ }
+
+ // no free blocks, overwrite the first one
+ if (logDEBUG)
+ Logger.debug(this, "collision, write to
i=0, offset=" + offset[0]);
+
bloomFilter.addKey(cipherManager.getDigestedKey(routingKey));
+ oldEntry = readEntry(offset[0], null, false);
+ writeEntry(entry, offset[0]);
+ writes.incrementAndGet();
+ if (oldEntry.generation == generation)
+
bloomFilter.removeKey(oldEntry.getDigestedRoutingKey());
+ else
+ keyCount.incrementAndGet();
+ } finally {
+ unlockPlainKey(routingKey, false, lockMap);
+ }
+ } finally {
+ configLock.readLock().unlock();
+ }
+ }
+
+	// ------------- Entry I/O
+	// Three parallel files back the hashtable: logical slot i occupies a
+	// fixed-size region in each (metadata, header block, data block).
+	// meta-data file (Entry.METADATA_LENGTH bytes per slot)
+	private File metaFile;
+	private RandomAccessFile metaRAF;
+	private FileChannel metaFC;
+	// header file (headerBlockLength bytes per slot)
+	private File headerFile;
+	private RandomAccessFile headerRAF;
+	private FileChannel headerFC;
+	// data file (dataBlockLength bytes per slot)
+	private File dataFile;
+	private RandomAccessFile dataRAF;
+	private FileChannel dataFC;
+
+ /**
+ * Data entry
+ *
+ * <pre>
+ * META-DATA BLOCK
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |0|1|2|3|4|5|6|7|8|9|A|B|C|D|E|F|
+ * +----+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |0000| |
+ * +----+ Digested Routing Key |
+ * |0010| |
+ * +----+-------------------------------+
+ * |0020| Data Encrypt IV |
+ * +----+---------------+---------------+
+ * |0030| Flag | Store Size |
+ * +----+---------------+---------------+
+ * |0040| Plain Routing Key |
+ * |0050| (Only if ENTRY_FLAG_PLAINKEY) |
+ * +----+-------+-----------------------+
+ * |0060| Gen | Reserved |
+ * +----+-------+-----------------------+
+ * |0070| Reserved |
+ * +----+-------------------------------+
+ *
+ * Gen = Generation
+ * </pre>
+ */
+ class Entry {
+ /** Flag for occupied space */
+ private final static long ENTRY_FLAG_OCCUPIED = 0x00000001L;
+ /** Flag for plain key available */
+ private final static long ENTRY_FLAG_PLAINKEY = 0x00000002L;
+
+ /** Control block length */
+ private static final int METADATA_LENGTH = 0x80;
+
+ byte[] plainRoutingKey;
+ byte[] digestedRoutingKey;
+ byte[] dataEncryptIV;
+ private long flag;
+ private long storeSize;
+ private int generation;
+ byte[] header;
+ byte[] data;
+
+ boolean isEncrypted;
+ private long curOffset = -1;
+
+ private Entry() {
+ }
+
+ private Entry(ByteBuffer metaDataBuf, ByteBuffer headerBuf,
ByteBuffer dataBuf) {
+ assert metaDataBuf.remaining() == METADATA_LENGTH;
+
+ digestedRoutingKey = new byte[0x20];
+ metaDataBuf.get(digestedRoutingKey);
+
+ dataEncryptIV = new byte[0x10];
+ metaDataBuf.get(dataEncryptIV);
+
+ flag = metaDataBuf.getLong();
+ storeSize = metaDataBuf.getLong();
+
+ if ((flag & ENTRY_FLAG_PLAINKEY) != 0) {
+ plainRoutingKey = new byte[0x20];
+ metaDataBuf.get(plainRoutingKey);
+ }
+
+ metaDataBuf.position(0x60);
+ generation = metaDataBuf.getInt();
+
+ isEncrypted = true;
+
+ if (headerBuf != null && dataBuf != null)
+ setData(headerBuf, dataBuf);
+ }
+
+ /**
+ * Set header/data after construction.
+ *
+ * @param storeBuf
+ * @param store
+ */
+ private void setData(ByteBuffer headerBuf, ByteBuffer dataBuf) {
+ assert headerBuf.remaining() == headerBlockLength;
+ assert dataBuf.remaining() == dataBlockLength;
+ assert isEncrypted;
+
+ header = new byte[headerBlockLength];
+ headerBuf.get(header);
+
+ data = new byte[dataBlockLength];
+ dataBuf.get(data);
+ }
+
+ /**
+ * Create a new entry
+ *
+ * @param plainRoutingKey
+ * @param header
+ * @param data
+ */
+ private Entry(byte[] plainRoutingKey, byte[] header, byte[]
data) {
+ this.plainRoutingKey = plainRoutingKey;
+
+ flag = ENTRY_FLAG_OCCUPIED;
+ this.storeSize = SaltedHashFreenetStore.this.storeSize;
+ this.generation =
SaltedHashFreenetStore.this.generation;
+
+ // header/data will be overwritten in
encrypt()/decrypt(),
+ // let's make a copy here
+ this.header = new byte[headerBlockLength];
+ System.arraycopy(header, 0, this.header, 0,
headerBlockLength);
+ this.data = new byte[dataBlockLength];
+ System.arraycopy(data, 0, this.data, 0,
dataBlockLength);
+
+ if (OPTION_SAVE_PLAINKEY) {
+ flag |= ENTRY_FLAG_PLAINKEY;
+ }
+
+ isEncrypted = false;
+ }
+
+ private ByteBuffer toMetaDataBuffer() {
+ ByteBuffer out = ByteBuffer.allocate(METADATA_LENGTH);
+ cipherManager.encrypt(this, random);
+
+ out.put(getDigestedRoutingKey());
+ out.put(dataEncryptIV);
+ out.putLong(flag);
+ out.putLong(storeSize);
+
+ if ((flag & ENTRY_FLAG_PLAINKEY) != 0 &&
plainRoutingKey != null) {
+ assert plainRoutingKey.length == 0x20;
+ out.put(plainRoutingKey);
+ }
+
+ out.position(0x60);
+ out.putInt(generation);
+
+ out.position(0);
+ return out;
+ }
+
+ private ByteBuffer toHeaderBuffer() {
+ assert isEncrypted; // should have encrypted to get
dataEncryptIV in control buffer
+
+ if (header == null)
+ return null;
+
+ ByteBuffer out = ByteBuffer.allocate(headerBlockLength);
+ out.put(header);
+ assert out.remaining() == 0;
+
+ out.position(0);
+ return out;
+ }
+
+ private ByteBuffer toDataBuffer() {
+ assert isEncrypted; // should have encrypted to get
dataEncryptIV in control buffer
+
+ if (data == null)
+ return null;
+
+ ByteBuffer out = ByteBuffer.allocate(dataBlockLength);
+ out.put(data);
+ assert out.remaining() == 0;
+
+ out.position(0);
+ return out;
+ }
+
+ private StorableBlock getStorableBlock(byte[] routingKey,
byte[] fullKey) throws KeyVerifyException {
+ if (isFree() || header == null || data == null)
+ return null; // this is a free block
+ if (!cipherManager.decrypt(this, routingKey))
+ return null;
+
+ StorableBlock block = callback.construct(data, header,
routingKey, fullKey);
+ byte[] blockRoutingKey = block.getRoutingKey();
+
+ if (!Arrays.equals(blockRoutingKey, routingKey)) {
+ // can't recover, as decrypt() depends on a
correct route key
+ return null;
+ }
+
+ return block;
+ }
+
+ private long[] getOffset() {
+ if (digestedRoutingKey != null)
+ return
getOffsetFromDigestedKey(digestedRoutingKey, storeSize);
+ else
+ return getOffsetFromPlainKey(plainRoutingKey,
storeSize);
+ }
+
+ private boolean isFree() {
+ return (flag & ENTRY_FLAG_OCCUPIED) == 0;
+ }
+
+ byte[] getDigestedRoutingKey() {
+ if (digestedRoutingKey == null)
+ if (plainRoutingKey == null)
+ return null;
+ else
+ digestedRoutingKey =
cipherManager.getDigestedKey(plainRoutingKey);
+ return digestedRoutingKey;
+ }
+ }
+
+ /**
+ * Open all store files
+ *
+ * @param baseDir
+ * @param name
+ * @throws IOException
+ * @return <code>true</code> iff this is a new datastore
+ */
+ private boolean openStoreFiles(File baseDir, String name) throws
IOException {
+ metaFile = new File(baseDir, name + ".metadata");
+ headerFile = new File(baseDir, name + ".header");
+ dataFile = new File(baseDir, name + ".data");
+
+ boolean newStore = !metaFile.exists() || !headerFile.exists()
|| !dataFile.exists();
+
+ metaRAF = new RandomAccessFile(metaFile, "rw");
+ metaFC = metaRAF.getChannel();
+ metaFC.lock();
+
+ headerRAF = new RandomAccessFile(headerFile, "rw");
+ headerFC = headerRAF.getChannel();
+ headerFC.lock();
+
+ dataRAF = new RandomAccessFile(dataFile, "rw");
+ dataFC = dataRAF.getChannel();
+ dataFC.lock();
+
+ long storeFileSize = Math.max(storeSize, prevStoreSize);
+ WrapperManager.signalStarting(10 * 60 * 1000); // 10minutes,
for filesystem that support no sparse file.
+ setStoreFileSize(storeFileSize);
+
+ return newStore;
+ }
+
+ /**
+ * Read entry from disk. Before calling this function, you should
acquire all required locks.
+ *
+ * @return <code>null</code> if and only if <code>routingKey</code> is
not <code>null</code> and
+ * the key does not match the entry.
+ */
+ private Entry readEntry(long offset, byte[] routingKey, boolean
withData) throws IOException {
+ ByteBuffer mbf = ByteBuffer.allocate(Entry.METADATA_LENGTH);
+
+ do {
+ int status = metaFC.read(mbf, Entry.METADATA_LENGTH *
offset + mbf.position());
+ if (status == -1)
+ throw new EOFException();
+ } while (mbf.hasRemaining());
+ mbf.flip();
+
+ Entry entry = new Entry(mbf, null, null);
+ entry.curOffset = offset;
+
+ if (routingKey != null) {
+ if (entry.isFree())
+ return null;
+ if
(!Arrays.equals(cipherManager.getDigestedKey(routingKey),
entry.digestedRoutingKey))
+ return null;
+
+ if (withData) {
+ ByteBuffer headerBuf = readHeader(offset);
+ ByteBuffer dataBuf = readData(offset);
+ entry.setData(headerBuf, dataBuf);
+ boolean decrypted =
cipherManager.decrypt(entry, routingKey);
+ if (!decrypted)
+ return null;
+ }
+ }
+
+ return entry;
+ }
+
+ /**
+ * Read header from disk
+ *
+ * @param offset
+ * @throws IOException
+ */
+ private ByteBuffer readHeader(long offset) throws IOException {
+ ByteBuffer buf = ByteBuffer.allocate(headerBlockLength);
+
+ do {
+ int status = headerFC.read(buf, headerBlockLength *
offset + buf.position());
+ if (status == -1)
+ throw new EOFException();
+ } while (buf.hasRemaining());
+ buf.flip();
+ return buf;
+ }
+
+ /**
+ * Read data from disk
+ *
+ * @param offset
+ * @throws IOException
+ */
+ private ByteBuffer readData(long offset) throws IOException {
+ ByteBuffer buf = ByteBuffer.allocate(dataBlockLength);
+
+ do {
+ int status = dataFC.read(buf, dataBlockLength * offset
+ buf.position());
+ if (status == -1)
+ throw new EOFException();
+ } while (buf.hasRemaining());
+ buf.flip();
+ return buf;
+ }
+
+ private boolean isFree(long offset) throws IOException {
+ Entry entry = readEntry(offset, null, false);
+ return entry.isFree();
+ }
+
+ private byte[] getDigestedKeyFromOffset(long offset) throws IOException
{
+ Entry entry = readEntry(offset, null, false);
+ return entry.getDigestedRoutingKey();
+ }
+
+ /**
+ * Write entry to disk.
+ *
+ * Before calling this function, you should:
+ * <ul>
+ * <li>acquire all required locks</li>
+ * <li>update the entry with latest store size</li>
+ * </ul>
+ */
+ private void writeEntry(Entry entry, long offset) throws IOException {
+ cipherManager.encrypt(entry, random);
+
+ ByteBuffer bf = entry.toMetaDataBuffer();
+ do {
+ int status = metaFC.write(bf, Entry.METADATA_LENGTH *
offset + bf.position());
+ if (status == -1)
+ throw new EOFException();
+ } while (bf.hasRemaining());
+
+ bf = entry.toHeaderBuffer();
+ if (bf != null) {
+ do {
+ int status = headerFC.write(bf,
headerBlockLength * offset + bf.position());
+ if (status == -1)
+ throw new EOFException();
+ } while (bf.hasRemaining());
+
+ bf = entry.toDataBuffer();
+ do {
+ int status = dataFC.write(bf, dataBlockLength *
offset + bf.position());
+ if (status == -1)
+ throw new EOFException();
+ } while (bf.hasRemaining());
+ }
+
+ entry.curOffset = offset;
+ }
+
+ private void flushAndClose() {
+ try {
+ metaFC.force(true);
+ metaFC.close();
+ } catch (Exception e) {
+ Logger.error(this, "error flusing store", e);
+ }
+ try {
+ headerFC.force(true);
+ headerFC.close();
+ } catch (Exception e) {
+ Logger.error(this, "error flusing store", e);
+ }
+ try {
+ dataFC.force(true);
+ dataFC.close();
+ } catch (Exception e) {
+ Logger.error(this, "error flusing store", e);
+ }
+
+ bloomFilter.force();
+ }
+
+ /**
+ * Change on disk store file size
+ *
+ * @param storeFileSize
+ */
+ private void setStoreFileSize(long storeFileSize) {
+ try {
+ metaRAF.setLength(Entry.METADATA_LENGTH *
storeFileSize);
+ headerRAF.setLength(headerBlockLength * storeFileSize);
+ dataRAF.setLength(dataBlockLength * storeFileSize);
+ } catch (IOException e) {
+ Logger.error(this, "error resizing store file", e);
+ }
+ }
+
+	// ------------- Configuration
+	/**
+	 * Configuration File
+	 *
+	 * <pre>
+	 *  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+	 *  |0|1|2|3|4|5|6|7|8|9|A|B|C|D|E|F|
+	 * +----+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+	 * |0000|             Salt              |
+	 * +----+---------------+---------------+
+	 * |0010|  Store Size   | prevStoreSize |
+	 * +----+---------------+-------+-------+
+	 * |0020| Est Key Count |  Gen  | Flags |
+	 * +----+-------+-------+-------+-------+
+	 * |0030|   K   |                       |
+	 * +----+-------+-----------------------+
+	 *
+	 * Gen = Generation
+	 * K = K for bloom filter
+	 * Bytes after K are reserved and written as zero.
+	 * </pre>
+	 */
+	private final File configFile;
+
+ /**
+ * Load config file
+ *
+ * @return <code>true</code> iff this is a new datastore
+ */
+ private boolean loadConfigFile() throws IOException {
+ assert cipherManager == null; // never load the configuration
twice
+
+ if (!configFile.exists()) {
+ // create new
+ byte[] newsalt = new byte[0x10];
+ random.nextBytes(newsalt);
+ cipherManager = new CipherManager(newsalt);
+
+ writeConfigFile();
+ return true;
+ } else {
+ // try to load
+ RandomAccessFile raf = new RandomAccessFile(configFile,
"r");
+ byte[] salt = new byte[0x10];
+ raf.readFully(salt);
+ cipherManager = new CipherManager(salt);
+
+ storeSize = raf.readLong();
+ prevStoreSize = raf.readLong();
+ keyCount.set(raf.readLong());
+ generation = raf.readInt();
+ flags = raf.readInt();
+
+ if ((flags & FLAG_DIRTY) != 0)
+ flags |= FLAG_REBUILD_BLOOM;
+
+ try {
+ bloomFilterK = raf.readInt();
+ } catch (IOException e) {
+ flags |= FLAG_REBUILD_BLOOM;
+ }
+
+ raf.close();
+ return false;
+ }
+ }
+
+ /**
+ * Write config file
+ */
+ private void writeConfigFile() {
+ configLock.writeLock().lock();
+ try {
+ File tempConfig = new File(configFile.getPath() +
".tmp");
+ RandomAccessFile raf = new RandomAccessFile(tempConfig,
"rw");
+ raf.seek(0);
+ raf.write(cipherManager.getSalt());
+
+ raf.writeLong(storeSize);
+ raf.writeLong(prevStoreSize);
+ raf.writeLong(keyCount.get());
+ raf.writeInt(generation);
+ raf.writeInt(flags);
+ raf.writeInt(bloomFilterK);
+ raf.writeInt(0);
+ raf.writeLong(0);
+
+ raf.close();
+
+ FileUtil.renameTo(tempConfig, configFile);
+ } catch (IOException ioe) {
+ Logger.error(this, "error writing config file for " +
name, ioe);
+ } finally {
+ configLock.writeLock().unlock();
+ }
+ }
+
+	// ------------- Store resizing
+	// Non-zero while a resize is in progress: the store size before the resize.
+	private long prevStoreSize = 0;
+	private Lock cleanerLock = new ReentrantLock(); // local to this datastore
+	private Condition cleanerCondition = cleanerLock.newCondition();
+	private static Lock cleanerGlobalLock = new ReentrantLock(); // global across all datastore
+	private Cleaner cleanerThread;
+	private CleanerStatusUserAlert cleanerStatusUserAlert;
+
+	// Sentinel returned by BatchProcessor.process() meaning "leave the
+	// on-disk entry untouched" (as opposed to null = free the slot).
+	private final Entry NOT_MODIFIED = new Entry();
+
+	// Callback for whole-store scans (resize, bloom rebuild); see
+	// Cleaner.batchProcessEntries().
+	private interface BatchProcessor {
+		// called once before the scan starts
+		void init();
+
+		// call this after reading RESIZE_MEMORY_ENTRIES entries
+		// return false to abort
+		boolean batch(long entriesLeft);
+
+		// call this on abort (e.g. node shutdown)
+		void abort();
+
+		// called once after the whole store has been scanned successfully
+		void finish();
+
+		// return <code>null</code> to free the entry
+		// return NOT_MODIFIED to keep the old entry
+		// (any other return is written back; metadata only)
+		Entry process(Entry entry);
+	}
+
+ private class Cleaner extends NativeThread {
+ /**
+ * How often the clean should run
+ */
+ private static final int CLEANER_PERIOD = 5 * 60 * 1000; // 5
minutes
+
+ private volatile boolean isRebuilding;
+ private volatile boolean isResizing;
+
+ public Cleaner() {
+ super("Store-" + name + "-Cleaner",
NativeThread.LOW_PRIORITY, false);
+ setPriority(MIN_PRIORITY);
+ setDaemon(true);
+ }
+
+ @Override
+ public void run() {
+ super.run();
+
+ try {
+ Thread.sleep((int)(CLEANER_PERIOD / 2 +
CLEANER_PERIOD * Math.random()));
+ } catch (InterruptedException e){}
+
+ if (shutdown)
+ return;
+
+ int loop = 0;
+ while (!shutdown) {
+ loop++;
+
+ cleanerLock.lock();
+ try {
+ long _prevStoreSize;
+ configLock.readLock().lock();
+ try {
+ _prevStoreSize = prevStoreSize;
+ } finally {
+ configLock.readLock().unlock();
+ }
+
+ if (_prevStoreSize != 0 &&
cleanerGlobalLock.tryLock()) {
+ try {
+ isResizing = true;
+
resizeStore(_prevStoreSize, true);
+ } finally {
+ isResizing = false;
+
cleanerGlobalLock.unlock();
+ }
+ }
+
+ boolean _rebuildBloom;
+ configLock.readLock().lock();
+ try {
+ _rebuildBloom = ((flags &
FLAG_REBUILD_BLOOM) != 0);
+ } finally {
+ configLock.readLock().unlock();
+ }
+ if (_rebuildBloom && prevStoreSize == 0
&& cleanerGlobalLock.tryLock()) {
+ try {
+ isRebuilding = true;
+ rebuildBloom(true);
+ } finally {
+ isRebuilding = false;
+
cleanerGlobalLock.unlock();
+ }
+ }
+
+ try {
+ if (loop % 6 == 0)
+ bloomFilter.force();
+ } catch (Exception e) { // may throw
IOException (even if it is not defined)
+ Logger.error(this, "Can't force
bloom filter", e);
+ }
+ writeConfigFile();
+
+ try {
+
cleanerCondition.await(CLEANER_PERIOD, TimeUnit.MILLISECONDS);
+ } catch (InterruptedException e) {
+ Logger.debug(this,
"interrupted", e);
+ }
+ } finally {
+ cleanerLock.unlock();
+ }
+ }
+ }
+
+ private static final int RESIZE_MEMORY_ENTRIES = 128; //
temporary memory store size (in # of entries)
+
+ /**
+ * Move old entries to new location and resize store
+ */
+ private void resizeStore(final long _prevStoreSize, final
boolean sleep) {
+ Logger.normal(this, "Starting datastore resize");
+
+ BatchProcessor resizeProcesser = new BatchProcessor() {
+ List<Entry> oldEntryList = new
LinkedList<Entry>();
+ int optimialK;
+
+ public void init() {
+ if (storeSize > _prevStoreSize)
+ setStoreFileSize(storeSize);
+
+ optimialK =
BloomFilter.optimialK(bloomFilterSize, storeSize);
+ configLock.writeLock().lock();
+ try {
+ generation++;
+ bloomFilter.fork(optimialK);
+ keyCount.set(0);
+ } finally {
+ configLock.writeLock().unlock();
+ }
+
+
WrapperManager.signalStarting(RESIZE_MEMORY_ENTRIES * 30 * 1000 + 1000);
+ }
+
+ public Entry process(Entry entry) {
+ int oldGeneration = entry.generation;
+ if (oldGeneration != generation) {
+ entry.generation = generation;
+ keyCount.incrementAndGet();
+ }
+
+ if (entry.storeSize == storeSize) {
+ // new size, don't have to
relocate
+ if (entry.generation !=
generation) {
+ // update filter
+
bloomFilter.addKey(entry.getDigestedRoutingKey());
+ return entry;
+ } else {
+ return NOT_MODIFIED;
+ }
+ }
+
+ // remove from store, prepare for
relocation
+ if (oldGeneration == generation) {
+ // should be impossible
+ Logger.error(this, //
+ "new generation object
with wrong storeSize. DigestedRoutingKey=" //
+ +
HexUtil.bytesToHex(entry.getDigestedRoutingKey()) //
+ + ", Offset=" +
entry.curOffset);
+
bloomFilter.removeKey(entry.getDigestedRoutingKey());
+ }
+ try {
+
entry.setData(readHeader(entry.curOffset), readData(entry.curOffset));
+ oldEntryList.add(entry);
+ if (oldEntryList.size() >
RESIZE_MEMORY_ENTRIES)
+ oldEntryList.remove(0);
+ } catch (IOException e) {
+ Logger.error(this, "error
reading entry (offset=" + entry.curOffset + ")", e);
+ }
+ return null;
+ }
+
+ int i = 0;
+ public boolean batch(long entriesLeft) {
+
WrapperManager.signalStarting(RESIZE_MEMORY_ENTRIES * 30 * 1000 + 1000);
+
+ if (i++ % 16 == 0)
+ writeConfigFile();
+
+ // shrink data file to current size
+ if (storeSize < _prevStoreSize)
+
setStoreFileSize(Math.max(storeSize, entriesLeft));
+
+ // try to resolve the list
+ ListIterator<Entry> it =
oldEntryList.listIterator();
+ while (it.hasNext())
+ if (resolveOldEntry(it.next()))
+ it.remove();
+
+ return _prevStoreSize == prevStoreSize;
+ }
+
+ public void abort() {
+ bloomFilter.discard();
+ }
+
+ public void finish() {
+ configLock.writeLock().lock();
+ try {
+ if (_prevStoreSize !=
prevStoreSize)
+ return;
+ bloomFilter.merge();
+ prevStoreSize = 0;
+
+ flags &= ~FLAG_REBUILD_BLOOM;
+ checkBloom = true;
+ bloomFilterK = optimialK;
+ } finally {
+ configLock.writeLock().unlock();
+ }
+
+ Logger.normal(this, "Finish resizing ("
+ name + ")");
+ }
+ };
+
+ batchProcessEntries(resizeProcesser, _prevStoreSize,
true, sleep);
+ }
+
+ /**
+ * Rebuild bloom filter
+ */
+ private void rebuildBloom(boolean sleep) {
+ if (bloomFilter == null)
+ return;
+ Logger.normal(this, "Start rebuilding bloom filter (" +
name + ")");
+
+ BatchProcessor rebuildBloomProcessor = new
BatchProcessor() {
+ int optimialK;
+
+ public void init() {
+ optimialK =
BloomFilter.optimialK(bloomFilterSize, storeSize);
+
+ configLock.writeLock().lock();
+ try {
+ generation++;
+ bloomFilter.fork(bloomFilterK);
+ keyCount.set(0);
+ } finally {
+ configLock.writeLock().unlock();
+ }
+
+
WrapperManager.signalStarting(RESIZE_MEMORY_ENTRIES * 5 * 1000 + 1000);
+ }
+
+ public Entry process(Entry entry) {
+ if (entry.generation != generation) {
+
bloomFilter.addKey(entry.getDigestedRoutingKey());
+ keyCount.incrementAndGet();
+
+ entry.generation = generation;
+ return entry;
+ }
+ return NOT_MODIFIED;
+ }
+
+ int i = 0;
+ public boolean batch(long entriesLeft) {
+
WrapperManager.signalStarting(RESIZE_MEMORY_ENTRIES * 5 * 1000 + 1000);
+
+ if (i++ % 16 == 0)
+ writeConfigFile();
+
+ return prevStoreSize == 0;
+ }
+
+ public void abort() {
+ bloomFilter.discard();
+ }
+
+ public void finish() {
+ bloomFilter.merge();
+ configLock.writeLock().lock();
+ try {
+ flags &= ~FLAG_REBUILD_BLOOM;
+ checkBloom = true;
+ bloomFilterK = optimialK;
+ } finally {
+ configLock.writeLock().unlock();
+ }
+
+ Logger.normal(this, "Finish rebuilding
bloom filter (" + name + ")");
+ }
+ };
+
+ batchProcessEntries(rebuildBloomProcessor, storeSize,
false, sleep);
+ }
+
+ private volatile long entriesLeft;
+ private volatile long entriesTotal;
+
+ private void batchProcessEntries(BatchProcessor processor, long
storeSize, boolean reverse, boolean sleep) {
+ entriesLeft = entriesTotal = storeSize;
+
+ long startOffset, step;
+ if (!reverse) {
+ startOffset = 0;
+ step = RESIZE_MEMORY_ENTRIES;
+ } else {
+ startOffset = ((storeSize - 1) /
RESIZE_MEMORY_ENTRIES) * RESIZE_MEMORY_ENTRIES;
+ step = -RESIZE_MEMORY_ENTRIES;
+ }
+
+ int i = 0;
+ processor.init();
+ try {
+ for (long curOffset = startOffset; curOffset >=
0 && curOffset < storeSize; curOffset += step) {
+ if (shutdown) {
+ processor.abort();
+ return;
+ }
+
+ if (i++ % 64 == 0)
+ System.err.println(name + "
cleaner in progress: " + (entriesTotal - entriesLeft) + "/"
+ + entriesTotal);
+
+ batchProcessEntries(curOffset,
RESIZE_MEMORY_ENTRIES, processor);
+ entriesLeft = reverse ? curOffset :
Math.max(storeSize - curOffset - RESIZE_MEMORY_ENTRIES, 0);
+ if (!processor.batch(entriesLeft)) {
+ processor.abort();
+ return;
+ }
+
+ try {
+ if (sleep)
+ Thread.sleep(500);
+ } catch (InterruptedException e) {
+ processor.abort();
+ return;
+ }
+ }
+ processor.finish();
+ } catch (Exception e) {
+ processor.abort();
+ }
+ }
+
+ /**
+ * Read a list of items from store.
+ *
+ * @param offset
+ * start offset, must be multiple of {@link
FILE_SPLIT}
+ * @param length
+ * number of items to read, must be multiple of
{@link FILE_SPLIT}. If this
+ * excess store size, read as much as possible.
+ * @param processor
+ * batch processor
+ * @return <code>true</code> if operation complete
successfully; <code>false</code>
+ * otherwise (e.g. can't acquire locks, node shutting
down)
+ */
+ private boolean batchProcessEntries(long offset, int length,
BatchProcessor processor) {
+ Condition[] locked = new Condition[length];
+ try {
+ // acquire all locks in the region, will unlock
in the finally block
+ for (int i = 0; i < length; i++) {
+ locked[i] =
lockManager.lockEntry(offset + i);
+ if (locked[i] == null)
+ return false;
+ }
+
+ long startFileOffset = offset *
Entry.METADATA_LENGTH;
+ long entriesToRead = length;
+ long bufLen = Entry.METADATA_LENGTH *
entriesToRead;
+
+ ByteBuffer buf = ByteBuffer.allocate((int)
bufLen);
+ boolean dirty = false;
+ try {
+ while (buf.hasRemaining()) {
+ int status = metaFC.read(buf,
startFileOffset + buf.position());
+ if (status == -1)
+ break;
+ }
+ } catch (IOException ioe) {
+ if (shutdown)
+ return false;
+ Logger.error(this, "unexpected
IOException", ioe);
+ }
+ buf.flip();
+
+ try {
+ for (int j = 0; !shutdown &&
buf.limit() > j * Entry.METADATA_LENGTH; j++) {
+ buf.position(j *
Entry.METADATA_LENGTH);
+ if (buf.remaining() <
Entry.METADATA_LENGTH) // EOF
+ break;
+
+ ByteBuffer enBuf = buf.slice();
+
enBuf.limit(Entry.METADATA_LENGTH);
+
+ Entry entry = new Entry(enBuf,
null, null);
+ entry.curOffset = offset + j;
+
+ if (entry.isFree())
+ continue; // not
occupied
+
+ Entry newEntry =
processor.process(entry);
+ if (newEntry == null) {// free
the offset
+ buf.position(j *
Entry.METADATA_LENGTH);
+
buf.put(ByteBuffer.allocate(Entry.METADATA_LENGTH));
+
keyCount.decrementAndGet();
+
+ dirty = true;
+ } else if (newEntry ==
NOT_MODIFIED) {
+ } else {
+ // write back
+ buf.position(j *
Entry.METADATA_LENGTH);
+
buf.put(newEntry.toMetaDataBuffer());
+
+ assert newEntry.header
== null; // not supported
+ assert newEntry.data ==
null; // not supported
+
+ dirty = true;
+ }
+ }
+ } finally {
+ // write back.
+ if (dirty) {
+ buf.flip();
+
+ try {
+ while
(buf.hasRemaining()) {
+
metaFC.write(buf, startFileOffset + buf.position());
+ }
+ } catch (IOException ioe) {
+ Logger.error(this,
"unexpected IOException", ioe);
+ }
+ }
+ }
+
+ return true;
+ } finally {
+ // unlock
+ for (int i = 0; i < length; i++)
+ if (locked[i] != null)
+ lockManager.unlockEntry(offset
+ i, locked[i]);
+ }
+ }
+
+ /**
+ * Put back an old entry to store file
+ *
+ * @param entry
+ * @return <code>true</code> if the entry have put back
successfully.
+ */
+ private boolean resolveOldEntry(Entry entry) {
+ Map<Long, Condition> lockMap =
lockDigestedKey(entry.getDigestedRoutingKey(), false);
+ if (lockMap == null)
+ return false;
+ try {
+ entry.storeSize = storeSize;
+ long[] offsets = entry.getOffset();
+
+ // Check for occupied entry with same key
+ for (long offset : offsets) {
+ try {
+ if (!isFree(offset)
+ &&
Arrays.equals(getDigestedKeyFromOffset(offset), entry.getDigestedRoutingKey()))
{
+ // do nothing
+ return true;
+ }
+ } catch (IOException e) {
+ Logger.debug(this,
"IOExcception on resolveOldEntry", e);
+ }
+ }
+
+ // Check for free entry
+ for (long offset : offsets) {
+ try {
+ if (isFree(offset)) {
+ writeEntry(entry,
offset);
+
bloomFilter.addKey(entry.getDigestedRoutingKey());
+
keyCount.incrementAndGet();
+ return true;
+ }
+ } catch (IOException e) {
+ Logger.debug(this,
"IOExcception on resolveOldEntry", e);
+ }
+ }
+ return false;
+ } finally {
+
unlockDigestedKey(entry.getDigestedRoutingKey(), false, lockMap);
+ }
+ }
+ }
+
+ private final class CleanerStatusUserAlert implements UserAlert {
+ private Cleaner cleaner;
+
+ private CleanerStatusUserAlert(Cleaner cleaner) {
+ this.cleaner = cleaner;
+ }
+
+ public String anchor() {
+ return "store-cleaner-" + name;
+ }
+
+ public String dismissButtonText() {
+ return L10n.getString("UserAlert.hide");
+ }
+
+ public HTMLNode getHTMLText() {
+ return new HTMLNode("#", getText());
+ }
+
+ public short getPriorityClass() {
+ return UserAlert.MINOR;
+ }
+
+ public String getShortText() {
+ if (cleaner.isResizing)
+ return
L10n.getString("SaltedHashFreenetStore.shortResizeProgress", //
+ new String[] { "name", "processed",
"total" },//
+ new String[] { name,
(cleaner.entriesTotal - cleaner.entriesLeft) + "",
+ cleaner.entriesTotal + "" });
+ else
+ return
L10n.getString("SaltedHashFreenetStore.shortRebuildProgress", //
+ new String[] { "name", "processed",
"total" },//
+ new String[] { name,
(cleaner.entriesTotal - cleaner.entriesLeft) + "",
+ cleaner.entriesTotal + "" });
+ }
+
+ public String getText() {
+ if (cleaner.isResizing)
+ return
L10n.getString("SaltedHashFreenetStore.longResizeProgress", //
+ new String[] { "name", "processed",
"total" },//
+ new String[] { name,
(cleaner.entriesTotal - cleaner.entriesLeft) + "",
+ cleaner.entriesTotal + "" });
+ else
+ return
L10n.getString("SaltedHashFreenetStore.longRebuildProgress", //
+ new String[] { "name", "processed",
"total" },//
+ new String[] { name,
(cleaner.entriesTotal - cleaner.entriesLeft) + "",
+ cleaner.entriesTotal + "" });
+ }
+
+ public String getTitle() {
+ return
L10n.getString("SaltedHashFreenetStore.cleanerAlertTitle", //
+ new String[] { "name" }, //
+ new String[] { name });
+ }
+
+ public Object getUserIdentifier() {
+ return null;
+ }
+
+ public boolean isValid() {
+ return cleaner.isRebuilding || cleaner.isResizing;
+ }
+
+ public void isValid(boolean validity) {
+ // Ignore
+ }
+
+ public void onDismiss() {
+ // Ignore
+ }
+
+ public boolean shouldUnregisterOnDismiss() {
+ return true;
+ }
+
+ public boolean userCanDismiss() {
+ return false;
+ }
+
+ public boolean isEventNotification() {
+ return false;
+ }
+ }
+
+	/**
+	 * Attach the alert manager, registering the cleaner progress alert if one
+	 * has been created.
+	 */
+	public void setUserAlertManager(UserAlertManager userAlertManager) {
+		if (cleanerStatusUserAlert != null)
+			userAlertManager.register(cleanerStatusUserAlert);
+		// TODO change useralertmanager? is this a valid case?
+		this.userAlertManager = userAlertManager;
+	}
+
+ public void setMaxKeys(long newStoreSize, boolean shrinkNow) throws
IOException {
+ Logger.normal(this, "[" + name + "] Resize newStoreSize=" +
newStoreSize + ", shinkNow=" + shrinkNow);
+
+ configLock.writeLock().lock();
+ try {
+ if (newStoreSize == this.storeSize)
+ return;
+
+ if (prevStoreSize != 0) {
+ Logger.normal(this, "[" + name + "] resize
already in progress, ignore resize request");
+ return;
+ }
+
+ prevStoreSize = storeSize;
+ storeSize = newStoreSize;
+ writeConfigFile();
+ } finally {
+ configLock.writeLock().unlock();
+ }
+
+ if (cleanerLock.tryLock()) {
+ cleanerCondition.signal();
+ cleanerLock.unlock();
+ }
+ }
+
+	// ------------- Locking
+	// Set once on shutdown; long-running loops poll it so they can bail out.
+	volatile boolean shutdown = false;
+	private LockManager lockManager;
+	// Guards the configuration fields (storeSize, prevStoreSize, generation,
+	// flags, ...): readers take the read lock, resize/shutdown the write lock.
+	private ReadWriteLock configLock = new ReentrantReadWriteLock();
+
+	/**
+	 * Lock all possible offsets of a key. This method would release the
+	 * locks if any locking operation failed.
+	 *
+	 * @param plainKey
+	 * @param usePrevStoreSize also lock the offsets for the pre-resize size
+	 * @return map of locked offsets to their conditions, or <code>null</code>
+	 *         if any lock could not be acquired (e.g. shutting down)
+	 */
+	private Map<Long, Condition> lockPlainKey(byte[] plainKey, boolean
+	usePrevStoreSize) {
+		return lockDigestedKey(cipherManager.getDigestedKey(plainKey),
+	usePrevStoreSize);
+	}
+
+	/** Release the locks taken by {@link #lockPlainKey(byte[], boolean)}. */
+	private void unlockPlainKey(byte[] plainKey, boolean usePrevStoreSize,
+	Map<Long, Condition> lockMap) {
+		unlockDigestedKey(cipherManager.getDigestedKey(plainKey),
+	usePrevStoreSize, lockMap);
+	}
+
	/**
	 * Lock all possible offsets of a key. This method would release the locks if any locking
	 * operation failed.
	 *
	 * @param digestedKey
	 *            digested routing key
	 * @param usePrevStoreSize
	 *            also lock the offsets computed from the previous store size (used during resize)
	 * @return map from each locked offset to its {@link Condition}, or <code>null</code> if not
	 *         all offsets could be locked
	 */
	private Map<Long, Condition> lockDigestedKey(byte[] digestedKey, boolean usePrevStoreSize) {
		// use a set to prevent duplicated offsets,
		// a sorted set to prevent deadlocks
		// NOTE(review): storeSize/prevStoreSize are read here without holding
		// configLock — presumably safe because callers tolerate a stale value;
		// TODO confirm.
		SortedSet<Long> offsets = new TreeSet<Long>();
		long[] offsetArray = getOffsetFromDigestedKey(digestedKey, storeSize);
		for (long offset : offsetArray)
			offsets.add(offset);
		if (usePrevStoreSize && prevStoreSize != 0) {
			offsetArray = getOffsetFromDigestedKey(digestedKey, prevStoreSize);
			for (long offset : offsetArray)
				offsets.add(offset);
		}

		// Acquire locks in ascending offset order; lockEntry() returning null
		// means the lock could not be taken (e.g. shutdown).
		Map<Long, Condition> locked = new TreeMap<Long, Condition>();
		for (long offset : offsets) {
			Condition condition = lockManager.lockEntry(offset);
			if (condition == null)
				break;
			locked.put(offset, condition);
		}

		if (locked.size() == offsets.size()) {
			return locked;
		} else {
			// failed, remove the locks
			for (Map.Entry<Long, Condition> e : locked.entrySet())
				lockManager.unlockEntry(e.getKey(), e.getValue());
			return null;
		}
	}
+
+ private void unlockDigestedKey(byte[] digestedKey, boolean
usePrevStoreSize, Map<Long, Condition> lockMap) {
+ // use a set to prevent duplicated offsets
+ SortedSet<Long> offsets = new TreeSet<Long>();
+ long[] offsetArray = getOffsetFromDigestedKey(digestedKey,
storeSize);
+ for (long offset : offsetArray)
+ offsets.add(offset);
+ if (usePrevStoreSize && prevStoreSize != 0) {
+ offsetArray = getOffsetFromDigestedKey(digestedKey,
prevStoreSize);
+ for (long offset : offsetArray)
+ offsets.add(offset);
+ }
+
+ for (long offset : offsets) {
+ lockManager.unlockEntry(offset, lockMap.get(offset));
+ lockMap.remove(offset);
+ }
+ }
+
	/**
	 * Shutdown hook: stops the lock manager and cleaner thread, flushes data to
	 * disk and clears the dirty flag so the next startup sees a clean shutdown.
	 */
	public class ShutdownDB implements Runnable {
		public void run() {
			shutdown = true;
			lockManager.shutdown();

			cleanerLock.lock();
			try {
				// wake the cleaner if it is waiting on the condition, and
				// interrupt it in case it is blocked elsewhere
				cleanerCondition.signalAll();
				cleanerThread.interrupt();
			} finally {
				cleanerLock.unlock();
			}

			configLock.writeLock().lock();
			try {
				flushAndClose();
				flags &= ~FLAG_DIRTY; // clean shutdown
				writeConfigFile();
			} finally {
				configLock.writeLock().unlock();
			}
		}
	}
+
	// ------------- Hashing
	/** Digests/encrypts routing keys; also used to salt the hashtable offsets. */
	private CipherManager cipherManager;

	/**
	 * Get all probe offsets in the hash table, given a plain routing key.
	 *
	 * @param plainKey
	 *            plain (undigested) routing key
	 * @param storeSize
	 *            hashtable size to compute offsets against
	 * @return candidate offsets, one per probe (may contain duplicates)
	 */
	private long[] getOffsetFromPlainKey(byte[] plainKey, long storeSize) {
		return getOffsetFromDigestedKey(cipherManager.getDigestedKey(plainKey), storeSize);
	}
+
+ /**
+ * Get offset in the hash table, given a digested routing key.
+ *
+ * @param digestedKey
+ * @param storeSize
+ * @return
+ */
+ private long[] getOffsetFromDigestedKey(byte[] digestedKey, long
storeSize) {
+ long keyValue = Fields.bytesToLong(digestedKey);
+ long[] offsets = new long[OPTION_MAX_PROBE];
+
+ for (int i = 0; i < OPTION_MAX_PROBE; i++) {
+ // h + 141 i^2 + 13 i
+ offsets[i] = ((keyValue + 141 * (i * i) + 13 * i) &
Long.MAX_VALUE) % storeSize;
+ }
+
+ return offsets;
+ }
+
	// ------------- Statistics (a.k.a. lies)
	// Counters are incremented by the fetch/put paths (not visible in this chunk).
	private AtomicLong hits = new AtomicLong();
	private AtomicLong misses = new AtomicLong();
	private AtomicLong writes = new AtomicLong();
	private AtomicLong keyCount = new AtomicLong();
	private AtomicLong bloomFalsePos = new AtomicLong();

	/** @return number of successful fetches so far */
	public long hits() {
		return hits.get();
	}

	/** @return number of failed fetches so far */
	public long misses() {
		return misses.get();
	}

	/** @return number of writes so far */
	public long writes() {
		return writes.get();
	}

	/** @return current number of keys in the store */
	public long keyCount() {
		return keyCount.get();
	}
+
+ public long getMaxKeys() {
+ configLock.readLock().lock();
+ long _storeSize = storeSize;
+ configLock.readLock().unlock();
+ return _storeSize;
+ }
+
	/** @return number of bloom filter false positives observed so far */
	public long getBloomFalsePositive() {
		return bloomFalsePos.get();
	}
+
+ // ------------- Migration
+ public void migrationFrom(File storeFile, File keyFile) {
+ try {
+ System.out.println("Migrating from " + storeFile);
+
+ RandomAccessFile storeRAF = new
RandomAccessFile(storeFile, "r");
+ RandomAccessFile keyRAF = keyFile.exists() ? new
RandomAccessFile(keyFile, "r") : null;
+
+ byte[] header = new byte[headerBlockLength];
+ byte[] data = new byte[dataBlockLength];
+ byte[] key = new byte[fullKeyLength];
+
+ long maxKey = storeRAF.length() / (headerBlockLength +
dataBlockLength);
+
+ for (int l = 0; l < maxKey; l++) {
+ if (l % 1024 == 0) {
+ System.out.println(" migrating key " +
l + "/" + maxKey);
+ WrapperManager.signalStarting(10 * 60 *
1000); // max 10 minutes for every 1024 keys
+ }
+
+ boolean keyRead = false;
+ storeRAF.readFully(header);
+ storeRAF.readFully(data);
+ try {
+ if (keyRAF != null) {
+ keyRAF.readFully(key);
+ keyRead = true;
+ }
+ } catch (IOException e) {
+ }
+
+ try {
+ StorableBlock b =
callback.construct(data, header, null, keyRead ? key : null);
+ put(b, b.getRoutingKey(),
b.getFullKey(), data, header, true);
+ } catch (KeyVerifyException e) {
+ System.out.println("kve at block " + l);
+ } catch (KeyCollisionException e) {
+ System.out.println("kce at block " + l);
+ }
+ }
+ } catch (EOFException eof) {
+ // done
+ } catch (IOException e) {
+ e.printStackTrace();
+ }
+ }
+
+ public boolean probablyInStore(byte[] routingKey) {
+ configLock.readLock().lock();
+ try {
+ if (!checkBloom)
+ return true;
+ return
bloomFilter.checkFilter(cipherManager.getDigestedKey(routingKey));
+ } finally {
+ configLock.readLock().unlock();
+ }
+ }
+}
Added: trunk/freenet/src/freenet/support/BinaryBloomFilter.java
===================================================================
--- trunk/freenet/src/freenet/support/BinaryBloomFilter.java
(rev 0)
+++ trunk/freenet/src/freenet/support/BinaryBloomFilter.java 2008-09-02
15:32:21 UTC (rev 22351)
@@ -0,0 +1,81 @@
+/* This code is part of Freenet. It is distributed under the GNU General
+ * Public License, version 2 (or at your option any later version). See
+ * http://www.gnu.org/ for further details of the GPL. */
+package freenet.support;
+
+import java.io.File;
+import java.io.IOException;
+import java.io.RandomAccessFile;
+import java.nio.ByteBuffer;
+import java.nio.channels.FileChannel.MapMode;
+
+/**
+ * @author sdiz
+ */
+public class BinaryBloomFilter extends BloomFilter {
+ /**
+ * Constructor
+ *
+ * @param length
+ * length in bits
+ */
+ protected BinaryBloomFilter(int length, int k) {
+ super(length, k);
+ filter = ByteBuffer.allocate(this.length / 8);
+ }
+
+ /**
+ * Constructor
+ *
+ * @param file
+ * disk file
+ * @param length
+ * length in bits
+ * @throws IOException
+ */
+ protected BinaryBloomFilter(File file, int length, int k) throws
IOException {
+ super(length, k);
+ if (!file.exists() || file.length() != length / 8)
+ needRebuild = true;
+
+ RandomAccessFile raf = new RandomAccessFile(file, "rw");
+ raf.setLength(length / 8);
+ filter = raf.getChannel().map(MapMode.READ_WRITE, 0, length /
8).load();
+ }
+
+ @Override
+ public void removeKey(byte[] key) {
+ // ignore
+ }
+
+ @Override
+ protected boolean getBit(int offset) {
+ return (filter.get(offset / 8) & (1 << (offset % 8))) != 0;
+ }
+
+ @Override
+ protected void setBit(int offset) {
+ byte b = filter.get(offset / 8);
+ b |= 1 << (offset % 8);
+ filter.put(offset / 8, b);
+ }
+
+ @Override
+ protected void unsetBit(int offset) {
+ // NO-OP
+ }
+
+ @Override
+ public void fork(int k) {
+ lock.writeLock().lock();
+ try {
+ File tempFile = File.createTempFile("bloom-", ".tmp");
+ tempFile.deleteOnExit();
+ forkedFilter = new BinaryBloomFilter(tempFile, length,
k);
+ } catch (IOException e) {
+ forkedFilter = new BinaryBloomFilter(length, k);
+ } finally {
+ lock.writeLock().unlock();
+ }
+ }
+}
Added: trunk/freenet/src/freenet/support/BloomFilter.java
===================================================================
--- trunk/freenet/src/freenet/support/BloomFilter.java
(rev 0)
+++ trunk/freenet/src/freenet/support/BloomFilter.java 2008-09-02 15:32:21 UTC
(rev 22351)
@@ -0,0 +1,187 @@
+package freenet.support;
+
+import java.io.File;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.nio.MappedByteBuffer;
+import java.util.Random;
+import java.util.concurrent.locks.ReadWriteLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+
+import org.spaceroots.mantissa.random.MersenneTwister;
+
+public abstract class BloomFilter {
+ protected ByteBuffer filter;
+
+ /** Number of hash functions */
+ protected final int k;
+ protected final int length;
+
+ protected ReadWriteLock lock = new ReentrantReadWriteLock();
+
+ public static BloomFilter createFilter(int length, int k, boolean
counting) {
+ if (k == 0 || length == 0)
+ return new NullBloomFilter(length, k);
+ if (counting)
+ return new CountingBloomFilter(length, k);
+ else
+ return new BinaryBloomFilter(length, k);
+ }
+
+ public static BloomFilter createFilter(File file, int length, int k,
boolean counting) throws IOException {
+ if (k == 0 || length == 0)
+ return new NullBloomFilter(length, k);
+ if (counting)
+ return new CountingBloomFilter(file, length, k);
+ else
+ return new BinaryBloomFilter(file, length, k);
+ }
+
+ protected BloomFilter(int length, int k) {
+ if (length % 8 != 0)
+ length -= length % 8;
+
+ this.length = length;
+ this.k = k;
+ }
+
+ //-- Core
+ public void addKey(byte[] key) {
+ Random hashes = getHashes(key);
+ lock.writeLock().lock();
+ try {
+ for (int i = 0; i < k; i++)
+ setBit(hashes.nextInt(length));
+ } finally {
+ lock.writeLock().unlock();
+ }
+
+ if (forkedFilter != null)
+ forkedFilter.addKey(key);
+ }
+
+ public boolean checkFilter(byte[] key) {
+ Random hashes = getHashes(key);
+ lock.readLock().lock();
+ try {
+ for (int i = 0; i < k; i++)
+ if (!getBit(hashes.nextInt(length)))
+ return false;
+ } finally {
+ lock.readLock().unlock();
+ }
+ return true;
+ }
+
+ public void removeKey(byte[] key) {
+ Random hashes = getHashes(key);
+ lock.writeLock().lock();
+ try {
+ for (int i = 0; i < k; i++)
+ unsetBit(hashes.nextInt(length));
+ } finally {
+ lock.writeLock().unlock();
+ }
+
+ if (forkedFilter != null)
+ forkedFilter.removeKey(key);
+ }
+
+ //-- Bits and Hashes
+ protected abstract boolean getBit(int offset);
+
+ protected abstract void setBit(int offset);
+
+ protected abstract void unsetBit(int offset);
+
+ protected Random getHashes(byte[] key) {
+ return new MersenneTwister(key);
+ }
+
+ //-- Fork & Merge
+ protected BloomFilter forkedFilter;
+
+ /**
+ * Create an empty, in-memory copy of bloom filter. New updates are
written to both filters.
+ * This is written back to disk on #merge()
+ */
+ public abstract void fork(int k);
+
+ public void merge() {
+ lock.writeLock().lock();
+ try {
+ if (forkedFilter == null)
+ return;
+
+ filter.position(0);
+ forkedFilter.filter.position(0);
+
+ filter.put(forkedFilter.filter);
+
+ filter.position(0);
+ forkedFilter.finalize();
+ forkedFilter = null;
+ } finally {
+ lock.writeLock().unlock();
+ }
+ }
+
+ public void discard() {
+ lock.writeLock().lock();
+ try {
+ if (forkedFilter == null)
+ return;
+ forkedFilter.finalize();
+ forkedFilter = null;
+ } finally {
+ lock.writeLock().unlock();
+ }
+ }
+
+ //-- Misc.
+ /**
+ * Calculate optimal K value
+ *
+ * @param filterLength
+ * filter length in bits
+ * @param maxKey
+ * @return optimal K
+ */
+ // may return 0 if the length is too short
+ public static int optimialK(int filterLength, long maxKey) {
+ long k = Math.round(Math.log(2) * filterLength / maxKey);
+
+ if (k > 64)
+ k = 64;
+
+ return (int) k;
+ }
+
+ public int getK() {
+ return k;
+ }
+
+ protected boolean needRebuild;
+
+ public boolean needRebuild() {
+ boolean _needRebuild = needRebuild;
+ needRebuild = false;
+ return _needRebuild;
+
+ }
+
+ public void force() {
+ if (filter instanceof MappedByteBuffer) {
+ ((MappedByteBuffer) filter).force();
+ }
+ }
+
+ @Override
+ protected void finalize() {
+ if (filter != null) {
+ force();
+ }
+ filter = null;
+ forkedFilter = null;
+ }
+}
Added: trunk/freenet/src/freenet/support/CountingBloomFilter.java
===================================================================
--- trunk/freenet/src/freenet/support/CountingBloomFilter.java
(rev 0)
+++ trunk/freenet/src/freenet/support/CountingBloomFilter.java 2008-09-02
15:32:21 UTC (rev 22351)
@@ -0,0 +1,102 @@
+/* This code is part of Freenet. It is distributed under the GNU General
+ * Public License, version 2 (or at your option any later version). See
+ * http://www.gnu.org/ for further details of the GPL. */
+package freenet.support;
+
+import java.io.File;
+import java.io.IOException;
+import java.io.RandomAccessFile;
+import java.nio.ByteBuffer;
+import java.nio.channels.FileChannel.MapMode;
+
+/**
+ * @author sdiz
+ */
+public class CountingBloomFilter extends BloomFilter {
+ /**
+ * Constructor
+ *
+ * @param length
+ * length in bits
+ */
+ protected CountingBloomFilter(int length, int k) {
+ super(length, k);
+ filter = ByteBuffer.allocate(this.length / 4);
+ }
+
+ /**
+ * Constructor
+ *
+ * @param file
+ * disk file
+ * @param length
+ * length in bits
+ * @throws IOException
+ */
+ protected CountingBloomFilter(File file, int length, int k) throws
IOException {
+ super(length, k);
+ int fileLength = length / 4;
+ if (!file.exists() || file.length() != fileLength)
+ needRebuild = true;
+
+ RandomAccessFile raf = new RandomAccessFile(file, "rw");
+ raf.setLength(fileLength);
+ filter = raf.getChannel().map(MapMode.READ_WRITE, 0,
fileLength).load();
+ }
+
+ public CountingBloomFilter(int length, int k, byte[] buffer) {
+ super(length, k);
+ assert(buffer.length == length / 4);
+ filter = ByteBuffer.wrap(buffer);
+ }
+
+ @Override
+ public boolean getBit(int offset) {
+ byte b = filter.get(offset / 4);
+ byte v = (byte) ((b >>> offset % 4 * 2) & 3);
+
+ return v != 0;
+ }
+
+ @Override
+ public void setBit(int offset) {
+ byte b = filter.get(offset / 4);
+ byte v = (byte) ((b >>> offset % 4 * 2) & 3);
+
+ if (v == 3)
+ return; // overflow
+
+ b &= ~(3 << offset % 4 * 2); // unset bit
+ b |= (v + 1) << offset % 4 * 2; // set bit
+
+ filter.put(offset / 4, b);
+ }
+
+ @Override
+ public void unsetBit(int offset) {
+ byte b = filter.get(offset / 4);
+ byte v = (byte) ((b >>> offset % 4 * 2) & 3);
+
+ if (v == 0 || v == 3)
+ return; // overflow / underflow
+
+ b &= ~(3 << offset % 4 * 2); // unset bit
+ b |= (v - 1) << offset % 4 * 2; // set bit
+
+ filter.put(offset / 4, b);
+ }
+
+ @Override
+ public void fork(int k) {
+ lock.writeLock().lock();
+ try {
+ File tempFile = File.createTempFile("bloom-", ".tmp");
+ tempFile.deleteOnExit();
+ forkedFilter = new CountingBloomFilter(tempFile,
length, k);
+ } catch (IOException e) {
+ forkedFilter = new CountingBloomFilter(length, k);
+ } finally {
+ lock.writeLock().unlock();
+ }
+ }
+}
Added: trunk/freenet/src/freenet/support/NullBloomFilter.java
===================================================================
--- trunk/freenet/src/freenet/support/NullBloomFilter.java
(rev 0)
+++ trunk/freenet/src/freenet/support/NullBloomFilter.java 2008-09-02
15:32:21 UTC (rev 22351)
@@ -0,0 +1,59 @@
+/* This code is part of Freenet. It is distributed under the GNU General
+ * Public License, version 2 (or at your option any later version). See
+ * http://www.gnu.org/ for further details of the GPL. */
+package freenet.support;
+
+/**
+ * @author sdiz
+ */
+public class NullBloomFilter extends BloomFilter {
+ protected NullBloomFilter(int length, int k) {
+ super(length, k);
+ }
+
+ @Override
+ public boolean checkFilter(byte[] key) {
+ return true;
+ }
+
+ @Override
+ public void addKey(byte[] key) {
+ // ignore
+ }
+
+ @Override
+ public void removeKey(byte[] key) {
+ // ignore
+ }
+
+ @Override
+ protected boolean getBit(int offset) {
+ // ignore
+ return true;
+ }
+
+ @Override
+ protected void setBit(int offset) {
+ // ignore
+ }
+
+ @Override
+ protected void unsetBit(int offset) {
+ // ignore
+ }
+
+ @Override
+ public void fork(int k) {
+ return;
+ }
+
+ @Override
+ public void discard() {
+ return;
+ }
+
+ @Override
+ public void merge() {
+ return;
+ }
+}
Added: trunk/freenet/test/freenet/support/BloomFilterTest.java
===================================================================
--- trunk/freenet/test/freenet/support/BloomFilterTest.java
(rev 0)
+++ trunk/freenet/test/freenet/support/BloomFilterTest.java 2008-09-02
15:32:21 UTC (rev 22351)
@@ -0,0 +1,154 @@
+package freenet.support;
+
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Random;
+import java.util.Set;
+
+import junit.framework.TestCase;
+
+import org.spaceroots.mantissa.random.MersenneTwister;
+
/**
 * Tests for {@link BloomFilter} implementations: no false negatives, removal
 * (counting filter only), and false-positive rate against the theoretical bound.
 */
public class BloomFilterTest extends TestCase {
	private static final int FILTER_SIZE = 4 * 1024; // MUST be > PASS
	private static final int PASS = 2048;
	private static final int PASS_REMOVE = 4096;
	private static final int PASS_POS = 256;
	private static final int PASS_FALSE = 8192;

	// seeded-by-default MersenneTwister; used for all random keys
	private final Random rand = new MersenneTwister();

	// Add PASS_POS random 32-byte keys; every one must be reported present
	// (a bloom filter never produces false negatives).
	private void _testFilterPositive(BloomFilter filter) {
		byte[][] list = new byte[PASS_POS][];

		for (int i = 0; i < PASS_POS; i++) {
			byte[] b = new byte[32];
			rand.nextBytes(b);

			filter.addKey(b);
			list[i] = b;
		}

		for (byte[] b : list) {
			assertTrue(filter.checkFilter(b));
		}
	}

	public void testCountingFilterPositive() {
		int K = BloomFilter.optimialK(FILTER_SIZE, PASS_POS);
		BloomFilter filter = BloomFilter.createFilter(FILTER_SIZE, K, true);
		_testFilterPositive(filter);
	}

	public void testBinaryFilterPositive() {
		int K = BloomFilter.optimialK(FILTER_SIZE, PASS_POS);
		BloomFilter filter = BloomFilter.createFilter(FILTER_SIZE, K, false);
		_testFilterPositive(filter);
	}

	// Counting filter: removed keys should (mostly) disappear while the
	// original keys must all remain present.
	public void testCountingFilterRemove() {
		int K = BloomFilter.optimialK(FILTER_SIZE, PASS);
		BloomFilter filter = BloomFilter.createFilter(FILTER_SIZE, K, true);

		Map<ByteArrayWrapper, byte[]> baseList = new HashMap<ByteArrayWrapper, byte[]>();

		// Add Keys
		for (int i = 0; i < PASS; i++) {
			byte[] b = new byte[32];
			do {
				rand.nextBytes(b);
			} while (baseList.containsKey(new ByteArrayWrapper(b)));

			filter.addKey(b);
			baseList.put(new ByteArrayWrapper(b), b);
			assertTrue("check add BASE", filter.checkFilter(b));
		}

		// Add some FALSE_PASS keys (64-byte, distinct from the 32-byte base keys)
		Map<ByteArrayWrapper, byte[]> newList = new HashMap<ByteArrayWrapper, byte[]>();
		int fPos = 0;
		for (int i = 0; i < PASS_REMOVE; i++) {
			byte[] b = new byte[64];
			ByteArrayWrapper wrapper;
			do {
				rand.nextBytes(b);
				wrapper = new ByteArrayWrapper(b);
			} while (newList.containsKey(wrapper));

			filter.addKey(b);
			newList.put(wrapper, b);
			assertTrue("check add NEW", filter.checkFilter(b));
		}

		// Remove the "NEW" keys and count false positive
		for (byte[] b : newList.values())
			filter.removeKey(b);
		for (byte[] b : newList.values())
			if (filter.checkFilter(b))
				fPos++;

		// Check that at least some of the removed keys actually disappeared
		assertFalse("100% false positive?", fPos == PASS_REMOVE);

		// Check if old keys still here
		for (byte[] b : baseList.values())
			assertTrue("check original", filter.checkFilter(b));
	}

	// Add PASS keys, then probe PASS_FALSE keys that were never added and
	// compare the observed false-positive rate with the theoretical bound
	// p = (1 - (1 - 1/m)^(k*n))^k, allowing 5% slack plus one count.
	private void _testFilterFalsePositive(BloomFilter filter) {
		Set<ByteArrayWrapper> list = new HashSet<ByteArrayWrapper>();

		// Add Keys
		for (int i = 0; i < PASS; i++) {
			byte[] b = new byte[32];
			do {
				rand.nextBytes(b);
			} while (list.contains(new ByteArrayWrapper(b)));

			filter.addKey(b);
			list.add(new ByteArrayWrapper(b));
			assertTrue("check add", filter.checkFilter(b));
		}

		System.out.println("---" + filter + "---");

		int fPos = 0;
		for (int i = 0; i < PASS_FALSE; i++) {
			byte[] b = new byte[64]; // 64 bytes, sure not exist
			rand.nextBytes(b);

			if (filter.checkFilter(b))
				fPos++;
		}

		final int K = filter.getK();
		final double q = 1 - Math.pow(1 - 1.0 / FILTER_SIZE, K * PASS);
		final double p = Math.pow(q, K);
		final double actual = (double) fPos / PASS_FALSE;
		final double limit = p * 1.05 + 1.0 / PASS_FALSE;

		//*- comment-toggle: diagnostic output, remove leading "//" pair to disable
		System.out.println(" k = " + K);
		System.out.println(" q = " + q);
		System.out.println(" p = " + p);
		System.out.println(" limit = " + limit);
		System.out.println(" actual = " + actual);
		System.out.println(" actual / p = " + actual / p);
		/**/

		assertFalse("false positive, p=" + p + ", actual=" + actual, actual > limit);
	}

	public void testCountingFilterFalsePositive() {
		int K = BloomFilter.optimialK(FILTER_SIZE, PASS);
		BloomFilter filter = BloomFilter.createFilter(FILTER_SIZE, K, true);
		_testFilterFalsePositive(filter);
	}

	public void testBinaryFilterFalsePositive() {
		int K = BloomFilter.optimialK(FILTER_SIZE, PASS);
		BloomFilter filter = BloomFilter.createFilter(FILTER_SIZE, K, false);
		_testFilterFalsePositive(filter);
	}
}