Author: toad
Date: 2006-08-05 02:09:37 +0000 (Sat, 05 Aug 2006)
New Revision: 9896
Modified:
trunk/freenet/src/freenet/client/async/BaseSingleFileFetcher.java
trunk/freenet/src/freenet/client/async/ClientRequestScheduler.java
trunk/freenet/src/freenet/client/async/SendableGet.java
trunk/freenet/src/freenet/client/async/USKChecker.java
trunk/freenet/src/freenet/node/InsertHandler.java
trunk/freenet/src/freenet/node/Node.java
trunk/freenet/src/freenet/node/RequestHandler.java
trunk/freenet/src/freenet/node/RequestSender.java
trunk/freenet/src/freenet/node/SSKInsertHandler.java
trunk/freenet/src/freenet/node/Version.java
trunk/freenet/src/freenet/store/BerkeleyDBFreenetStore.java
trunk/freenet/src/freenet/store/FreenetStore.java
Log:
927: Two-level cache. MAJOR CAVEAT:
- Smooth shrinking is not implemented yet, so it doesn't shrink the old cache
AT ALL to make way for the new one.
- I.e. the store will grow up to storeSize and the cache will also grow up to
storeSize, so disk usage is roughly doubled for now.
- So definitely not to be fired and forgotten. DO NOT INSERT TO UPDATER YET.
Experimental code; simulations say the two-level cache is good, but we won't
see its full effect until it is mandatory (which it isn't yet, because of the
above).
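For reference, the intended semantics of the two levels (Node.store(block, deep)
and Node.fetch(key, dontPromote) in the diff below) are roughly as sketched
here. The "deep" flag is the resetNearestLoc/resetClosestLoc value passed in by
the handlers, i.e. a block only enters the long-term store when this node reset
the HTL because it is the closest node found so far. The LRU-map "Store" class
and the class name below are illustrative stand-ins for BerkeleyDBFreenetStore,
not code from this commit:

import java.util.LinkedHashMap;
import java.util.Map;

class TwoLevelCacheSketch {
    // Stand-in for FreenetStore: an access-ordered LRU map capped at maxKeys.
    static class Store extends LinkedHashMap<String, byte[]> {
        private final int maxKeys;
        Store(int maxKeys) { super(16, 0.75f, true); this.maxKeys = maxKeys; }
        protected boolean removeEldestEntry(Map.Entry<String, byte[]> eldest) {
            return size() > maxKeys;
        }
    }

    private final Store datastore; // long-term: written only when we are closest to the key
    private final Store datacache; // short-term: written on every successful transfer

    TwoLevelCacheSketch(int maxKeys) {
        // CAVEAT above: both levels are currently sized to maxStoreKeys,
        // so total disk usage is roughly doubled until smooth shrinking lands.
        datastore = new Store(maxKeys);
        datacache = new Store(maxKeys);
    }

    // Mirrors Node.store(block, deep): always write the cache, write the store only when "deep".
    synchronized void store(String key, byte[] data, boolean deep) {
        if(deep)
            datastore.put(key, data);
        datacache.put(key, data);
    }

    // Mirrors Node.fetch(key, dontPromote): check the store first, then the cache.
    synchronized byte[] fetch(String key) {
        byte[] block = datastore.get(key);
        if(block != null)
            return block;
        return datacache.get(key);
    }
}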
Modified: trunk/freenet/src/freenet/client/async/BaseSingleFileFetcher.java
===================================================================
--- trunk/freenet/src/freenet/client/async/BaseSingleFileFetcher.java 2006-08-05 00:51:02 UTC (rev 9895)
+++ trunk/freenet/src/freenet/client/async/BaseSingleFileFetcher.java 2006-08-05 02:09:37 UTC (rev 9896)
@@ -103,4 +103,9 @@
return parent.getClient();
}
+ public boolean dontCache() {
+ return !ctx.cacheLocalRequests;
+ }
+
+
}
Modified: trunk/freenet/src/freenet/client/async/ClientRequestScheduler.java
===================================================================
--- trunk/freenet/src/freenet/client/async/ClientRequestScheduler.java 2006-08-05 00:51:02 UTC (rev 9895)
+++ trunk/freenet/src/freenet/client/async/ClientRequestScheduler.java 2006-08-05 02:09:37 UTC (rev 9896)
@@ -154,7 +154,7 @@
if(!getter.ignoreStore()) {
ClientKeyBlock block;
try {
- block = node.fetchKey(getter.getKey());
+ block = node.fetchKey(getter.getKey(), getter.dontCache());
} catch (KeyVerifyException e) {
// Verify exception, probably bogus at source;
// verifies at low-level, but not at decode.
Modified: trunk/freenet/src/freenet/client/async/SendableGet.java
===================================================================
--- trunk/freenet/src/freenet/client/async/SendableGet.java 2006-08-05 00:51:02 UTC (rev 9895)
+++ trunk/freenet/src/freenet/client/async/SendableGet.java 2006-08-05 02:09:37 UTC (rev 9896)
@@ -19,4 +19,7 @@
/** Should the request ignore the datastore? */
public boolean ignoreStore();
+
+ /** If true, don't cache local requests */
+ public boolean dontCache();
}
Modified: trunk/freenet/src/freenet/client/async/USKChecker.java
===================================================================
--- trunk/freenet/src/freenet/client/async/USKChecker.java 2006-08-05 00:51:02 UTC (rev 9895)
+++ trunk/freenet/src/freenet/client/async/USKChecker.java 2006-08-05 02:09:37 UTC (rev 9896)
@@ -74,5 +74,5 @@
public String toString() {
return "USKChecker for "+key.getURI();
}
-
+
}
Modified: trunk/freenet/src/freenet/node/InsertHandler.java
===================================================================
--- trunk/freenet/src/freenet/node/InsertHandler.java 2006-08-05 00:51:02 UTC (rev 9895)
+++ trunk/freenet/src/freenet/node/InsertHandler.java 2006-08-05 02:09:37 UTC (rev 9896)
@@ -39,6 +39,7 @@
private byte[] headers;
private BlockReceiver br;
private Thread runThread;
+ private final boolean resetNearestLoc;
PartiallyReceivedBlock prb;
InsertHandler(Message req, long id, Node node, long startTime) {
@@ -55,7 +56,8 @@
if(PeerManager.distance(targetLoc, myLoc) < PeerManager.distance(targetLoc, closestLoc)) {
closestLoc = myLoc;
htl = Node.MAX_HTL;
- }
+ resetNearestLoc = true;
+ } else resetNearestLoc = false;
}
public String toString() {
@@ -347,7 +349,7 @@
if(!canCommit) return;
if(!prb.allReceived()) return;
CHKBlock block = new CHKBlock(prb.getBlock(), headers, key);
- node.store(block);
+ node.store(block, resetNearestLoc);
Logger.minor(this, "Committed");
} catch (CHKVerifyException e) {
Logger.error(this, "Verify failed in InsertHandler: "+e+" -
headers: "+HexUtil.bytesToHex(headers), e);
Modified: trunk/freenet/src/freenet/node/Node.java
===================================================================
--- trunk/freenet/src/freenet/node/Node.java 2006-08-05 00:51:02 UTC (rev 9895)
+++ trunk/freenet/src/freenet/node/Node.java 2006-08-05 02:09:37 UTC (rev 9896)
@@ -528,6 +528,13 @@
private final FreenetStore sskDatastore;
/** The store of DSAPublicKeys (by hash) */
private final FreenetStore pubKeyDatastore;
+ /** These 3 are private because they must be protected by synchronized(this) */
+ /** The CHK datacache */
+ private final FreenetStore chkDatacache;
+ /** The SSK datacache */
+ private final FreenetStore sskDatacache;
+ /** The cache of DSAPublicKeys (by hash) */
+ private final FreenetStore pubKeyDatacache;
/** RequestSender's currently running, by KeyHTLPair */
private final HashMap requestSenders;
/** RequestSender's currently transferring, by key */
@@ -1448,6 +1455,9 @@
chkDatastore.setMaxKeys(maxStoreKeys);
sskDatastore.setMaxKeys(maxStoreKeys);
pubKeyDatastore.setMaxKeys(maxStoreKeys);
+ chkDatacache.setMaxKeys(maxStoreKeys);
+ sskDatacache.setMaxKeys(maxStoreKeys);
+ pubKeyDatacache.setMaxKeys(maxStoreKeys);
} catch (IOException e) {
// FIXME we need to be able to tell the user.
Logger.error(this, "Caught "+e+" resizing the datastore", e);
@@ -1487,6 +1497,37 @@
throw new NodeInitException(EXIT_STORE_OTHER, msg);
}
+
+ String chkStorePath = storeDir.getPath()+File.separator+"store-"+portNumber;
+ String chkCachePath = storeDir.getPath()+File.separator+"cache-"+portNumber;
+ String pkStorePath = storeDir.getPath()+File.separator+"pubkeystore-"+portNumber;
+ String pkCachePath = storeDir.getPath()+File.separator+"pubkeycache-"+portNumber;
+ String sskStorePath = storeDir.getPath()+File.separator+"sskstore-"+portNumber;
+ String sskCachePath = storeDir.getPath()+File.separator+"sskcache-"+portNumber;
+ File chkStoreFile = new File(chkStorePath);
+ File chkCacheFile = new File(chkCachePath);
+ File pkStoreFile = new File(pkStorePath);
+ File pkCacheFile = new File(pkCachePath);
+ File sskStoreFile = new File(sskStorePath);
+ File sskCacheFile = new File(sskCachePath);
+
+ // Upgrade
+ if(chkStoreFile.exists() && !chkCacheFile.exists()) {
+ System.err.println("Renaming CHK store to CHK cache.");
+ if(!chkStoreFile.renameTo(chkCacheFile))
+ throw new NodeInitException(EXIT_STORE_OTHER, "Could not migrate to two level cache: Could not rename "+chkStoreFile+" to "+chkCacheFile);
+ }
+ if(pkStoreFile.exists() && !pkCacheFile.exists()) {
+ System.err.println("Renaming PK store to PK cache.");
+ if(!pkStoreFile.renameTo(pkCacheFile))
+ throw new NodeInitException(EXIT_STORE_OTHER, "Could not migrate to two level cache: Could not rename "+pkStoreFile+" to "+pkCacheFile);
+ }
+ if(sskStoreFile.exists() && !sskCacheFile.exists()) {
+ System.err.println("Renaming SSK store to SSK cache.");
+ if(!sskStoreFile.renameTo(sskCacheFile))
+ throw new NodeInitException(EXIT_STORE_OTHER, "Could not migrate to two level cache: Could not rename "+sskStoreFile+" to "+sskCacheFile);
+ }
+
try {
Logger.normal(this, "Initializing CHK Datastore");
System.out.println("Initializing CHK Datastore
("+maxStoreKeys+" keys)");
@@ -1495,34 +1536,67 @@
if((lastVersion > 0) && (lastVersion < 852)) {
throw new DatabaseException("Reconstructing store because started from old version");
}
- tmp = new BerkeleyDBFreenetStore(storeDir.getPath()+File.separator+"store-"+portNumber, maxStoreKeys, 32768, CHKBlock.TOTAL_HEADERS_LENGTH, true);
+ tmp = new BerkeleyDBFreenetStore(chkStorePath, maxStoreKeys, 32768, CHKBlock.TOTAL_HEADERS_LENGTH, true);
} catch (DatabaseException e) {
System.err.println("Could not open store: "+e);
e.printStackTrace();
System.err.println("Attempting to
reconstruct...");
WrapperManager.signalStarting(5*60*60*1000);
- tmp = new BerkeleyDBFreenetStore(storeDir.getPath()+File.separator+"store-"+portNumber, maxStoreKeys, 32768, CHKBlock.TOTAL_HEADERS_LENGTH, BerkeleyDBFreenetStore.TYPE_CHK);
+ tmp = new BerkeleyDBFreenetStore(chkStorePath, maxStoreKeys, 32768, CHKBlock.TOTAL_HEADERS_LENGTH, BerkeleyDBFreenetStore.TYPE_CHK);
}
chkDatastore = tmp;
+ Logger.normal(this, "Initializing CHK Datacache");
+ System.out.println("Initializing CHK Datacache
("+maxStoreKeys+" keys)");
+ try {
+ if((lastVersion > 0) && (lastVersion < 852)) {
+ throw new
DatabaseException("Reconstructing store because started from old version");
+ }
+ tmp = new BerkeleyDBFreenetStore(chkCachePath,
maxStoreKeys, 32768, CHKBlock.TOTAL_HEADERS_LENGTH, true);
+ } catch (DatabaseException e) {
+ System.err.println("Could not open store: "+e);
+ e.printStackTrace();
+ System.err.println("Attempting to
reconstruct...");
+ WrapperManager.signalStarting(5*60*60*1000);
+ tmp = new BerkeleyDBFreenetStore(chkCachePath,
maxStoreKeys, 32768, CHKBlock.TOTAL_HEADERS_LENGTH,
BerkeleyDBFreenetStore.TYPE_CHK);
+ }
+ chkDatacache = tmp;
Logger.normal(this, "Initializing pubKey Datastore");
System.out.println("Initializing pubKey Datastore");
try {
if((lastVersion > 0) && (lastVersion < 852)) {
throw new DatabaseException("Reconstructing store because started from old version");
}
- tmp = new BerkeleyDBFreenetStore(storeDir.getPath()+File.separator+"pubkeystore-"+portNumber, maxStoreKeys, DSAPublicKey.PADDED_SIZE, 0, true);
+ tmp = new BerkeleyDBFreenetStore(pkStorePath, maxStoreKeys, DSAPublicKey.PADDED_SIZE, 0, true);
} catch (DatabaseException e) {
System.err.println("Could not open store: "+e);
e.printStackTrace();
System.err.println("Attempting to
reconstruct...");
WrapperManager.signalStarting(5*60*60*1000);
- tmp = new
BerkeleyDBFreenetStore(storeDir.getPath()+File.separator+"pubkeystore-"+portNumber,
maxStoreKeys, DSAPublicKey.PADDED_SIZE, 0, BerkeleyDBFreenetStore.TYPE_PUBKEY);
+ tmp = new BerkeleyDBFreenetStore(pkStorePath,
maxStoreKeys, DSAPublicKey.PADDED_SIZE, 0, BerkeleyDBFreenetStore.TYPE_PUBKEY);
}
this.pubKeyDatastore = tmp;
+ Logger.normal(this, "Initializing pubKey Datacache");
+ System.out.println("Initializing pubKey Datacache");
+ try {
+ if((lastVersion > 0) && (lastVersion < 852)) {
+ throw new DatabaseException("Reconstructing store because started from old version");
+ }
+ tmp = new BerkeleyDBFreenetStore(pkCachePath, maxStoreKeys, DSAPublicKey.PADDED_SIZE, 0, true);
+ } catch (DatabaseException e) {
+ System.err.println("Could not open store: "+e);
+ e.printStackTrace();
+ System.err.println("Attempting to reconstruct...");
+ WrapperManager.signalStarting(5*60*60*1000);
+ tmp = new BerkeleyDBFreenetStore(pkCachePath, maxStoreKeys, DSAPublicKey.PADDED_SIZE, 0, BerkeleyDBFreenetStore.TYPE_PUBKEY);
+ }
+ this.pubKeyDatacache = tmp;
// FIXME can't auto-fix SSK stores.
Logger.normal(this, "Initializing SSK Datastore");
System.out.println("Initializing SSK Datastore");
- sskDatastore = new BerkeleyDBFreenetStore(storeDir.getPath()+File.separator+"sskstore-"+portNumber, maxStoreKeys, 1024, SSKBlock.TOTAL_HEADERS_LENGTH, false);
+ sskDatastore = new BerkeleyDBFreenetStore(sskStorePath, maxStoreKeys, 1024, SSKBlock.TOTAL_HEADERS_LENGTH, false);
+ Logger.normal(this, "Initializing SSK Datacache");
+ System.out.println("Initializing SSK Datacache");
+ sskDatacache = new BerkeleyDBFreenetStore(sskCachePath, maxStoreKeys, 1024, SSKBlock.TOTAL_HEADERS_LENGTH, false);
} catch (FileNotFoundException e1) {
String msg = "Could not open datastore: "+e1;
Logger.error(this, msg, e1);
@@ -1866,7 +1940,7 @@
Logger.error(this, "Could not lock UID just randomly
generated: "+uid+" - probably indicates broken PRNG");
throw new
LowLevelGetException(LowLevelGetException.INTERNAL_ERROR);
}
- Object o = makeRequestSender(key.getNodeCHK(), MAX_HTL, uid,
null, lm.loc.getValue(), localOnly, cache, ignoreStore);
+ Object o = makeRequestSender(key.getNodeCHK(), MAX_HTL, uid,
null, lm.loc.getValue(), false, localOnly, cache, ignoreStore);
if(o instanceof CHKBlock) {
try {
return new ClientCHKBlock((CHKBlock)o, key);
@@ -1960,7 +2034,7 @@
Logger.error(this, "Could not lock UID just randomly
generated: "+uid+" - probably indicates broken PRNG");
throw new
LowLevelGetException(LowLevelGetException.INTERNAL_ERROR);
}
- Object o = makeRequestSender(key.getNodeKey(), MAX_HTL, uid,
null, lm.loc.getValue(), localOnly, cache, ignoreStore);
+ Object o = makeRequestSender(key.getNodeKey(), MAX_HTL, uid,
null, lm.loc.getValue(), false, localOnly, cache, ignoreStore);
if(o instanceof SSKBlock) {
try {
SSKBlock block = (SSKBlock)o;
@@ -2068,11 +2142,7 @@
}
long startTime = System.currentTimeMillis();
if(cache) {
- try {
- chkDatastore.put(block);
- } catch (IOException e) {
- Logger.error(this, "Datastore failure: "+e, e);
- }
+ store(block, false);
}
is = makeInsertSender((NodeCHK)block.getKey(), MAX_HTL, uid, null, headers, prb, false, lm.getLocation().getValue(), cache);
@@ -2179,15 +2249,14 @@
long startTime = System.currentTimeMillis();
if(cache) {
try {
- sskDatastore.put(block, false);
- } catch (IOException e) {
- Logger.error(this, "Datastore failure: "+e, e);
+ if(cache)
+ store(block, false);
} catch (KeyCollisionException e) {
throw new LowLevelPutException(LowLevelPutException.COLLISION);
}
}
is = makeInsertSender(block,
- MAX_HTL, uid, null, false, lm.getLocation().getValue(), cache);
+ MAX_HTL, uid, null, false, lm.getLocation().getValue(), false, cache);
boolean hasReceivedRejectedOverload = false;
// Wait for status
while(true) {
@@ -2248,7 +2317,7 @@
if(is.hasCollided()) {
// Store it locally so it can be fetched immediately, and overwrites any locally inserted.
try {
- sskDatastore.put(is.getBlock(), true);
+ sskDatacache.put(is.getBlock(), true);
} catch (KeyCollisionException e) {
// Impossible
} catch (IOException e) {
@@ -2575,37 +2644,33 @@
* a RequestSender, unless the HTL is 0, in which case NULL.
* RequestSender.
*/
- public Object makeRequestSender(Key key, short htl, long uid, PeerNode source, double closestLocation, boolean localOnly, boolean cache, boolean ignoreStore) {
+ public Object makeRequestSender(Key key, short htl, long uid, PeerNode source, double closestLocation, boolean resetClosestLocation, boolean localOnly, boolean cache, boolean ignoreStore) {
Logger.minor(this, "makeRequestSender("+key+","+htl+","+uid+","+source+") on "+portNumber);
// In store?
KeyBlock chk = null;
if(!ignoreStore) {
- try {
- if(key instanceof NodeCHK)
- chk = chkDatastore.fetch((NodeCHK)key, !cache);
- else if(key instanceof NodeSSK) {
- NodeSSK k = (NodeSSK)key;
- DSAPublicKey pubKey = k.getPubKey();
- if(pubKey == null) {
- pubKey = getKey(k.getPubKeyHash());
- Logger.minor(this, "Fetched pubkey: "+pubKey+" "+(pubKey == null ? "" : pubKey.writeAsField()));
- try {
- k.setPubKey(pubKey);
- } catch (SSKVerifyException e) {
- Logger.error(this, "Error setting pubkey: "+e, e);
- }
+ if(key instanceof NodeCHK) {
+ chk = fetch((NodeCHK)key, !cache);
+ } else if(key instanceof NodeSSK) {
+ NodeSSK k = (NodeSSK)key;
+ DSAPublicKey pubKey = k.getPubKey();
+ if(pubKey == null) {
+ pubKey = getKey(k.getPubKeyHash());
+ Logger.minor(this, "Fetched pubkey: "+pubKey+" "+(pubKey == null ? "" : pubKey.writeAsField()));
+ try {
+ k.setPubKey(pubKey);
+ } catch (SSKVerifyException e) {
+ Logger.error(this, "Error setting pubkey: "+e, e);
}
- if(pubKey != null) {
- Logger.minor(this, "Got pubkey: "+pubKey+" "+pubKey.writeAsField());
- chk = sskDatastore.fetch((NodeSSK)key, !cache);
- } else {
- Logger.minor(this, "Not found because no pubkey: "+uid);
- }
- } else
- throw new IllegalStateException("Unknown key type: "+key.getClass());
- } catch (IOException e) {
- Logger.error(this, "Error accessing store: "+e, e);
- }
+ }
+ if(pubKey != null) {
+ Logger.minor(this, "Got pubkey: "+pubKey+" "+pubKey.writeAsField());
+ chk = fetch((NodeSSK)key, !cache);
+ } else {
+ Logger.minor(this, "Not found because no pubkey: "+uid);
+ }
+ } else
+ throw new IllegalStateException("Unknown key type: "+key.getClass());
if(chk != null) return chk;
}
if(localOnly) return null;
@@ -2636,7 +2701,7 @@
return sender;
}
- sender = new RequestSender(key, null, htl, uid, this, closestLocation, source);
+ sender = new RequestSender(key, null, htl, uid, this, closestLocation, resetClosestLocation, source);
// RequestSender adds itself to requestSenders
}
sender.start();
@@ -2707,39 +2772,66 @@
}
}
- public SSKBlock fetch(NodeSSK key) {
+ public SSKBlock fetch(NodeSSK key, boolean dontPromote) {
+ dumpStoreHits();
try {
- return sskDatastore.fetch(key, false);
+ SSKBlock block = sskDatastore.fetch(key, dontPromote);
+ if(block != null) {
+ return block;
+ }
+ return sskDatacache.fetch(key, dontPromote);
} catch (IOException e) {
Logger.error(this, "Cannot fetch data: "+e, e);
return null;
}
}
- public CHKBlock fetch(NodeCHK key) {
+ public CHKBlock fetch(NodeCHK key, boolean dontPromote) {
+ dumpStoreHits();
try {
- return chkDatastore.fetch(key, false);
+ CHKBlock block = chkDatastore.fetch(key, dontPromote);
+ if(block != null) return block;
+ return chkDatacache.fetch(key, dontPromote);
} catch (IOException e) {
Logger.error(this, "Cannot fetch data: "+e, e);
return null;
}
}
+
+ long timeLastDumpedHits;
+ public void dumpStoreHits() {
+ long now = System.currentTimeMillis();
+ if(now - timeLastDumpedHits > 5000) {
+ timeLastDumpedHits = now;
+ } else return;
+ Logger.minor(this, "Distribution of hits and misses over
stores:\n"+
+ "CHK Datastore:
"+chkDatastore.hits()+"/"+(chkDatastore.hits()+chkDatastore.misses())+"/"+chkDatastore.keyCount()+
+ "\nCHK Datacache:
"+chkDatacache.hits()+"/"+(chkDatacache.hits()+chkDatacache.misses())+"/"+chkDatacache.keyCount()+
+ "\nSSK Datastore:
"+sskDatastore.hits()+"/"+(sskDatastore.hits()+sskDatastore.misses())+"/"+sskDatastore.keyCount()+
+ "\nSSK Datacache:
"+sskDatacache.hits()+"/"+(sskDatacache.hits()+sskDatacache.misses())+"/"+sskDatacache.keyCount());
+ }
+
/**
* Store a datum.
+ * @param deep If true, insert to the store as well as the cache.
*/
- public void store(CHKBlock block) {
+ public void store(CHKBlock block, boolean deep) {
try {
- chkDatastore.put(block);
+ if(deep)
+ chkDatastore.put(block);
+ chkDatacache.put(block);
} catch (IOException e) {
Logger.error(this, "Cannot store data: "+e, e);
}
}
- public void store(SSKBlock block) throws KeyCollisionException {
+ public void store(SSKBlock block, boolean deep) throws KeyCollisionException {
try {
- sskDatastore.put(block, false);
- cacheKey(((NodeSSK)block.getKey()).getPubKeyHash(), ((NodeSSK)block.getKey()).getPubKey());
+ if(deep)
+ sskDatastore.put(block, false);
+ sskDatacache.put(block, false);
+ cacheKey(((NodeSSK)block.getKey()).getPubKeyHash(), ((NodeSSK)block.getKey()).getPubKey(), deep);
} catch (IOException e) {
Logger.error(this, "Cannot store data: "+e, e);
}
@@ -2852,12 +2944,13 @@
* if it originated locally.
*/
public SSKInsertSender makeInsertSender(SSKBlock block, short htl, long uid, PeerNode source,
- boolean fromStore, double closestLoc, boolean cache) {
+ boolean fromStore, double closestLoc, boolean resetClosestLoc, boolean cache) {
NodeSSK key = (NodeSSK) block.getKey();
if(key.getPubKey() == null) {
throw new IllegalArgumentException("No pub key when inserting");
}
- cacheKey(key.getPubKeyHash(), key.getPubKey());
+ if(cache)
+ cacheKey(key.getPubKeyHash(), key.getPubKey(), resetClosestLoc);
Logger.minor(this, "makeInsertSender("+key+","+htl+","+uid+","+source+",...,"+fromStore);
KeyHTLPair kh = new KeyHTLPair(key, htl);
SSKInsertSender is = null;
@@ -3079,9 +3172,12 @@
}
}
try {
- DSAPublicKey key = pubKeyDatastore.fetchPubKey(hash, false);
+ DSAPublicKey key;
+ key = pubKeyDatastore.fetchPubKey(hash, false);
+ if(key == null)
+ key = pubKeyDatacache.fetchPubKey(hash, false);
if(key != null) {
- cacheKey(hash, key);
+ cacheKey(hash, key, false);
Logger.minor(this, "Got "+HexUtil.bytesToHex(hash)+" from store");
}
return key;
@@ -3095,7 +3191,7 @@
/**
* Cache a public key
*/
- public void cacheKey(byte[] hash, DSAPublicKey key) {
+ public void cacheKey(byte[] hash, DSAPublicKey key, boolean deep) {
Logger.minor(this, "Cache key: "+HexUtil.bytesToHex(hash)+" :
"+key);
ImmutableByteArrayWrapper w = new
ImmutableByteArrayWrapper(hash);
synchronized(cachedPubKeys) {
@@ -3118,7 +3214,7 @@
} else {
Logger.error(this, "Old hash is
wrong!");
cachedPubKeys.removeKey(w);
- cacheKey(hash, key);
+ cacheKey(hash, key, deep);
}
} else {
Logger.error(this, "New hash is wrong");
@@ -3130,8 +3226,12 @@
cachedPubKeys.popKey();
}
try {
- pubKeyDatastore.put(hash, key);
- pubKeyDatastore.fetchPubKey(hash, true);
+ if(deep) {
+ pubKeyDatastore.put(hash, key);
+ pubKeyDatastore.fetchPubKey(hash, true);
+ }
+ pubKeyDatacache.put(hash, key);
+ pubKeyDatacache.fetchPubKey(hash, true);
} catch (IOException e) {
// FIXME deal with disk full, access perms etc; tell user about it.
Logger.error(this, "Error accessing pubkey store: "+e, e);
@@ -3142,32 +3242,32 @@
return testnetEnabled;
}
- public ClientKeyBlock fetchKey(ClientKey key) throws KeyVerifyException {
+ public ClientKeyBlock fetchKey(ClientKey key, boolean dontPromote) throws KeyVerifyException {
if(key instanceof ClientCHK)
- return fetch((ClientCHK)key);
+ return fetch((ClientCHK)key, dontPromote);
else if(key instanceof ClientSSK)
- return fetch((ClientSSK)key);
+ return fetch((ClientSSK)key, dontPromote);
else
throw new IllegalStateException("Don't know what to do with "+key);
}
- private ClientKeyBlock fetch(ClientSSK clientSSK) throws SSKVerifyException {
+ private ClientKeyBlock fetch(ClientSSK clientSSK, boolean dontPromote) throws SSKVerifyException {
DSAPublicKey key = clientSSK.getPubKey();
if(key == null) {
key = getKey(clientSSK.pubKeyHash);
}
if(key == null) return null;
clientSSK.setPublicKey(key);
- SSKBlock block = fetch((NodeSSK)clientSSK.getNodeKey());
+ SSKBlock block = fetch((NodeSSK)clientSSK.getNodeKey(), dontPromote);
if(block == null) return null;
// Move the pubkey to the top of the LRU, and fix it if it
// was corrupt.
- cacheKey(clientSSK.pubKeyHash, key);
+ cacheKey(clientSSK.pubKeyHash, key, false);
return new ClientSSKBlock(block, clientSSK);
}
- private ClientKeyBlock fetch(ClientCHK clientCHK) throws CHKVerifyException {
- CHKBlock block = fetch(clientCHK.getNodeCHK());
+ private ClientKeyBlock fetch(ClientCHK clientCHK, boolean dontPromote) throws CHKVerifyException {
+ CHKBlock block = fetch(clientCHK.getNodeCHK(), dontPromote);
if(block == null) return null;
return new ClientCHKBlock(block, clientCHK);
}
Modified: trunk/freenet/src/freenet/node/RequestHandler.java
===================================================================
--- trunk/freenet/src/freenet/node/RequestHandler.java 2006-08-05 00:51:02 UTC (rev 9895)
+++ trunk/freenet/src/freenet/node/RequestHandler.java 2006-08-05 02:09:37 UTC (rev 9896)
@@ -29,6 +29,7 @@
private boolean needsPubKey;
final Key key;
private boolean finalTransferFailed = false;
+ final boolean resetClosestLoc;
public String toString() {
return super.toString()+" for "+uid;
@@ -47,7 +48,9 @@
if(PeerManager.distance(keyLoc, myLoc) < PeerManager.distance(keyLoc, closestLoc)) {
closestLoc = myLoc;
htl = Node.MAX_HTL;
- }
+ resetClosestLoc = true;
+ } else
+ resetClosestLoc = false;
if(key instanceof NodeSSK)
needsPubKey = m.getBoolean(DMT.NEED_PUB_KEY);
}
@@ -62,7 +65,7 @@
Message accepted = DMT.createFNPAccepted(uid);
source.send(accepted, null);
- Object o = node.makeRequestSender(key, htl, uid, source, closestLoc, false, true, false);
+ Object o = node.makeRequestSender(key, htl, uid, source, closestLoc, resetClosestLoc, false, true, false);
if(o instanceof KeyBlock) {
KeyBlock block = (KeyBlock) o;
Message df = createDataFound(block);
Modified: trunk/freenet/src/freenet/node/RequestSender.java
===================================================================
--- trunk/freenet/src/freenet/node/RequestSender.java 2006-08-05 00:51:02 UTC (rev 9895)
+++ trunk/freenet/src/freenet/node/RequestSender.java 2006-08-05 02:09:37 UTC (rev 9896)
@@ -46,6 +46,7 @@
// Basics
final Key key;
final double target;
+ final boolean resetNearestLoc;
private short htl;
final long uid;
final Node node;
@@ -83,7 +84,7 @@
* @param key The key to request. Its public key should have been looked up
* already; RequestSender will not look it up.
*/
- public RequestSender(Key key, DSAPublicKey pubKey, short htl, long uid, Node n, double nearestLoc,
+ public RequestSender(Key key, DSAPublicKey pubKey, short htl, long uid, Node n, double nearestLoc, boolean resetNearestLoc,
PeerNode source) {
this.key = key;
this.pubKey = pubKey;
@@ -92,6 +93,7 @@
this.node = n;
this.source = source;
this.nearestLoc = nearestLoc;
+ this.resetNearestLoc = resetNearestLoc;
target = key.toNormalizedDouble();
node.addRequestSender(key, htl, this);
}
@@ -397,7 +399,7 @@
private void finishSSK(PeerNode next) {
try {
block = new SSKBlock(sskData, headers, (NodeSSK)key, false);
- node.store(block);
+ node.store(block, resetNearestLoc);
if(node.random.nextInt(RANDOM_REINSERT_INTERVAL) == 0)
node.queueRandomReinsert(block);
finish(SUCCESS, next);
@@ -434,12 +436,12 @@
private void verifyAndCommit(byte[] data) throws KeyVerifyException {
if(key instanceof NodeCHK) {
CHKBlock block = new CHKBlock(data, headers, (NodeCHK)key);
- node.store(block);
+ node.store(block, resetNearestLoc);
if(node.random.nextInt(RANDOM_REINSERT_INTERVAL) == 0)
node.queueRandomReinsert(block);
} else if (key instanceof NodeSSK) {
try {
- node.store(new SSKBlock(data, headers, (NodeSSK)key, false));
+ node.store(new SSKBlock(data, headers, (NodeSSK)key, false), resetNearestLoc);
} catch (KeyCollisionException e) {
Logger.normal(this, "Collision on "+this);
}
Modified: trunk/freenet/src/freenet/node/SSKInsertHandler.java
===================================================================
--- trunk/freenet/src/freenet/node/SSKInsertHandler.java 2006-08-05 00:51:02 UTC (rev 9895)
+++ trunk/freenet/src/freenet/node/SSKInsertHandler.java 2006-08-05 02:09:37 UTC (rev 9896)
@@ -32,6 +32,7 @@
private SSKBlock block;
private DSAPublicKey pubKey;
private double closestLoc;
+ private final boolean resetClosestLoc;
private short htl;
private SSKInsertSender sender;
private byte[] data;
@@ -51,7 +52,8 @@
if(PeerManager.distance(targetLoc, myLoc) < PeerManager.distance(targetLoc, closestLoc)) {
closestLoc = myLoc;
htl = Node.MAX_HTL;
- }
+ resetClosestLoc = true;
+ } else resetClosestLoc = false;
byte[] pubKeyHash = ((ShortBuffer)req.getObject(DMT.PUBKEY_HASH)).getData();
pubKey = node.getKey(pubKeyHash);
data = ((ShortBuffer) req.getObject(DMT.DATA)).getData();
@@ -138,7 +140,7 @@
return;
}
- SSKBlock storedBlock = node.fetch(key);
+ SSKBlock storedBlock = node.fetch(key, false);
if((storedBlock != null) && !storedBlock.equals(block)) {
Message msg = DMT.createFNPSSKDataFound(uid, storedBlock.getRawHeaders(), storedBlock.getRawData());
@@ -165,7 +167,7 @@
}
if(htl > 0)
- sender = node.makeInsertSender(block, htl, uid, source, false, closestLoc, true);
+ sender = node.makeInsertSender(block, htl, uid, source, false, closestLoc, resetClosestLoc, true);
boolean receivedRejectedOverload = false;
@@ -285,7 +287,7 @@
if(canCommit) {
try {
- node.store(block);
+ node.store(block, resetClosestLoc);
} catch (KeyCollisionException e) {
Logger.normal(this, "Collision on "+this);
}
Modified: trunk/freenet/src/freenet/node/Version.java
===================================================================
--- trunk/freenet/src/freenet/node/Version.java 2006-08-05 00:51:02 UTC (rev 9895)
+++ trunk/freenet/src/freenet/node/Version.java 2006-08-05 02:09:37 UTC (rev 9896)
@@ -18,7 +18,7 @@
public static final String protocolVersion = "1.0";
/** The build number of the current revision */
- private static final int buildNumber = 926;
+ private static final int buildNumber = 927;
/** Oldest build of Fred we will talk to */
private static final int oldLastGoodBuild = 874;
Modified: trunk/freenet/src/freenet/store/BerkeleyDBFreenetStore.java
===================================================================
--- trunk/freenet/src/freenet/store/BerkeleyDBFreenetStore.java 2006-08-05 00:51:02 UTC (rev 9895)
+++ trunk/freenet/src/freenet/store/BerkeleyDBFreenetStore.java 2006-08-05 02:09:37 UTC (rev 9896)
@@ -67,6 +67,8 @@
private long chkBlocksInStore;
private final Object chkBlocksInStoreLock = new Object();
private long maxChkBlocks;
+ private long hits;
+ private long misses;
private final Database chkDB;
private final SecondaryDatabase chkDB_accessTime;
private final SecondaryDatabase chkDB_blockNum;
@@ -725,6 +727,9 @@
c = null;
t.abort();
t = null;
+ synchronized(this) {
+ misses++;
+ }
return null;
}
@@ -783,8 +788,14 @@
t.commit();
t = null;
addFreeBlock(storeBlock.offset);
+ synchronized(this) {
+ misses++;
+ }
return null;
}
+ synchronized(this) {
+ hits++;
+ }
return block;
}catch(Throwable ex) { // FIXME: ugly
if(c!=null) {
@@ -825,6 +836,9 @@
c = null;
t.abort();
t = null;
+ synchronized(this) {
+ misses++;
+ }
return null;
}
@@ -871,8 +885,14 @@
t.commit();
t = null;
addFreeBlock(storeBlock.offset);
+ synchronized(this) {
+ misses++;
+ }
return null;
}
+ synchronized(this) {
+ hits++;
+ }
return block;
}catch(Throwable ex) { // FIXME: ugly
if(c!=null) {
@@ -921,6 +941,9 @@
c = null;
t.abort();
t = null;
+ synchronized(this) {
+ misses++;
+ }
return null;
}
@@ -938,55 +961,36 @@
DSAPublicKey block = null;
- byte[] data = new byte[dataBlockSize];
- Logger.minor(this, "Reading from store...
"+storeBlock.offset+" ("+storeBlock.recentlyUsed+")");
- synchronized(chkStore) {
-
chkStore.seek(storeBlock.offset*(long)(dataBlockSize+headerBlockSize));
- chkStore.readFully(data);
- }
- Logger.minor(this, "Read");
-
- try {
- block = new DSAPublicKey(data);
- } catch (IOException e) {
- Logger.error(this, "Could not read key");
- c.close();
- c = null;
- t.abort();
- t = null;
- return null;
- }
-
- if(!Arrays.equals(block.asBytesHash(), hash)) {
-
- if(replacement != null) {
- Logger.normal(this, "Replacing corrupt
DSAPublicKey ("+HexUtil.bytesToHex(hash));
- synchronized(chkStore) {
-
chkStore.seek(storeBlock.offset*(long)(dataBlockSize+headerBlockSize));
- byte[] toWrite =
block.asPaddedBytes();
- chkStore.write(toWrite);
- }
- } else {
- Logger.error(this, "DSAPublicKey: Does
not verify (unequal hashes), setting accessTime to 0 for :
"+HexUtil.bytesToHex(hash));
- c.close();
- c = null;
- chkDB.delete(t, routingkeyDBE);
- t.commit();
- t = null;
- addFreeBlock(storeBlock.offset);
- return null;
- }
- }
-
- // Finished, commit.
- c.close();
- c = null;
- t.commit();
- t = null;
-
- Logger.minor(this, "Get key:
"+HexUtil.bytesToHex(hash));
- Logger.minor(this, "Data: "+data.length+" bytes, hash
"+data);
-
+ byte[] data = new byte[dataBlockSize];
+ Logger.minor(this, "Reading from store... "+storeBlock.offset+"
("+storeBlock.recentlyUsed+")");
+ synchronized(chkStore) {
+
chkStore.seek(storeBlock.offset*(long)(dataBlockSize+headerBlockSize));
+ chkStore.readFully(data);
+ }
+ Logger.minor(this, "Read");
+
+ try {
+ block = new DSAPublicKey(data);
+ } catch (IOException e) {
+ Logger.error(this, "Could not read key: "+e, e);
+ finishKey(storeBlock, c, t, routingkeyDBE, hash, replacement);
+ }
+
+ if(!Arrays.equals(block.asBytesHash(), hash)) {
+ finishKey(storeBlock, c, t, routingkeyDBE, hash, replacement);
+ }
+ // Finished, commit.
+ c.close();
+ c = null;
+ t.commit();
+ t = null;
+
+ Logger.minor(this, "Get key: "+HexUtil.bytesToHex(hash));
+ Logger.minor(this, "Data: "+data.length+" bytes, hash "+data);
+
+ synchronized(this) {
+ hits++;
+ }
return block;
}catch(Throwable ex) { // FIXME: ugly
if(c!=null) {
@@ -1004,7 +1008,31 @@
// return null;
}
- private void addFreeBlock(long offset) {
+ private boolean finishKey(StoreBlock storeBlock, Cursor c, Transaction t, DatabaseEntry routingkeyDBE, byte[] hash, DSAPublicKey replacement) throws IOException, DatabaseException {
+ if(replacement != null) {
+ Logger.normal(this, "Replacing corrupt DSAPublicKey ("+HexUtil.bytesToHex(hash));
+ synchronized(chkStore) {
+ chkStore.seek(storeBlock.offset*(long)(dataBlockSize+headerBlockSize));
+ byte[] toWrite = replacement.asPaddedBytes();
+ chkStore.write(toWrite);
+ }
+ return true;
+ } else {
+ Logger.error(this, "DSAPublicKey: Does not verify
(unequal hashes), setting accessTime to 0 for : "+HexUtil.bytesToHex(hash));
+ c.close();
+ c = null;
+ chkDB.delete(t, routingkeyDBE);
+ t.commit();
+ t = null;
+ addFreeBlock(storeBlock.offset);
+ synchronized(this) {
+ misses++;
+ }
+ return false;
+ }
+ }
+
+ private void addFreeBlock(long offset) {
if(freeBlocks.push(offset)) {
System.err.println("Freed block "+offset);
Logger.normal(this, "Freed block "+offset);
@@ -1487,4 +1515,16 @@
}
maybeShrink(false, false);
}
+
+ public long hits() {
+ return hits;
+ }
+
+ public long misses() {
+ return misses;
+ }
+
+ public long keyCount() {
+ return chkBlocksInStore;
+ }
}
Modified: trunk/freenet/src/freenet/store/FreenetStore.java
===================================================================
--- trunk/freenet/src/freenet/store/FreenetStore.java 2006-08-05 00:51:02 UTC (rev 9895)
+++ trunk/freenet/src/freenet/store/FreenetStore.java 2006-08-05 02:09:37 UTC (rev 9896)
@@ -58,4 +58,10 @@
* @throws DatabaseException
*/
public void setMaxKeys(long maxStoreKeys) throws DatabaseException, IOException;
+
+ public long hits();
+
+ public long misses();
+
+ public long keyCount();
}
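
These counters feed Node.dumpStoreHits() above, which logs each store as hits/(hits+misses)/keyCount. A hedged sketch of that reporting (the helper name below is illustrative, not part of this commit):

    // Illustrative only: summarise a FreenetStore the way Node.dumpStoreHits()
    // formats it, i.e. "hits/accesses/keys".
    static String summarize(freenet.store.FreenetStore store) {
        long hits = store.hits();
        long accesses = hits + store.misses();
        return hits + "/" + accesses + "/" + store.keyCount();
    }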