Author: toad
Date: 2009-02-17 23:37:53 +0000 (Tue, 17 Feb 2009)
New Revision: 25675
Modified:
branches/db4o/freenet/src/freenet/client/ArchiveExtractCallback.java
branches/db4o/freenet/src/freenet/client/ArchiveHandlerImpl.java
branches/db4o/freenet/src/freenet/client/async/BaseSingleFileFetcher.java
branches/db4o/freenet/src/freenet/client/async/DatastoreChecker.java
branches/db4o/freenet/src/freenet/client/async/SimpleSingleFileFetcher.java
branches/db4o/freenet/src/freenet/client/async/SingleFileFetcher.java
branches/db4o/freenet/src/freenet/client/async/SplitFileFetcher.java
branches/db4o/freenet/src/freenet/client/async/USKChecker.java
branches/db4o/freenet/src/freenet/client/async/USKProxyCompletionCallback.java
branches/db4o/freenet/src/freenet/keys/USK.java
branches/db4o/freenet/src/freenet/node/fcp/FCPClient.java
branches/db4o/freenet/src/freenet/node/fcp/FCPConnectionHandler.java
Log:
Lots of work on fixing leaks of database objects:
Remove ArchiveExtractCallbacks from the database when finished.
Remove the ClientRequest from the database *after* cancelling it.
BaseSingleFileFetcher and descendants, SplitFileFetcher: Track whether we
created a new FetchContext, and delete it if we did; pass on the responsibility
as appropriate.
SingleFileFetcher: Remove the SFF from the database where necessary when
switching to a new fetcher, when throwing etc.
SingleFileFetcher: clone the ClientKey and thisKey
SingleFileFetcher: Clear the archiveMetadata in multi-level metadata/archive
fetchers.
ArchiveFetcher-/MultiLevelMetadata-Callback: Call removeFrom() on the
ClientGetState, and delete self.
USKProxyCompletionCallback: remove self when done
DatastoreChecker: Delete the DCI even if the original SendableRequest has been
removed already.
SplitFileFetcher: clone clientMetadata as we do in SingleFileFetcher, clone
decompressors. Delete them in removeFrom().
SplitFileFetcher: write the metadata after removing the keys from it.
Minor activation fix in SplitFileFetcher.removeFrom(): activate objects before deleting them.
Modified: branches/db4o/freenet/src/freenet/client/ArchiveExtractCallback.java
===================================================================
--- branches/db4o/freenet/src/freenet/client/ArchiveExtractCallback.java
2009-02-17 23:15:14 UTC (rev 25674)
+++ branches/db4o/freenet/src/freenet/client/ArchiveExtractCallback.java
2009-02-17 23:37:53 UTC (rev 25675)
@@ -21,5 +21,7 @@
/** Failed for some other reason */
public void onFailed(ArchiveFailureException e, ObjectContainer
container, ClientContext context);
+
+ public void removeFrom(ObjectContainer container);
}
Modified: branches/db4o/freenet/src/freenet/client/ArchiveHandlerImpl.java
===================================================================
--- branches/db4o/freenet/src/freenet/client/ArchiveHandlerImpl.java
2009-02-17 23:15:14 UTC (rev 25674)
+++ branches/db4o/freenet/src/freenet/client/ArchiveHandlerImpl.java
2009-02-17 23:37:53 UTC (rev 25675)
@@ -147,6 +147,7 @@
tag.callback.notInArchive(container, context);
else
tag.callback.gotBucket(data, container, context);
+
tag.callback.removeFrom(container);
container.deactivate(tag.callback, 1);
container.delete(tag);
}
@@ -160,6 +161,7 @@
public void run(ObjectContainer
container, ClientContext context) {
container.activate(tag.callback, 1);
tag.callback.onFailed(e, container, context);
+
tag.callback.removeFrom(container);
container.delete(tag);
}
@@ -172,6 +174,7 @@
public void run(ObjectContainer
container, ClientContext context) {
container.activate(tag.callback, 1);
tag.callback.onFailed(e, container, context);
+
tag.callback.removeFrom(container);
container.delete(tag);
}
@@ -218,6 +221,10 @@
// Must not be called.
throw new UnsupportedOperationException();
}
+
+ public void removeFrom(ObjectContainer container) {
+ container.delete(this);
+ }
}
Modified:
branches/db4o/freenet/src/freenet/client/async/BaseSingleFileFetcher.java
===================================================================
--- branches/db4o/freenet/src/freenet/client/async/BaseSingleFileFetcher.java
2009-02-17 23:15:14 UTC (rev 25674)
+++ branches/db4o/freenet/src/freenet/client/async/BaseSingleFileFetcher.java
2009-02-17 23:37:53 UTC (rev 25675)
@@ -31,13 +31,15 @@
final int maxRetries;
private int retryCount;
final FetchContext ctx;
+ protected boolean deleteFetchContext;
static final SendableRequestItem[] keys = new SendableRequestItem[] {
NullSendableRequestItem.nullItem };
/** It is essential that we know when the cooldown will end, otherwise
we cannot
* remove the key from the queue if we are killed before that */
long cooldownWakeupTime;
- protected BaseSingleFileFetcher(ClientKey key, int maxRetries,
FetchContext ctx, ClientRequester parent) {
+ protected BaseSingleFileFetcher(ClientKey key, int maxRetries,
FetchContext ctx, ClientRequester parent, boolean deleteFetchContext) {
super(parent);
+ this.deleteFetchContext = deleteFetchContext;
if(Logger.shouldLog(Logger.MINOR, this))
Logger.minor(this, "Creating BaseSingleFileFetcher for
"+key);
retryCount = 0;
@@ -339,7 +341,7 @@
public void removeFrom(ObjectContainer container, ClientContext
context) {
super.removeFrom(container, context);
- // ctx is passed in, not our responsibility
+ if(deleteFetchContext) ctx.removeFrom(container);
key.removeFrom(container);
}
Modified: branches/db4o/freenet/src/freenet/client/async/DatastoreChecker.java
===================================================================
--- branches/db4o/freenet/src/freenet/client/async/DatastoreChecker.java
2009-02-17 23:15:14 UTC (rev 25674)
+++ branches/db4o/freenet/src/freenet/client/async/DatastoreChecker.java
2009-02-17 23:37:53 UTC (rev 25675)
@@ -395,6 +395,7 @@
// Completed and deleted
already.
if(Logger.shouldLog(Logger.MINOR, this))
Logger.minor(this,
"Already deleted from database");
+ container.delete(it);
return;
}
container.activate(get, 1);
Modified:
branches/db4o/freenet/src/freenet/client/async/SimpleSingleFileFetcher.java
===================================================================
--- branches/db4o/freenet/src/freenet/client/async/SimpleSingleFileFetcher.java
2009-02-17 23:15:14 UTC (rev 25674)
+++ branches/db4o/freenet/src/freenet/client/async/SimpleSingleFileFetcher.java
2009-02-17 23:37:53 UTC (rev 25675)
@@ -26,8 +26,8 @@
public class SimpleSingleFileFetcher extends BaseSingleFileFetcher implements
ClientGetState {
SimpleSingleFileFetcher(ClientKey key, int maxRetries, FetchContext
ctx, ClientRequester parent,
- GetCompletionCallback rcb, boolean isEssential, boolean
dontAdd, long l, ObjectContainer container, ClientContext context) {
- super(key, maxRetries, ctx, parent);
+ GetCompletionCallback rcb, boolean isEssential, boolean
dontAdd, long l, ObjectContainer container, ClientContext context, boolean
deleteFetchContext) {
+ super(key, maxRetries, ctx, parent, deleteFetchContext);
this.rcb = rcb;
this.token = l;
if(!dontAdd) {
Modified: branches/db4o/freenet/src/freenet/client/async/SingleFileFetcher.java
===================================================================
--- branches/db4o/freenet/src/freenet/client/async/SingleFileFetcher.java
2009-02-17 23:15:14 UTC (rev 25674)
+++ branches/db4o/freenet/src/freenet/client/async/SingleFileFetcher.java
2009-02-17 23:37:53 UTC (rev 25675)
@@ -66,11 +66,11 @@
* FIXME: Many times where this is called internally we might be better
off using a copy constructor?
*/
public SingleFileFetcher(ClientRequester parent, GetCompletionCallback
cb, ClientMetadata metadata,
- ClientKey key, List<String> metaStrings, FreenetURI
origURI, int addedMetaStrings, FetchContext ctx,
+ ClientKey key, List<String> metaStrings, FreenetURI
origURI, int addedMetaStrings, FetchContext ctx, boolean deleteFetchContext,
ArchiveContext actx, ArchiveHandler ah, Metadata
archiveMetadata, int maxRetries, int recursionLevel,
boolean dontTellClientGet, long l, boolean isEssential,
Bucket returnBucket, boolean isFinal, ObjectContainer
container, ClientContext context) throws FetchException {
- super(key, maxRetries, ctx, parent, cb, isEssential, false, l,
container, context);
+ super(key, maxRetries, ctx, parent, cb, isEssential, false, l,
container, context, deleteFetchContext);
logMINOR = Logger.shouldLog(Logger.MINOR, this);
if(logMINOR) Logger.minor(this, "Creating SingleFileFetcher for
"+key+" from "+origURI+" meta="+metaStrings.toString(), new Exception("debug"));
this.isFinal = isFinal;
@@ -103,9 +103,9 @@
/** Copy constructor, modifies a few given fields, don't call
schedule().
* Used for things like slave fetchers for MultiLevelMetadata,
therefore does not remember returnBucket,
* metaStrings etc. */
- public SingleFileFetcher(SingleFileFetcher fetcher, Metadata newMeta,
GetCompletionCallback callback, FetchContext ctx2, ObjectContainer container,
ClientContext context) throws FetchException {
+ public SingleFileFetcher(SingleFileFetcher fetcher, boolean persistent,
boolean deleteFetchContext, Metadata newMeta, GetCompletionCallback callback,
FetchContext ctx2, ObjectContainer container, ClientContext context) throws
FetchException {
// Don't add a block, we have already fetched the data, we are
just handling the metadata in a different fetcher.
- super(fetcher.key, fetcher.maxRetries, ctx2, fetcher.parent,
callback, false, true, fetcher.token, container, context);
+ super(persistent ? fetcher.key.cloneKey() : fetcher.key,
fetcher.maxRetries, ctx2, fetcher.parent, callback, false, true, fetcher.token,
container, context, deleteFetchContext);
logMINOR = Logger.shouldLog(Logger.MINOR, this);
if(logMINOR) Logger.minor(this, "Creating SingleFileFetcher for
"+fetcher.key+" meta="+fetcher.metaStrings.toString(), new Exception("debug"));
this.returnBucket = null;
@@ -115,7 +115,7 @@
this.actx = fetcher.actx;
this.ah = fetcher.ah;
if(persistent && ah != null) ah = ah.cloneHandler();
- this.archiveMetadata = fetcher.archiveMetadata;
+ this.archiveMetadata = null;
this.clientMetadata = (fetcher.clientMetadata != null ?
(ClientMetadata) fetcher.clientMetadata.clone() : new ClientMetadata());
this.metadata = newMeta;
this.metaStrings = new ArrayList<String>();
@@ -123,7 +123,7 @@
this.recursionLevel = fetcher.recursionLevel + 1;
if(recursionLevel > ctx.maxRecursionLevel)
throw new
FetchException(FetchException.TOO_MUCH_RECURSION);
- this.thisKey = fetcher.thisKey;
+ this.thisKey = persistent ? fetcher.thisKey.clone() :
fetcher.thisKey;
// Copy the decompressors. Just because a multi-level metadata
splitfile
// is compressed, that **doesn't** mean that the data we are
eventually
// going to fetch is!
@@ -435,6 +435,9 @@
if(persistent)
container.deactivate(SingleFileFetcher.this, 1);
}
+ public void
removeFrom(ObjectContainer container) {
+ container.delete(this);
+ }
}, container, context); // will result
in this function being called again
if(persistent) container.store(this);
return;
@@ -530,6 +533,9 @@
if(persistent)
container.deactivate(SingleFileFetcher.this, 1);
}
+ public void
removeFrom(ObjectContainer container) {
+ container.delete(this);
+ }
}, container, context);
removeMetadata(container);
// Will call back into this function
when it has been fetched.
@@ -539,7 +545,7 @@
if(logMINOR) Logger.minor(this, "Is multi-level
metadata");
// Fetch on a second SingleFileFetcher, like
with archives.
metadata.setSimpleRedirect();
- final SingleFileFetcher f = new
SingleFileFetcher(this, metadata, new MultiLevelMetadataCallback(), ctx,
container, context);
+ final SingleFileFetcher f = new
SingleFileFetcher(this, persistent, false, metadata, new
MultiLevelMetadataCallback(), ctx, container, context);
// Clear our own metadata so it can be garbage
collected, it will be replaced by whatever is fetched.
// The new fetcher has our metadata so we don't
need to removeMetadata().
this.metadata = null;
@@ -600,7 +606,8 @@
addedMetaStrings++;
}
- final SingleFileFetcher f = new
SingleFileFetcher(parent, rcb, clientMetadata, redirectedKey, metaStrings,
this.uri, addedMetaStrings, ctx, actx, ah, archiveMetadata, maxRetries,
recursionLevel, false, token, true, returnBucket, isFinal, container, context);
+ final SingleFileFetcher f = new
SingleFileFetcher(parent, rcb, clientMetadata, redirectedKey, metaStrings,
this.uri, addedMetaStrings, ctx, deleteFetchContext, actx, ah, archiveMetadata,
maxRetries, recursionLevel, false, token, true, returnBucket, isFinal,
container, context);
+ this.deleteFetchContext = false;
if((redirectedKey instanceof ClientCHK) &&
!((ClientCHK)redirectedKey).isMetadata())
rcb.onBlockSetFinished(this, container,
context);
if(metadata.isCompressed()) {
@@ -615,7 +622,8 @@
}
f.schedule(container, context);
// All done! No longer our problem!
- removeMetadata(container);
+ archiveMetadata = null; // passed on
+ if(persistent) removeFrom(container, context);
return;
} else if(metadata.isSplitfile()) {
if(logMINOR) Logger.minor(this, "Fetching
splitfile");
@@ -643,7 +651,7 @@
!ctx.allowedMIMETypes.contains(mimeType)) {
// Just in case...
long len =
metadata.uncompressedDataLength();
- removeMetadata(container);
+ if(persistent) removeFrom(container,
context);
throw new
FetchException(FetchException.WRONG_MIME_TYPE, len, false,
clientMetadata.getMIMEType());
}
@@ -673,7 +681,7 @@
rcb.onFailure(new
FetchException(FetchException.TOO_MANY_PATH_COMPONENTS,
metadata.uncompressedDataLength(), (rcb == parent),
clientMetadata.getMIMEType(), tryURI), this, container, context);
}
// Just in case...
- removeMetadata(container);
+ if(persistent)
removeFrom(container, context);
return;
}
} else
@@ -687,12 +695,13 @@
(len > ctx.maxTempLength)) {
// Just in case...
boolean compressed =
metadata.isCompressed();
- removeMetadata(container);
+ if(persistent) removeFrom(container,
context);
throw new
FetchException(FetchException.TOO_BIG, len, isFinal && decompressors.size() <=
(compressed ? 1 : 0), clientMetadata.getMIMEType());
}
- SplitFileFetcher sf = new
SplitFileFetcher(metadata, rcb, parent, ctx,
+ SplitFileFetcher sf = new
SplitFileFetcher(metadata, rcb, parent, ctx, deleteFetchContext,
decompressors, clientMetadata,
actx, recursionLevel, returnBucket, token, container, context);
+ this.deleteFetchContext = false;
if(persistent) {
container.store(sf); // Avoid problems
caused by storing a deactivated sf
if(!container.ext().isActive(parent)) {
@@ -715,12 +724,7 @@
// and will have removed them from it so they
don't get removed here.
// Lack of garbage collection in db4o is a PITA!
// For multi-level metadata etc see above.
- removeMetadata(container);
-
- // SplitFile will now run.
- // Then it will return data to rcd.
- // We are now out of the loop. Yay!
- if(persistent) container.store(this);
+ if(persistent) removeFrom(container, context);
return;
} else {
Logger.error(this, "Don't know what to do with
metadata: "+metadata);
@@ -753,7 +757,7 @@
Metadata newMeta = (Metadata) meta.clone();
newMeta.setSimpleRedirect();
final SingleFileFetcher f;
- f = new SingleFileFetcher(this, newMeta, new
ArchiveFetcherCallback(forData, element, callback), new FetchContext(ctx,
FetchContext.SET_RETURN_ARCHIVES, true, null), container, context);
+ f = new SingleFileFetcher(this, persistent, true, newMeta, new
ArchiveFetcherCallback(forData, element, callback), new FetchContext(ctx,
FetchContext.SET_RETURN_ARCHIVES, true, null), container, context);
if(persistent) container.store(f);
if(logMINOR) Logger.minor(this, "fetchArchive(): "+f);
// Fetch the archive. The archive fetcher callback will unpack
it, and either call the element
@@ -834,6 +838,9 @@
ah.extractPersistentOffThread(result.asBucket(), actx, element, callback,
container, context);
if(!wasActive)
container.deactivate(SingleFileFetcher.this, 1);
+ if(state != null)
+ state.removeFrom(container, context);
+ container.delete(this);
}
}
@@ -865,6 +872,12 @@
SingleFileFetcher.this.onFailure(e, true, container,
context);
if(!wasActive)
container.deactivate(SingleFileFetcher.this, 1);
+ if(persistent) {
+ if(state != null)
+ state.removeFrom(container, context);
+ container.delete(this);
+ callback.removeFrom(container);
+ }
}
public void onBlockSetFinished(ClientGetState state,
ObjectContainer container, ClientContext context) {
@@ -948,6 +961,10 @@
}
if(!wasActive)
container.deactivate(SingleFileFetcher.this, 1);
+ if(state != null)
+ state.removeFrom(container, context);
+ if(persistent)
+ container.delete(this);
}
public void onFailure(FetchException e, ClientGetState state,
ObjectContainer container, ClientContext context) {
@@ -960,6 +977,8 @@
SingleFileFetcher.this.onFailure(e, true, container,
context);
if(!wasActive)
container.deactivate(SingleFileFetcher.this, 1);
+ if(state != null)
+ state.removeFrom(container, context);
}
public void onBlockSetFinished(ClientGetState state,
ObjectContainer container, ClientContext context) {
@@ -1012,9 +1031,9 @@
if((!uri.hasMetaStrings()) &&
ctx.allowSplitfiles == false &&
ctx.followRedirects == false &&
returnBucket == null && key instanceof
ClientKey)
- return new SimpleSingleFileFetcher((ClientKey)key,
maxRetries, ctx, requester, cb, isEssential, false, l, container, context);
+ return new SimpleSingleFileFetcher((ClientKey)key,
maxRetries, ctx, requester, cb, isEssential, false, l, container, context,
false);
if(key instanceof ClientKey)
- return new SingleFileFetcher(requester, cb, null,
(ClientKey)key, new ArrayList<String>(uri.listMetaStrings()), uri, 0, ctx,
actx, null, null, maxRetries, recursionLevel, dontTellClientGet, l,
isEssential, returnBucket, isFinal, container, context);
+ return new SingleFileFetcher(requester, cb, null,
(ClientKey)key, new ArrayList<String>(uri.listMetaStrings()), uri, 0, ctx,
false, actx, null, null, maxRetries, recursionLevel, dontTellClientGet, l,
isEssential, returnBucket, isFinal, container, context);
else {
return uskCreate(requester, cb, (USK)key, new
ArrayList<String>(uri.listMetaStrings()), ctx, actx, maxRetries,
recursionLevel, dontTellClientGet, l, isEssential, returnBucket, isFinal,
container, context);
}
@@ -1039,7 +1058,7 @@
// Want to update the latest known good
iff the fetch succeeds.
SingleFileFetcher sf =
new
SingleFileFetcher(requester, myCB, null, usk.getSSK(), metaStrings,
-
usk.getURI().addMetaStrings(metaStrings), 0, ctx, actx, null, null, maxRetries,
recursionLevel,
+
usk.getURI().addMetaStrings(metaStrings), 0, ctx, false, actx, null, null,
maxRetries, recursionLevel,
dontTellClientGet, l, isEssential, returnBucket, isFinal, container, context);
return sf;
}
@@ -1094,8 +1113,8 @@
ClientSSK key = usk.getSSK(l);
try {
if(l == usk.suggestedEdition) {
- SingleFileFetcher sf = new
SingleFileFetcher(parent, cb, null, key, metaStrings,
key.getURI().addMetaStrings(metaStrings),
- 0, ctx, actx, null,
null, maxRetries, recursionLevel+1, dontTellClientGet, token, false,
returnBucket, true, container, context);
+ SingleFileFetcher sf = new
SingleFileFetcher(parent, cb, null, persistent ? key : key.cloneKey(),
metaStrings, key.getURI().addMetaStrings(metaStrings),
+ 0, ctx, false, actx,
null, null, maxRetries, recursionLevel+1, dontTellClientGet, token, false,
returnBucket, true, container, context);
sf.schedule(container, context);
} else {
cb.onFailure(new
FetchException(FetchException.PERMANENT_REDIRECT,
newUSK.getURI().addMetaStrings(metaStrings)), null, container, context);
Modified: branches/db4o/freenet/src/freenet/client/async/SplitFileFetcher.java
===================================================================
--- branches/db4o/freenet/src/freenet/client/async/SplitFileFetcher.java
2009-02-17 23:15:14 UTC (rev 25674)
+++ branches/db4o/freenet/src/freenet/client/async/SplitFileFetcher.java
2009-02-17 23:37:53 UTC (rev 25675)
@@ -6,6 +6,7 @@
import java.io.File;
import java.io.IOException;
import java.io.OutputStream;
+import java.util.ArrayList;
import java.util.List;
import com.db4o.ObjectContainer;
@@ -37,6 +38,7 @@
final FetchContext fetchContext;
final FetchContext blockFetchContext;
+ final boolean deleteFetchContext;
final ArchiveContext archiveContext;
final List decompressors;
final ClientMetadata clientMetadata;
@@ -112,9 +114,10 @@
private transient SplitFileFetcherKeyListener tempListener;
public SplitFileFetcher(Metadata metadata, GetCompletionCallback rcb,
ClientRequester parent2,
- FetchContext newCtx, List decompressors2,
ClientMetadata clientMetadata,
+ FetchContext newCtx, boolean deleteFetchContext, List
decompressors2, ClientMetadata clientMetadata,
ArchiveContext actx, int recursionLevel, Bucket
returnBucket, long token2, ObjectContainer container, ClientContext context)
throws FetchException, MetadataParseException {
this.persistent = parent2.persistent();
+ this.deleteFetchContext = deleteFetchContext;
if(Logger.shouldLog(Logger.MINOR, this))
Logger.minor(this, "Persistence = "+persistent+" from
"+parent2, new Exception("debug"));
this.hashCode = super.hashCode();
@@ -124,11 +127,11 @@
if(newCtx == null)
throw new NullPointerException();
this.archiveContext = actx;
- this.decompressors = decompressors2;
+ this.decompressors = persistent ? new ArrayList(decompressors2)
: decompressors2;
if(decompressors.size() > 1) {
Logger.error(this, "Multiple decompressors:
"+decompressors.size()+" - this is almost certainly a bug", new
Exception("debug"));
}
- this.clientMetadata = clientMetadata;
+ this.clientMetadata = clientMetadata == null ? new
ClientMetadata() : (ClientMetadata) clientMetadata.clone(); // copy it as in
SingleFileFetcher
this.cb = rcb;
this.recursionLevel = recursionLevel + 1;
this.parent = parent2;
@@ -143,6 +146,7 @@
if(persistent) {
// Clear them here so they don't get deleted and we
don't need to clone them.
metadata.clearSplitfileKeys();
+ container.store(metadata);
}
for(int i=0;i<splitfileDataBlocks.length;i++)
if(splitfileDataBlocks[i] == null) throw new
MetadataParseException("Null: data block "+i+" of "+splitfileDataBlocks.length);
@@ -651,7 +655,13 @@
}
public void removeFrom(ObjectContainer container, ClientContext
context) {
+ container.activate(blockFetchContext, 1);
blockFetchContext.removeFrom(container);
+ if(deleteFetchContext)
+ fetchContext.removeFrom(container);
+ container.activate(clientMetadata, 1);
+ clientMetadata.removeFrom(container);
+ container.delete(decompressors);
for(int i=0;i<segments.length;i++) {
SplitFileFetcherSegment segment = segments[i];
segments[i] = null;
Modified: branches/db4o/freenet/src/freenet/client/async/USKChecker.java
===================================================================
--- branches/db4o/freenet/src/freenet/client/async/USKChecker.java
2009-02-17 23:15:14 UTC (rev 25674)
+++ branches/db4o/freenet/src/freenet/client/async/USKChecker.java
2009-02-17 23:37:53 UTC (rev 25675)
@@ -21,7 +21,7 @@
private int dnfs;
USKChecker(USKCheckerCallback cb, ClientKey key, int maxRetries,
FetchContext ctx, ClientRequester parent) {
- super(key, maxRetries, ctx, parent);
+ super(key, maxRetries, ctx, parent, false);
if(Logger.shouldLog(Logger.MINOR, this))
Logger.minor(this, "Created USKChecker for "+key);
this.cb = cb;
Modified:
branches/db4o/freenet/src/freenet/client/async/USKProxyCompletionCallback.java
===================================================================
---
branches/db4o/freenet/src/freenet/client/async/USKProxyCompletionCallback.java
2009-02-17 23:15:14 UTC (rev 25674)
+++
branches/db4o/freenet/src/freenet/client/async/USKProxyCompletionCallback.java
2009-02-17 23:37:53 UTC (rev 25675)
@@ -29,8 +29,14 @@
}
context.uskManager.update(usk, usk.suggestedEdition, context);
cb.onSuccess(result, state, container, context);
+ if(!persistent) removeFrom(container);
}
+ private void removeFrom(ObjectContainer container) {
+ usk.removeFrom(container);
+ container.delete(this);
+ }
+
public void onFailure(FetchException e, ClientGetState state,
ObjectContainer container, ClientContext context) {
if(persistent) {
container.activate(cb, 1);
@@ -42,6 +48,7 @@
e = new FetchException(e, uri);
}
cb.onFailure(e, state, container, context);
+ if(!persistent) removeFrom(container);
}
public void onBlockSetFinished(ClientGetState state, ObjectContainer
container, ClientContext context) {
Modified: branches/db4o/freenet/src/freenet/keys/USK.java
===================================================================
--- branches/db4o/freenet/src/freenet/keys/USK.java 2009-02-17 23:15:14 UTC
(rev 25674)
+++ branches/db4o/freenet/src/freenet/keys/USK.java 2009-02-17 23:37:53 UTC
(rev 25675)
@@ -6,6 +6,8 @@
import java.net.MalformedURLException;
import java.util.Arrays;
+import com.db4o.ObjectContainer;
+
import freenet.support.Fields;
import freenet.support.Logger;
@@ -167,4 +169,8 @@
}
return uri;
}
+
+ public void removeFrom(ObjectContainer container) {
+ container.delete(this);
+ }
}
Modified: branches/db4o/freenet/src/freenet/node/fcp/FCPClient.java
===================================================================
--- branches/db4o/freenet/src/freenet/node/fcp/FCPClient.java 2009-02-17
23:15:14 UTC (rev 25674)
+++ branches/db4o/freenet/src/freenet/node/fcp/FCPClient.java 2009-02-17
23:37:53 UTC (rev 25675)
@@ -193,11 +193,11 @@
}
clientRequestsByIdentifier.remove(identifier);
}
- req.requestWasRemoved(container);
if(kill) {
if(logMINOR) Logger.minor(this, "Killing request "+req);
req.cancel(container, context);
}
+ req.requestWasRemoved(container);
if(completionCallback != null)
completionCallback.onRemove(req, container);
return true;
Modified: branches/db4o/freenet/src/freenet/node/fcp/FCPConnectionHandler.java
===================================================================
--- branches/db4o/freenet/src/freenet/node/fcp/FCPConnectionHandler.java
2009-02-17 23:15:14 UTC (rev 25674)
+++ branches/db4o/freenet/src/freenet/node/fcp/FCPConnectionHandler.java
2009-02-17 23:37:53 UTC (rev 25675)
@@ -608,9 +608,9 @@
req = requestsByIdentifier.remove(identifier);
}
if(req != null) {
- req.requestWasRemoved(null);
if(kill)
req.cancel(null, server.core.clientContext);
+ req.requestWasRemoved(null);
}
return req;
}
_______________________________________________
cvs mailing list
[email protected]
http://emu.freenetproject.org/cgi-bin/mailman/listinfo/cvs