Author: toad
Date: 2007-02-16 14:08:15 +0000 (Fri, 16 Feb 2007)
New Revision: 11809
Modified:
trunk/freenet/src/freenet/client/async/ClientGetState.java
trunk/freenet/src/freenet/client/async/ClientGetter.java
trunk/freenet/src/freenet/client/async/SimpleSingleFileFetcher.java
trunk/freenet/src/freenet/client/async/SingleFileFetcher.java
trunk/freenet/src/freenet/client/async/SplitFileFetcher.java
trunk/freenet/src/freenet/client/async/SplitFileFetcherSegment.java
trunk/freenet/src/freenet/client/async/USKFetcher.java
trunk/freenet/src/freenet/client/async/USKManager.java
trunk/freenet/src/freenet/client/async/USKRetriever.java
Log:
Refactoring:
SimpleSingleFileFetcher is now actually used.
getToken() now returns a long.
SplitFileFetcherSegment now uses arrays of ClientGetState.
Related changes in the callers (ClientGetter, SplitFileFetcher, USKFetcher, USKManager, USKRetriever).
Modified: trunk/freenet/src/freenet/client/async/ClientGetState.java
===================================================================
--- trunk/freenet/src/freenet/client/async/ClientGetState.java	2007-02-16 04:01:09 UTC (rev 11808)
+++ trunk/freenet/src/freenet/client/async/ClientGetState.java	2007-02-16 14:08:15 UTC (rev 11809)
@@ -7,11 +7,11 @@
* A ClientGetState.
* Represents a stage in the fetch process.
*/
-public abstract interface ClientGetState {
+public interface ClientGetState {
public void schedule();
public void cancel();
- public Object getToken();
+ public long getToken();
}
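Note on the interface change above: getToken() used to hand back a boxed Object (in practice an Integer block index), which callers had to cast and unbox; it is now a primitive long, with -1 used by callers as the "no token" value. A minimal sketch of the two contracts (illustrative only, not part of this commit; the class name is hypothetical):

final class TokenUsageSketch {
	static int blockNumber(freenet.client.async.ClientGetState state) {
		// Old contract: Object token, typically an Integer block index.
		//   Integer token = (Integer) state.getToken();
		//   int blockNo = token.intValue();
		// New contract: primitive long, cast directly when it encodes a small index.
		return (int) state.getToken();
	}
}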
Modified: trunk/freenet/src/freenet/client/async/ClientGetter.java
===================================================================
--- trunk/freenet/src/freenet/client/async/ClientGetter.java	2007-02-16 04:01:09 UTC (rev 11808)
+++ trunk/freenet/src/freenet/client/async/ClientGetter.java	2007-02-16 14:08:15 UTC (rev 11809)
@@ -75,7 +75,7 @@
finished = false;
}
		currentState = SingleFileFetcher.create(this, this, new ClientMetadata(),
-				uri, ctx, actx, ctx.maxNonSplitfileRetries, 0, false, null, true,
+				uri, ctx, actx, ctx.maxNonSplitfileRetries, 0, false, -1, true,
returnBucket, true);
}
if(cancelled) cancel();
Modified: trunk/freenet/src/freenet/client/async/SimpleSingleFileFetcher.java
===================================================================
--- trunk/freenet/src/freenet/client/async/SimpleSingleFileFetcher.java	2007-02-16 04:01:09 UTC (rev 11808)
+++ trunk/freenet/src/freenet/client/async/SimpleSingleFileFetcher.java	2007-02-16 14:08:15 UTC (rev 11809)
@@ -22,14 +22,17 @@
*/
public class SimpleSingleFileFetcher extends BaseSingleFileFetcher implements ClientGetState {
-	SimpleSingleFileFetcher(ClientKey key, int maxRetries, FetcherContext ctx, BaseClientGetter parent, GetCompletionCallback rcb, boolean isEssential) {
+	SimpleSingleFileFetcher(ClientKey key, int maxRetries, FetcherContext ctx, BaseClientGetter parent, GetCompletionCallback rcb, boolean isEssential, long l) {
super(key, maxRetries, ctx, parent);
this.rcb = rcb;
+ this.token = l;
+ parent.addBlock();
if(isEssential)
parent.addMustSucceedBlocks(1);
}
final GetCompletionCallback rcb;
+ final long token;
// Translate it, then call the real onFailure
public void onFailure(LowLevelGetException e) {
@@ -132,8 +135,8 @@
}
/** getToken() is not supported */
- public Object getToken() {
- throw new UnsupportedOperationException();
+ public long getToken() {
+ return token;
}
}
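With the token stored in the constructor, getToken() on SimpleSingleFileFetcher no longer throws UnsupportedOperationException, so completion callbacks can read the token from any ClientGetState without casting to a concrete fetcher class. A hypothetical sketch (not part of this commit):

final class CallbackSketch {
	// Works for SimpleSingleFileFetcher and SingleFileFetcher alike after this revision.
	static void logCompletion(freenet.client.async.ClientGetState state) {
		long token = state.getToken(); // no instanceof checks or unboxing needed
		System.out.println("Completed state with token " + token);
	}
}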
Modified: trunk/freenet/src/freenet/client/async/SingleFileFetcher.java
===================================================================
--- trunk/freenet/src/freenet/client/async/SingleFileFetcher.java	2007-02-16 04:01:09 UTC (rev 11808)
+++ trunk/freenet/src/freenet/client/async/SingleFileFetcher.java	2007-02-16 14:08:15 UTC (rev 11809)
@@ -30,7 +30,7 @@
import freenet.support.compress.Compressor;
import freenet.support.io.BucketTools;
-public class SingleFileFetcher extends SimpleSingleFileFetcher implements ClientGetState {
+public class SingleFileFetcher extends SimpleSingleFileFetcher {
private static boolean logMINOR;
/** Original URI */
@@ -51,7 +51,6 @@
private FreenetURI thisKey;
private final LinkedList decompressors;
private final boolean dontTellClientGet;
- private Object token;
private final Bucket returnBucket;
	/** If true, success/failure is immediately reported to the client, and therefore we can check TOO_MANY_PATH_COMPONENTS. */
private final boolean isFinal;
@@ -63,16 +62,15 @@
	public SingleFileFetcher(BaseClientGetter get, GetCompletionCallback cb, ClientMetadata metadata,
			ClientKey key, LinkedList metaStrings, FreenetURI origURI, int addedMetaStrings, FetcherContext ctx,
			ArchiveContext actx, int maxRetries, int recursionLevel,
-			boolean dontTellClientGet, Object token, boolean isEssential,
+			boolean dontTellClientGet, long l, boolean isEssential,
			Bucket returnBucket, boolean isFinal) throws FetchException {
-		super(key, maxRetries, ctx, get, cb, isEssential);
+		super(key, maxRetries, ctx, get, cb, isEssential, l);
		logMINOR = Logger.shouldLog(Logger.MINOR, this);
		if(logMINOR) Logger.minor(this, "Creating SingleFileFetcher for "+key+" from "+origURI+" meta="+metaStrings.toString(), new Exception("debug"));
this.isFinal = isFinal;
this.cancelled = false;
this.returnBucket = returnBucket;
this.dontTellClientGet = dontTellClientGet;
- this.token = token;
//this.uri = uri;
//this.key = ClientKey.getBaseKey(uri);
//metaStrings = uri.listMetaStrings();
@@ -86,17 +84,15 @@
if(recursionLevel > ctx.maxRecursionLevel)
			throw new FetchException(FetchException.TOO_MUCH_RECURSION, "Too much recursion: "+recursionLevel+" > "+ctx.maxRecursionLevel);
this.decompressors = new LinkedList();
- parent.addBlock();
}
	/** Copy constructor, modifies a few given fields, don't call schedule().
	 * Used for things like slave fetchers for MultiLevelMetadata, therefore does not remember returnBucket,
	 * metaStrings etc. */
	public SingleFileFetcher(SingleFileFetcher fetcher, Metadata newMeta, GetCompletionCallback callback, FetcherContext ctx2) throws FetchException {
-		super(fetcher.key, fetcher.maxRetries, ctx2, fetcher.parent, callback, false);
+		super(fetcher.key, fetcher.maxRetries, ctx2, fetcher.parent, callback, false, fetcher.token);
		logMINOR = Logger.shouldLog(Logger.MINOR, this);
		if(logMINOR) Logger.minor(this, "Creating SingleFileFetcher for "+fetcher.key+" meta="+fetcher.metaStrings.toString(), new Exception("debug"));
- this.token = fetcher.token;
this.returnBucket = null;
// We expect significant further processing in the parent
this.isFinal = false;
@@ -526,10 +522,6 @@
}
- public Object getToken() {
- return token;
- }
-
public boolean ignoreStore() {
return ctx.ignoreStore;
}
@@ -539,21 +531,26 @@
*/
	public static ClientGetState create(BaseClientGetter parent, GetCompletionCallback cb,
			ClientMetadata clientMetadata, FreenetURI uri, FetcherContext ctx, ArchiveContext actx,
-			int maxRetries, int recursionLevel, boolean dontTellClientGet, Object token, boolean isEssential,
+			int maxRetries, int recursionLevel, boolean dontTellClientGet, long l, boolean isEssential,
			Bucket returnBucket, boolean isFinal) throws MalformedURLException, FetchException {
		BaseClientKey key = BaseClientKey.getBaseKey(uri);
		if((clientMetadata == null || clientMetadata.isTrivial()) && (!uri.hasMetaStrings()) &&
-				ctx.allowSplitfiles == false && ctx.followRedirects == false && token == null &&
+				ctx.allowSplitfiles == false && ctx.followRedirects == false &&
				returnBucket == null && key instanceof ClientKey)
-			return new SimpleSingleFileFetcher((ClientKey)key, maxRetries, ctx, parent, cb, isEssential);
+			return new SimpleSingleFileFetcher((ClientKey)key, maxRetries, ctx, parent, cb, isEssential, l);
+		else
+			if(logMINOR)
+				Logger.minor(SingleFileFetcher.class, "Not creating SimpleSingleFileFetcher: cm="+clientMetadata+
+						" uri="+uri+" ("+uri.getAllMetaStrings()+") ctx.allowSplitfiles="+ctx.allowSplitfiles+
+						"ctx.followRedirects="+ctx.followRedirects+" returnBucket="+returnBucket+" key="+key, new Exception());
		if(key instanceof ClientKey)
-			return new SingleFileFetcher(parent, cb, clientMetadata, (ClientKey)key, uri.listMetaStrings(), uri, 0, ctx, actx, maxRetries, recursionLevel, dontTellClientGet, token, isEssential, returnBucket, isFinal);
+			return new SingleFileFetcher(parent, cb, clientMetadata, (ClientKey)key, uri.listMetaStrings(), uri, 0, ctx, actx, maxRetries, recursionLevel, dontTellClientGet, l, isEssential, returnBucket, isFinal);
		else {
-			return uskCreate(parent, cb, clientMetadata, (USK)key, uri.listMetaStrings(), ctx, actx, maxRetries, recursionLevel, dontTellClientGet, token, isEssential, returnBucket, isFinal);
+			return uskCreate(parent, cb, clientMetadata, (USK)key, uri.listMetaStrings(), ctx, actx, maxRetries, recursionLevel, dontTellClientGet, l, isEssential, returnBucket, isFinal);
		}
	}
-	private static ClientGetState uskCreate(BaseClientGetter parent, GetCompletionCallback cb, ClientMetadata clientMetadata, USK usk, LinkedList metaStrings, FetcherContext ctx, ArchiveContext actx, int maxRetries, int recursionLevel, boolean dontTellClientGet, Object token, boolean isEssential, Bucket returnBucket, boolean isFinal) throws FetchException {
+	private static ClientGetState uskCreate(BaseClientGetter parent, GetCompletionCallback cb, ClientMetadata clientMetadata, USK usk, LinkedList metaStrings, FetcherContext ctx, ArchiveContext actx, int maxRetries, int recursionLevel, boolean dontTellClientGet, long l, boolean isEssential, Bucket returnBucket, boolean isFinal) throws FetchException {
if(usk.suggestedEdition >= 0) {
			// Return the latest known version but at least suggestedEdition.
long edition = ctx.uskManager.lookup(usk);
@@ -573,7 +570,7 @@
SingleFileFetcher sf =
				new SingleFileFetcher(parent, myCB, clientMetadata, usk.getSSK(), metaStrings, usk.getURI().addMetaStrings(metaStrings), 0, ctx, actx, maxRetries, recursionLevel,
-						dontTellClientGet, token, false, returnBucket, isFinal);
+						dontTellClientGet, l, false, returnBucket, isFinal);
return sf;
}
} else {
@@ -586,7 +583,7 @@
ctx.uskManager.getFetcher(usk.copy(-usk.suggestedEdition), ctx, parent, false);
if(isEssential)
parent.addMustSucceedBlocks(1);
-			fetcher.addCallback(new MyUSKFetcherCallback(parent, cb, clientMetadata, usk, metaStrings, ctx, actx, maxRetries, recursionLevel, dontTellClientGet, token, returnBucket));
+			fetcher.addCallback(new MyUSKFetcherCallback(parent, cb, clientMetadata, usk, metaStrings, ctx, actx, maxRetries, recursionLevel, dontTellClientGet, l, returnBucket));
return fetcher;
}
}
@@ -603,10 +600,10 @@
final int maxRetries;
final int recursionLevel;
final boolean dontTellClientGet;
- final Object token;
+ final long token;
final Bucket returnBucket;
-		public MyUSKFetcherCallback(BaseClientGetter parent, GetCompletionCallback cb, ClientMetadata clientMetadata, USK usk, LinkedList metaStrings, FetcherContext ctx, ArchiveContext actx, int maxRetries, int recursionLevel, boolean dontTellClientGet, Object token, Bucket returnBucket) {
+		public MyUSKFetcherCallback(BaseClientGetter parent, GetCompletionCallback cb, ClientMetadata clientMetadata, USK usk, LinkedList metaStrings, FetcherContext ctx, ArchiveContext actx, int maxRetries, int recursionLevel, boolean dontTellClientGet, long l, Bucket returnBucket) {
this.parent = parent;
this.cb = cb;
this.clientMetadata = clientMetadata;
@@ -617,7 +614,7 @@
this.maxRetries = maxRetries;
this.recursionLevel = recursionLevel;
this.dontTellClientGet = dontTellClientGet;
- this.token = token;
+ this.token = l;
this.returnBucket = returnBucket;
}
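SingleFileFetcher.create() now takes the token as a long, with -1 as the "no token" value (see the ClientGetter change above). A sketch of a call site under that convention (hypothetical helper; the import paths for the non-async classes are assumptions, not taken from this diff):

package freenet.client.async; // sketch only, not part of the commit

import java.net.MalformedURLException;

import freenet.client.ArchiveContext;
import freenet.client.ClientMetadata;
import freenet.client.FetchException;
import freenet.client.FetcherContext;
import freenet.keys.FreenetURI;

final class CreateSketch {
	/** Start a top-level fetch with no block token; -1 mirrors ClientGetter's usage. */
	static ClientGetState startTopLevel(BaseClientGetter parent, GetCompletionCallback cb,
			FreenetURI uri, FetcherContext ctx, ArchiveContext actx)
			throws MalformedURLException, FetchException {
		return SingleFileFetcher.create(parent, cb, new ClientMetadata(), uri, ctx, actx,
				ctx.maxNonSplitfileRetries, 0, false, -1, true, null, true);
	}
}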
Modified: trunk/freenet/src/freenet/client/async/SplitFileFetcher.java
===================================================================
--- trunk/freenet/src/freenet/client/async/SplitFileFetcher.java	2007-02-16 04:01:09 UTC (rev 11808)
+++ trunk/freenet/src/freenet/client/async/SplitFileFetcher.java	2007-02-16 14:08:15 UTC (rev 11809)
@@ -60,11 +60,11 @@
/** Preferred bucket to return data in */
private final Bucket returnBucket;
private boolean finished;
- private Object token;
+ private long token;
	public SplitFileFetcher(Metadata metadata, GetCompletionCallback rcb, BaseClientGetter parent,
			FetcherContext newCtx, LinkedList decompressors, ClientMetadata clientMetadata,
-			ArchiveContext actx, int recursionLevel, Bucket returnBucket, Object token) throws FetchException, MetadataParseException {
+			ArchiveContext actx, int recursionLevel, Bucket returnBucket, long token2) throws FetchException, MetadataParseException {
this.finished = false;
this.returnBucket = returnBucket;
this.fetchContext = newCtx;
@@ -143,7 +143,7 @@
				segments[i] = new SplitFileFetcherSegment(splitfileType, dataBlocks, checkBlocks, this, archiveContext, fetchContext, maxTempLength, splitUseLengths, recursionLevel+1);
}
}
- this.token = token;
+ this.token = token2;
}
	/** Return the final status of the fetch. Throws an exception, or returns a
@@ -267,7 +267,7 @@
segments[i].cancel();
}
- public Object getToken() {
+ public long getToken() {
return token;
}
Modified: trunk/freenet/src/freenet/client/async/SplitFileFetcherSegment.java
===================================================================
--- trunk/freenet/src/freenet/client/async/SplitFileFetcherSegment.java	2007-02-16 04:01:09 UTC (rev 11808)
+++ trunk/freenet/src/freenet/client/async/SplitFileFetcherSegment.java	2007-02-16 14:08:15 UTC (rev 11809)
@@ -32,8 +32,8 @@
final short splitfileType;
final FreenetURI[] dataBlocks;
final FreenetURI[] checkBlocks;
- final BaseSingleFileFetcher[] dataBlockStatus;
- final BaseSingleFileFetcher[] checkBlockStatus;
+ final ClientGetState[] dataBlockStatus;
+ final ClientGetState[] checkBlockStatus;
final MinimalSplitfileBlock[] dataBuckets;
final MinimalSplitfileBlock[] checkBuckets;
final int minFetched;
@@ -72,8 +72,8 @@
		} else throw new MetadataParseException("Unknown splitfile type"+splitfileType);
finished = false;
decodedData = null;
- dataBlockStatus = new SingleFileFetcher[dataBlocks.length];
- checkBlockStatus = new SingleFileFetcher[checkBlocks.length];
+ dataBlockStatus = new ClientGetState[dataBlocks.length];
+ checkBlockStatus = new ClientGetState[checkBlocks.length];
dataBuckets = new MinimalSplitfileBlock[dataBlocks.length];
checkBuckets = new MinimalSplitfileBlock[checkBlocks.length];
for(int i=0;i<dataBuckets.length;i++) {
@@ -146,8 +146,7 @@
	public synchronized void onSuccess(FetchResult result, ClientGetState state) {
		logMINOR = Logger.shouldLog(Logger.MINOR, this);
		if(finished) return;
-		Integer token = (Integer) ((SingleFileFetcher)state).getToken();
-		int blockNo = token.intValue();
+		int blockNo = (int) state.getToken();
		if(blockNo < dataBlocks.length) {
			if(dataBlocks[blockNo] == null) {
				Logger.error(this, "Block already finished: "+blockNo);
@@ -176,11 +175,11 @@
startedDecode = true;
}
for(int i=0;i<dataBlockStatus.length;i++) {
- BaseSingleFileFetcher f = dataBlockStatus[i];
+ ClientGetState f = dataBlockStatus[i];
if(f != null) f.cancel();
}
for(int i=0;i<checkBlockStatus.length;i++) {
- BaseSingleFileFetcher f = checkBlockStatus[i];
+ ClientGetState f = checkBlockStatus[i];
if(f != null) f.cancel();
}
Runnable r = new Decoder();
@@ -246,42 +245,42 @@
} catch (IOException e) {
					Logger.error(this, "Bucket error while healing: "+e, e);
				}
-			}
-			// Now insert *ALL* blocks on which we had at least one failure, and didn't eventually succeed
- for(int i=0;i<dataBlockStatus.length;i++) {
- boolean heal = false;
- if(!dataBlocksSucceeded[i]) {
-					BaseSingleFileFetcher fetcher = dataBlockStatus[i];
- if(fetcher.getRetryCount() > 0)
- heal = true;
+				// Now insert *ALL* blocks on which we had at least one failure, and didn't eventually succeed
+ for(int i=0;i<dataBlockStatus.length;i++) {
+ boolean heal = false;
+ if(!dataBlocksSucceeded[i]) {
+						SimpleSingleFileFetcher sf = (SimpleSingleFileFetcher) dataBlockStatus[i];
+ if(sf.getRetryCount() > 0)
+ heal = true;
+ }
+ if(heal) {
+						queueHeal(dataBuckets[i].getData());
+ } else {
+ dataBuckets[i].data.free();
+ dataBuckets[i].data = null;
+ }
+ dataBuckets[i] = null;
+ dataBlockStatus[i] = null;
+ dataBlocks[i] = null;
}
- if(heal) {
- queueHeal(dataBuckets[i].getData());
- } else {
- dataBuckets[i].data.free();
- dataBuckets[i].data = null;
+ for(int i=0;i<checkBlockStatus.length;i++) {
+ boolean heal = false;
+ if(!checkBlocksSucceeded[i]) {
+						SimpleSingleFileFetcher sf = (SimpleSingleFileFetcher) checkBlockStatus[i];
+ if(sf.getRetryCount() > 0)
+ heal = true;
+ }
+ if(heal) {
+						queueHeal(checkBuckets[i].getData());
+ } else {
+ checkBuckets[i].data.free();
+ }
+ checkBuckets[i] = null;
+ checkBlockStatus[i] = null;
+ checkBlocks[i] = null;
}
- dataBuckets[i] = null;
- dataBlockStatus[i] = null;
- dataBlocks[i] = null;
}
- for(int i=0;i<checkBlockStatus.length;i++) {
- boolean heal = false;
- if(!checkBlocksSucceeded[i]) {
-					BaseSingleFileFetcher fetcher = checkBlockStatus[i];
- if(fetcher.getRetryCount() > 0)
- heal = true;
- }
- if(heal) {
- queueHeal(checkBuckets[i].getData());
- } else {
- checkBuckets[i].data.free();
- }
- checkBuckets[i] = null;
- checkBlockStatus[i] = null;
- checkBlocks[i] = null;
- }
}
}
@@ -294,8 +293,7 @@
	/** This is after any retries and therefore is either out-of-retries or fatal */
	public synchronized void onFailure(FetchException e, ClientGetState state) {
logMINOR = Logger.shouldLog(Logger.MINOR, this);
- Integer token = (Integer) ((SingleFileFetcher)state).getToken();
- int blockNo = token.intValue();
+ int blockNo = (int) state.getToken();
if(blockNo < dataBlocks.length) {
if(dataBlocks[blockNo] == null) {
				Logger.error(this, "Block already finished: "+blockNo);
@@ -337,7 +335,7 @@
return;
}
for(int i=0;i<dataBlockStatus.length;i++) {
- BaseSingleFileFetcher f = dataBlockStatus[i];
+ ClientGetState f = dataBlockStatus[i];
if(f != null)
f.cancel();
MinimalSplitfileBlock b = dataBuckets[i];
@@ -348,7 +346,7 @@
dataBuckets[i] = null;
}
for(int i=0;i<checkBlockStatus.length;i++) {
- BaseSingleFileFetcher f = checkBlockStatus[i];
+ ClientGetState f = checkBlockStatus[i];
if(f != null)
f.cancel();
MinimalSplitfileBlock b = checkBuckets[i];
@@ -370,12 +368,16 @@
continue;
}
			// FIXME maybe within a non-FECced splitfile at least?
-			if(dataBlocks[i].getKeyType().equals("USK"))
+			if(dataBlocks[i].getKeyType().equals("USK")) {
				fail(new FetchException(FetchException.INVALID_METADATA, "Cannot have USKs within a splitfile!"));
+ return;
+ }
if(dataBlockStatus[i] != null) {
				Logger.error(this, "Scheduling twice? dataBlockStatus["+i+"] = "+dataBlockStatus[i]);
-			} else dataBlockStatus[i] =
-				(SingleFileFetcher) SingleFileFetcher.create(parentFetcher.parent, this, null, dataBlocks[i], blockFetchContext, archiveContext, blockFetchContext.maxNonSplitfileRetries, recursionLevel, true, new Integer(i), true, null, false);
+			} else {
+				dataBlockStatus[i] =
+					(ClientGetState) SingleFileFetcher.create(parentFetcher.parent, this, null, dataBlocks[i], blockFetchContext, archiveContext, blockFetchContext.maxNonSplitfileRetries, recursionLevel, true, i, true, null, false);
+ }
}
for(int i=0;i<checkBlocks.length;i++) {
if(checkBlocks[i] == null) {
@@ -383,12 +385,14 @@
continue;
}
			// FIXME maybe within a non-FECced splitfile at least?
-			if(checkBlocks[i].getKeyType().equals("USK"))
+			if(checkBlocks[i].getKeyType().equals("USK")) {
				fail(new FetchException(FetchException.INVALID_METADATA, "Cannot have USKs within a splitfile!"));
+ return;
+ }
if(checkBlockStatus[i] != null) {
				Logger.error(this, "Scheduling twice? dataBlockStatus["+i+"] = "+checkBlockStatus[i]);
			} else checkBlockStatus[i] =
-				(SingleFileFetcher) SingleFileFetcher.create(parentFetcher.parent, this, null, checkBlocks[i], blockFetchContext, archiveContext, blockFetchContext.maxNonSplitfileRetries, recursionLevel, true, new Integer(dataBlocks.length+i), false, null, false);
+				(ClientGetState) SingleFileFetcher.create(parentFetcher.parent, this, null, checkBlocks[i], blockFetchContext, archiveContext, blockFetchContext.maxNonSplitfileRetries, recursionLevel, true, dataBlocks.length+i, false, null, false);
}
for(int i=0;i<dataBlocks.length;i++) {
if(dataBlockStatus[i] != null)
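The segment now encodes the block index directly in the long token: data block i is scheduled with token i, check block i with token dataBlocks.length+i, and onSuccess()/onFailure() recover the index with a plain cast instead of unboxing an Integer. A standalone sketch of that encoding (illustrative only; the class name is hypothetical):

final class BlockTokenSketch {
	static long tokenForDataBlock(int i) { return i; }
	static long tokenForCheckBlock(int dataBlockCount, int i) { return dataBlockCount + i; }

	// Mirrors the callbacks above: recover the index and classify the block.
	static boolean isDataBlock(long token, int dataBlockCount) {
		int blockNo = (int) token;
		return blockNo < dataBlockCount;
	}
}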
Modified: trunk/freenet/src/freenet/client/async/USKFetcher.java
===================================================================
--- trunk/freenet/src/freenet/client/async/USKFetcher.java	2007-02-16 04:01:09 UTC (rev 11808)
+++ trunk/freenet/src/freenet/client/async/USKFetcher.java	2007-02-16 14:08:15 UTC (rev 11809)
@@ -198,14 +198,14 @@
private boolean started;
- private Object token;
+ private int token;
-	USKFetcher(USK origUSK, USKManager manager, FetcherContext ctx, BaseClientGetter parent, int minFailures, boolean pollForever, boolean keepLastData, Object token) {
+	USKFetcher(USK origUSK, USKManager manager, FetcherContext ctx, BaseClientGetter parent, int minFailures, boolean pollForever, boolean keepLastData, int token) {
		this(origUSK, manager, ctx, parent, minFailures, pollForever, DEFAULT_MAX_MIN_FAILURES, keepLastData, token);
}
// FIXME use this!
-	USKFetcher(USK origUSK, USKManager manager, FetcherContext ctx, BaseClientGetter parent, int minFailures, boolean pollForever, long maxProbeEditions, boolean keepLastData, Object token) {
+	USKFetcher(USK origUSK, USKManager manager, FetcherContext ctx, BaseClientGetter parent, int minFailures, boolean pollForever, long maxProbeEditions, boolean keepLastData, int token) {
this.parent = parent;
this.maxMinFailures = maxProbeEditions;
this.origUSK = origUSK;
@@ -517,7 +517,7 @@
this.killOnLoseSubscribers = true;
}
- public Object getToken() {
+ public long getToken() {
return token;
}
Modified: trunk/freenet/src/freenet/client/async/USKManager.java
===================================================================
--- trunk/freenet/src/freenet/client/async/USKManager.java	2007-02-16 04:01:09 UTC (rev 11808)
+++ trunk/freenet/src/freenet/client/async/USKManager.java	2007-02-16 14:08:15 UTC (rev 11809)
@@ -89,14 +89,14 @@
			if((f.parent.priorityClass == parent.priorityClass) && f.ctx.equals(ctx) && f.keepLastData == keepLastData)
				return f;
		}
-		f = new USKFetcher(usk, this, ctx, parent, 3, false, keepLastData, null);
+		f = new USKFetcher(usk, this, ctx, parent, 3, false, keepLastData, -1);
fetchersByUSK.put(usk, f);
return f;
}
	public USKFetcher getFetcherForInsertDontSchedule(USK usk, short prioClass, USKFetcherCallback cb, Object client) {
		USKFetcher f = new USKFetcher(usk, this, backgroundFetchContext,
-				new USKFetcherWrapper(usk, prioClass, chkRequestScheduler, sskRequestScheduler, client), 3, false, true, null);
+				new USKFetcherWrapper(usk, prioClass, chkRequestScheduler, sskRequestScheduler, client), 3, false, true, -1);
f.addCallback(cb);
return f;
}
@@ -107,7 +107,7 @@
synchronized(this) {
			USKFetcher f = (USKFetcher) backgroundFetchersByClearUSK.get(clear);
			if(f == null) {
-				f = new USKFetcher(usk, this, backgroundFetchContext, new USKFetcherWrapper(usk, RequestStarter.UPDATE_PRIORITY_CLASS, chkRequestScheduler, sskRequestScheduler, this), 10, true, false, null);
+				f = new USKFetcher(usk, this, backgroundFetchContext, new USKFetcherWrapper(usk, RequestStarter.UPDATE_PRIORITY_CLASS, chkRequestScheduler, sskRequestScheduler, this), 10, true, false, -1);
sched = f;
backgroundFetchersByClearUSK.put(clear, f);
}
@@ -181,7 +181,7 @@
if(runBackgroundFetch) {
			USKFetcher f = (USKFetcher) backgroundFetchersByClearUSK.get(clear);
			if(f == null) {
-				f = new USKFetcher(origUSK, this, backgroundFetchContext, new USKFetcherWrapper(origUSK, RequestStarter.UPDATE_PRIORITY_CLASS, chkRequestScheduler, sskRequestScheduler, client), 10, true, false, null);
+				f = new USKFetcher(origUSK, this, backgroundFetchContext, new USKFetcherWrapper(origUSK, RequestStarter.UPDATE_PRIORITY_CLASS, chkRequestScheduler, sskRequestScheduler, client), 10, true, false, -1);
sched = f;
				backgroundFetchersByClearUSK.put(clear, f);
}
Modified: trunk/freenet/src/freenet/client/async/USKRetriever.java
===================================================================
--- trunk/freenet/src/freenet/client/async/USKRetriever.java	2007-02-16 04:01:09 UTC (rev 11808)
+++ trunk/freenet/src/freenet/client/async/USKRetriever.java	2007-02-16 14:08:15 UTC (rev 11809)
@@ -23,7 +23,8 @@
final FetcherContext ctx;
final USKRetrieverCallback cb;
-	public USKRetriever(FetcherContext fctx, short prio, ClientRequestScheduler chkSched, ClientRequestScheduler sskSched, Object client, USKRetrieverCallback cb) {
+	public USKRetriever(FetcherContext fctx, short prio, ClientRequestScheduler chkSched,
+			ClientRequestScheduler sskSched, Object client, USKRetrieverCallback cb) {
super(prio, chkSched, sskSched, client);
this.ctx = fctx;
this.cb = cb;
@@ -38,7 +39,7 @@
try {
SingleFileFetcher getter =
				(SingleFileFetcher) SingleFileFetcher.create(this, this, new ClientMetadata(), uri, ctx, new ArchiveContext(ctx.maxArchiveLevels),
-						ctx.maxNonSplitfileRetries, 0, true, key.copy(l), true, null, false);
+						ctx.maxNonSplitfileRetries, 0, true, l, true, null, false);
getter.schedule();
} catch (MalformedURLException e) {
Logger.error(this, "Impossible: "+e, e);
@@ -48,14 +49,11 @@
}
public void onSuccess(FetchResult result, ClientGetState state) {
- Object token = state.getToken();
- USK key = (USK) token;
- cb.onFound(key.suggestedEdition, result);
+ cb.onFound(state.getToken(), result);
}
public void onFailure(FetchException e, ClientGetState state) {
- Object token = state.getToken();
-		Logger.error(this, "Found "+token+" but failed to fetch edition: "+e, e);
+		Logger.error(this, "Found edition "+state.getToken()+" but failed to fetch edition: "+e, e);
}
public void onBlockSetFinished(ClientGetState state) {
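In onSuccess()/onFailure() above, the token itself now carries the USK edition, so the retriever no longer recovers a USK object from the token before reporting. A hypothetical sketch of the simplification (not part of this commit):

final class UskEditionSketch {
	static long editionOf(freenet.client.async.ClientGetState state) {
		// Before: Object token = state.getToken(); USK key = (USK) token; use key.suggestedEdition.
		// After: the edition was passed in as the long token when the fetch was created.
		return state.getToken();
	}
}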