Author: toad
Date: 2005-11-11 00:20:30 +0000 (Fri, 11 Nov 2005)
New Revision: 7522
Modified:
trunk/freenet/src/freenet/client/BlockFetcher.java
trunk/freenet/src/freenet/client/FetchException.java
trunk/freenet/src/freenet/client/Fetcher.java
trunk/freenet/src/freenet/client/FetcherContext.java
trunk/freenet/src/freenet/client/Metadata.java
trunk/freenet/src/freenet/client/Segment.java
trunk/freenet/src/freenet/client/SplitFetcher.java
trunk/freenet/src/freenet/client/StandardOnionFECCodec.java
trunk/freenet/src/freenet/client/StdSplitfileBlock.java
trunk/freenet/src/freenet/node/Version.java
trunk/freenet/src/freenet/support/BucketTools.java
Log:
158:
Splitfile insert/request more or less working.
I don't think redundancy is working yet, though.
Modified: trunk/freenet/src/freenet/client/BlockFetcher.java
===================================================================
--- trunk/freenet/src/freenet/client/BlockFetcher.java 2005-11-10 19:16:28 UTC (rev 7521)
+++ trunk/freenet/src/freenet/client/BlockFetcher.java 2005-11-11 00:20:30 UTC (rev 7522)
@@ -7,7 +7,7 @@
import freenet.support.Bucket;
import freenet.support.Logger;
-public class BlockFetcher extends StdSplitfileBlock implements Runnable {
+public class BlockFetcher extends StdSplitfileBlock {
private final Segment segment;
final FreenetURI uri;
@@ -26,6 +26,7 @@
}
public void run() {
+ Logger.minor(this, "Running: "+this);
// Already added to runningFetches.
// But need to make sure we are removed when we exit.
try {
@@ -35,11 +36,15 @@
}
}
+ public String toString() {
+ return super.toString()+" tries="+completedTries+" uri="+uri;
+ }
+
private void realRun() {
// Do the fetch
Fetcher f = new Fetcher(uri, this.segment.blockFetchContext);
try {
- FetchResult fr = f.realRun(new ClientMetadata(), this.segment.recursionLevel, uri,
+ FetchResult fr = f.realRun(new ClientMetadata(), segment.recursionLevel, uri,
(!this.segment.nonFullBlocksAllowed) || dontEnterImplicitArchives);
actuallyFetched = true;
fetchedData = fr.data;
Modified: trunk/freenet/src/freenet/client/FetchException.java
===================================================================
--- trunk/freenet/src/freenet/client/FetchException.java 2005-11-10 19:16:28 UTC (rev 7521)
+++ trunk/freenet/src/freenet/client/FetchException.java 2005-11-11 00:20:30 UTC (rev 7522)
@@ -2,6 +2,8 @@
import java.io.IOException;
+import freenet.support.Logger;
+
/**
* Generic exception thrown by a Fetcher. All other exceptions are converted to one of
* these to tell the client.
@@ -20,29 +22,34 @@
public FetchException(int m) {
super(getMessage(m));
mode = m;
+ Logger.minor(this, "FetchException("+getMessage(mode)+")",
this);
}
public FetchException(MetadataParseException e) {
super(getMessage(INVALID_METADATA)+": "+e.getMessage());
mode = INVALID_METADATA;
initCause(e);
+ Logger.minor(this, "FetchException("+getMessage(mode)+"):
"+e,e);
}
public FetchException(ArchiveFailureException e) {
super(getMessage(INVALID_METADATA)+": "+e.getMessage());
mode = ARCHIVE_FAILURE;
initCause(e);
+ Logger.minor(this, "FetchException("+getMessage(mode)+"):
"+e,e);
}
public FetchException(int mode, IOException e) {
super(getMessage(INVALID_METADATA)+": "+e.getMessage());
this.mode = mode;
initCause(e);
+ Logger.minor(this, "FetchException("+getMessage(mode)+"):
"+e.getMessage(),e);
}
public FetchException(int mode, String msg) {
super(getMessage(mode)+": "+msg);
this.mode = mode;
+ Logger.minor(this, "FetchException("+getMessage(mode)+"):
"+msg,this);
}
private static String getMessage(int mode) {
@@ -66,7 +73,7 @@
case TOO_MANY_ARCHIVE_RESTARTS:
return "Request was restarted too many times due to
archives changing";
case TOO_MUCH_RECURSION:
- return "Too many redirects"; // FIXME: ???
+ return "Too many redirects (too much recursion)"; //
FIXME: ???
case NOT_IN_ARCHIVE:
return "File not in archive";
case HAS_MORE_METASTRINGS:
Modified: trunk/freenet/src/freenet/client/Fetcher.java
===================================================================
--- trunk/freenet/src/freenet/client/Fetcher.java 2005-11-10 19:16:28 UTC (rev 7521)
+++ trunk/freenet/src/freenet/client/Fetcher.java 2005-11-11 00:20:30 UTC (rev 7522)
@@ -93,6 +93,7 @@
*/
FetchResult realRun(ClientMetadata dm, int recursionLevel, FreenetURI uri, boolean dontEnterImplicitArchives)
throws FetchException, MetadataParseException, ArchiveFailureException, ArchiveRestartException {
+ Logger.minor(this, "Running fetch for: "+uri);
ClientKey key;
try {
key = ClientKey.getBaseKey(uri);
@@ -103,7 +104,7 @@
recursionLevel++;
if(recursionLevel > ctx.maxRecursionLevel)
- throw new FetchException(FetchException.TOO_MUCH_RECURSION);
+ throw new FetchException(FetchException.TOO_MUCH_RECURSION, ""+recursionLevel+" should be < "+ctx.maxRecursionLevel);
// Do the fetch
KeyBlock block;
@@ -142,7 +143,7 @@
throw new FetchException(FetchException.BLOCK_DECODE_ERROR, e1.getMessage());
} catch (IOException e) {
Logger.error(this, "Could not capture data - disk
full?: "+e, e);
- throw new FetchException(FetchException.BUCKET_ERROR);
+ throw new FetchException(FetchException.BUCKET_ERROR, e);
}
ctx.eventProducer.produceEvent(new DecodedBlockEvent(key));
@@ -160,7 +161,7 @@
try {
metadata = Metadata.construct(BucketTools.toByteArray(data));
} catch (IOException e) {
- throw new FetchException(FetchException.BUCKET_ERROR);
+ throw new FetchException(FetchException.BUCKET_ERROR, e);
}
ctx.eventProducer.produceEvent(new FetchedMetadataEvent());
@@ -297,7 +298,7 @@
try {
metadata = Metadata.construct(metadataBucket);
} catch (IOException e) {
- throw new FetchException(FetchException.BUCKET_ERROR);
+ throw new FetchException(FetchException.BUCKET_ERROR, e);
}
return runMetadata(dm, recursionLevel+1, key, metaStrings, metadata, container, thisKey, dontEnterImplicitArchives);
}
@@ -308,7 +309,7 @@
else
newCtx = new FetcherContext(ctx, FetcherContext.SPLITFILE_DEFAULT_MASK);
- SplitFetcher sf = new SplitFetcher(metadata, archiveContext, newCtx);
+ SplitFetcher sf = new SplitFetcher(metadata, archiveContext, newCtx, recursionLevel);
Bucket sfResult = sf.fetch(); // will throw in event of error
if(metadata.compressed) {
Compressor codec = Compressor.getCompressionAlgorithmByMetadataID(metadata.compressionCodec);
Modified: trunk/freenet/src/freenet/client/FetcherContext.java
===================================================================
--- trunk/freenet/src/freenet/client/FetcherContext.java 2005-11-10 19:16:28 UTC (rev 7521)
+++ trunk/freenet/src/freenet/client/FetcherContext.java 2005-11-11 00:20:30 UTC (rev 7522)
@@ -68,7 +68,7 @@
this.maxTempLength = ctx.maxTempLength;
this.archiveManager = ctx.archiveManager;
this.bucketFactory = ctx.bucketFactory;
- this.maxRecursionLevel = 0;
+ this.maxRecursionLevel = 1;
this.maxArchiveRestarts = 0;
this.dontEnterImplicitArchives = true;
this.random = ctx.random;
Modified: trunk/freenet/src/freenet/client/Metadata.java
===================================================================
--- trunk/freenet/src/freenet/client/Metadata.java 2005-11-10 19:16:28 UTC (rev 7521)
+++ trunk/freenet/src/freenet/client/Metadata.java 2005-11-11 00:20:30 UTC (rev 7522)
@@ -572,6 +572,7 @@
if(fullKeys) flags |= FLAGS_FULL_KEYS;
if(splitUseLengths) flags |= FLAGS_SPLIT_USE_LENGTHS;
if(compressed) flags |= FLAGS_COMPRESSED;
+ dos.writeShort(flags);
}
if(documentType == ZIP_MANIFEST) {
Modified: trunk/freenet/src/freenet/client/Segment.java
===================================================================
--- trunk/freenet/src/freenet/client/Segment.java 2005-11-10 19:16:28 UTC (rev 7521)
+++ trunk/freenet/src/freenet/client/Segment.java 2005-11-11 00:20:30 UTC (rev 7522)
@@ -34,8 +34,6 @@
private boolean started;
/** Has the segment finished processing? Irreversible. */
private boolean finished;
- /** Error code, or -1 */
- private short fetchError;
/** Bucket to store the data retrieved, after it has been decoded */
private Bucket decodedData;
/** Recently completed fetches */
@@ -57,7 +55,7 @@
* @param splitfileCheckBlocks The check blocks to fetch.
*/
public Segment(short splitfileType, FreenetURI[] splitfileDataBlocks, FreenetURI[] splitfileCheckBlocks,
- SplitFetcher fetcher, ArchiveContext actx, FetcherContext fctx, long maxTempLength, boolean useLengths, int recursionLevel) throws MetadataParseException {
+ SplitFetcher fetcher, ArchiveContext actx, FetcherContext fctx, long maxTempLength, boolean useLengths, int recLevel) throws MetadataParseException {
this.splitfileType = splitfileType;
dataBlocks = splitfileDataBlocks;
checkBlocks = splitfileCheckBlocks;
@@ -75,7 +73,6 @@
nonFullBlocksAllowed = useLengths;
started = false;
finished = false;
- fetchError = -1;
decodedData = null;
dataBlockStatus = new BlockFetcher[dataBlocks.length];
checkBlockStatus = new BlockFetcher[checkBlocks.length];
@@ -92,9 +89,14 @@
}
recentlyCompletedFetches = new LinkedList();
runningFetches = new LinkedList();
- this.recursionLevel = recursionLevel;
// FIXME be a bit more flexible here depending on flags
- blockFetchContext = new FetcherContext(fetcherContext, FetcherContext.SPLITFILE_DEFAULT_BLOCK_MASK);
+ if(useLengths) {
+ blockFetchContext = new FetcherContext(fetcherContext, FetcherContext.SPLITFILE_USE_LENGTHS_MASK);
+ this.recursionLevel = recLevel;
+ } else {
+ blockFetchContext = new FetcherContext(fetcherContext, FetcherContext.SPLITFILE_DEFAULT_BLOCK_MASK);
+ this.recursionLevel = 0;
+ }
}
/**
@@ -108,8 +110,8 @@
* If there was an error, throw it now.
*/
public void throwError() throws FetchException {
- if(fetchError != -1)
- throw new FetchException(fetchError);
+ if(failureException != null)
+ throw failureException;
}
/**
@@ -173,7 +175,7 @@
*/
public void finished(SplitfileBlock[] succeeded, SplitfileBlock[] failed, SplitfileBlock[] fatalErrors) {
- if(succeeded.length > minFetched)
+ if(succeeded.length >= minFetched)
// Not finished yet, need to decode
successfulFetch();
else {
@@ -199,13 +201,12 @@
// Now have all the data blocks (not necessarily all the check blocks)
}
- Bucket output = fetcherContext.bucketFactory.makeBucket(-1);
- OutputStream os = output.getOutputStream();
+ decodedData = fetcherContext.bucketFactory.makeBucket(-1);
+ OutputStream os = decodedData.getOutputStream();
for(int i=0;i<dataBlockStatus.length;i++) {
BlockFetcher status = dataBlockStatus[i];
Bucket data = status.fetchedData;
BucketTools.copyTo(data, os, Long.MAX_VALUE);
- fetcherContext.bucketFactory.freeBucket(data);
}
os.close();
// Must set finished BEFORE calling parentFetcher.
Modified: trunk/freenet/src/freenet/client/SplitFetcher.java
===================================================================
--- trunk/freenet/src/freenet/client/SplitFetcher.java 2005-11-10 19:16:28 UTC (rev 7521)
+++ trunk/freenet/src/freenet/client/SplitFetcher.java 2005-11-11 00:20:30 UTC (rev 7522)
@@ -55,7 +55,7 @@
/** Accept non-full splitfile chunks? */
private boolean splitUseLengths;
- public SplitFetcher(Metadata metadata, ArchiveContext archiveContext, FetcherContext ctx) throws MetadataParseException {
+ public SplitFetcher(Metadata metadata, ArchiveContext archiveContext, FetcherContext ctx, int recursionLevel) throws MetadataParseException {
actx = archiveContext;
fctx = ctx;
overrideLength = metadata.dataLength;
@@ -80,7 +80,7 @@
} else throw new MetadataParseException("Unknown splitfile
format: "+splitfileType);
segments = new Segment[segmentCount]; // initially null on all entries
if(segmentCount == 1) {
- segments[0] = new Segment(splitfileType, splitfileDataBlocks, splitfileCheckBlocks, this, archiveContext, ctx, maxTempLength, splitUseLengths, blockLength);
+ segments[0] = new Segment(splitfileType, splitfileDataBlocks, splitfileCheckBlocks, this, archiveContext, ctx, maxTempLength, splitUseLengths, recursionLevel+1);
} else {
int dataBlocksPtr = 0;
int checkBlocksPtr = 0;
@@ -149,6 +149,7 @@
private Segment chooseUnstartedSegment() {
synchronized(unstartedSegments) {
+ if(unstartedSegments.isEmpty()) return null;
int x = fctx.random.nextInt(unstartedSegments.size());
Segment s = (Segment) unstartedSegments.get(x);
unstartedSegments.remove(x);
@@ -184,6 +185,7 @@
long max = (finalLength < 0 ? 0 : (finalLength - bytesWritten));
bytesWritten += s.writeDecodedDataTo(os, max);
}
+ os.close();
} catch (IOException e) {
throw new FetchException(FetchException.BUCKET_ERROR, e);
} finally {
@@ -206,6 +208,10 @@
public void segmentFinished(Segment segment) {
synchronized(this) {
+ boolean allDone = true;
+ for(int i=0;i<segments.length;i++)
+ if(!segments[i].isFinished()) allDone = false;
+ if(allDone) allSegmentsFinished = true;
notifyAll();
}
}
Modified: trunk/freenet/src/freenet/client/StandardOnionFECCodec.java
===================================================================
--- trunk/freenet/src/freenet/client/StandardOnionFECCodec.java 2005-11-10 19:16:28 UTC (rev 7521)
+++ trunk/freenet/src/freenet/client/StandardOnionFECCodec.java 2005-11-11 00:20:30 UTC (rev 7522)
@@ -11,6 +11,7 @@
import freenet.support.Bucket;
import freenet.support.BucketFactory;
import freenet.support.LRUHashtable;
+import freenet.support.Logger;
/**
* FECCodec implementation using the onion code.
@@ -100,6 +101,7 @@
}
public void realDecode(SplitfileBlock[] dataBlockStatus, SplitfileBlock[] checkBlockStatus, int blockLength, BucketFactory bf) throws IOException {
+ Logger.minor(this, "Doing decode: "+dataBlockStatus.length+"
data blocks, "+checkBlockStatus.length+" check blocks, block length
"+blockLength+" with "+this);
if(dataBlockStatus.length + checkBlockStatus.length != n)
throw new IllegalArgumentException();
if(dataBlockStatus.length != k)
@@ -167,7 +169,7 @@
}
}
}
- for(int i=0;i<k;i++) {
+ for(int i=0;i<n;i++) {
if(writers[i] != null) writers[i].close();
if(readers[i] != null) readers[i].close();
}
@@ -205,6 +207,7 @@
* Do the actual encode.
*/
private void realEncode(SplitfileBlock[] dataBlockStatus, SplitfileBlock[] checkBlockStatus, int blockLength, BucketFactory bf) throws IOException {
+ Logger.minor(this, "Doing encode: "+dataBlockStatus.length+"
data blocks, "+checkBlockStatus.length+" check blocks, block length
"+blockLength+" with "+this);
if(dataBlockStatus.length + checkBlockStatus.length != n)
throw new IllegalArgumentException();
if(dataBlockStatus.length != k)
@@ -259,10 +262,10 @@
}
}
}
- for(int i=0;i<n;i++) {
+ for(int i=0;i<k;i++)
+ if(readers[i] != null) readers[i].close();
+ for(int i=0;i<n-k;i++)
if(writers[i] != null) writers[i].close();
- if(readers[i] != null) readers[i].close();
- }
// Set new buckets only after have a successful decode.
for(int i=0;i<checkBlockStatus.length;i++) {
checkBlockStatus[i].setData(buckets[i+k]);
Modified: trunk/freenet/src/freenet/client/StdSplitfileBlock.java
===================================================================
--- trunk/freenet/src/freenet/client/StdSplitfileBlock.java 2005-11-10 19:16:28 UTC (rev 7521)
+++ trunk/freenet/src/freenet/client/StdSplitfileBlock.java 2005-11-11 00:20:30 UTC (rev 7522)
@@ -35,6 +35,7 @@
public void start() {
checkStartable();
+ Logger.minor(this, "Starting "+this);
try {
Thread t = new Thread(this);
t.setDaemon(true);
Modified: trunk/freenet/src/freenet/node/Version.java
===================================================================
--- trunk/freenet/src/freenet/node/Version.java 2005-11-10 19:16:28 UTC (rev 7521)
+++ trunk/freenet/src/freenet/node/Version.java 2005-11-11 00:20:30 UTC (rev 7522)
@@ -20,7 +20,7 @@
public static final String protocolVersion = "1.0";
/** The build number of the current revision */
- public static final int buildNumber = 157;
+ public static final int buildNumber = 158;
/** Oldest build of Fred we will talk to */
public static final int lastGoodBuild = 153;
Modified: trunk/freenet/src/freenet/support/BucketTools.java
===================================================================
--- trunk/freenet/src/freenet/support/BucketTools.java 2005-11-10 19:16:28 UTC (rev 7521)
+++ trunk/freenet/src/freenet/support/BucketTools.java 2005-11-11 00:20:30 UTC (rev 7522)
@@ -316,6 +316,7 @@
Bucket bucket = bucketFactory.makeBucket(data.length);
OutputStream os = bucket.getOutputStream();
os.write(data);
+ os.close();
bucket.setReadOnly();
return bucket;
}