Author: toad
Date: 2005-11-09 16:09:35 +0000 (Wed, 09 Nov 2005)
New Revision: 7502
Added:
trunk/freenet/src/freenet/client/InsertSegment.java
trunk/freenet/src/freenet/client/RetryTracker.java
Modified:
trunk/freenet/src/freenet/client/FECCodec.java
trunk/freenet/src/freenet/client/FetchResult.java
trunk/freenet/src/freenet/client/Fetcher.java
trunk/freenet/src/freenet/client/FileInserter.java
trunk/freenet/src/freenet/client/HighLevelSimpleClientImpl.java
trunk/freenet/src/freenet/client/InserterContext.java
trunk/freenet/src/freenet/client/Segment.java
trunk/freenet/src/freenet/client/SplitFetcher.java
trunk/freenet/src/freenet/client/SplitInserter.java
trunk/freenet/src/freenet/client/SplitfileBlock.java
trunk/freenet/src/freenet/client/StandardOnionFECCodec.java
trunk/freenet/src/freenet/support/compress/Compressor.java
trunk/freenet/src/freenet/support/compress/GzipCompressor.java
Log:
Lots of work on splitfiles.
New RetryTracker object will be used by Segment, SplitInserter and others.
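For context, the intended pattern is roughly the following (a sketch only; RetryTracker's methods are as in the new file below, but the blocks array and processBlock() are hypothetical stand-ins for the actual fetch/insert step):

    RetryTracker tracker = new RetryTracker(maxRetries, random);
    for(int i=0;i<blocks.length;i++)
        tracker.addBlock(blocks[i]);
    while(tracker.moreBlocks()) {
        SplitfileBlock block = tracker.getBlock();
        if(block == null) break;
        try {
            processBlock(block); // hypothetical: attempt the fetch or insert
            tracker.success(block);
        } catch (Exception e) {
            tracker.nonfatalError(block); // requeued one retry level up, until maxLevel
        }
    }
    int permanentlyFailed = tracker.countFailedBlocks();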
Modified: trunk/freenet/src/freenet/client/FECCodec.java
===================================================================
--- trunk/freenet/src/freenet/client/FECCodec.java	2005-11-09 15:03:58 UTC (rev 7501)
+++ trunk/freenet/src/freenet/client/FECCodec.java	2005-11-09 16:09:35 UTC (rev 7502)
@@ -14,6 +14,18 @@
*/
abstract class FECCodec {
+ public static int getCodecMaxSegmentSize(short splitfileType) {
+ if(splitfileType == Metadata.SPLITFILE_NONREDUNDANT)
+ return -1;
+ if(splitfileType == Metadata.SPLITFILE_ONION_STANDARD)
+ return 128;
+ throw new IllegalArgumentException();
+ }
+
+ /**
+ * Get a codec where we know both the number of data blocks and the number
+ * of check blocks, and the codec type. Normally for decoding.
+ */
public static FECCodec getCodec(short splitfileType, int dataBlocks, int checkBlocks) {
if(splitfileType == Metadata.SPLITFILE_NONREDUNDANT)
return null;
@@ -23,6 +35,22 @@
}
/**
+ * Get a codec where we know only the number of data blocks and the codec
+ * type. Normally for encoding.
+ */
+ public static FECCodec getCodec(short splitfileType, int dataBlocks) {
+ if(splitfileType == Metadata.SPLITFILE_NONREDUNDANT)
+ return null;
+ if(splitfileType == Metadata.SPLITFILE_ONION_STANDARD) {
+ int checkBlocks = dataBlocks;
+ checkBlocks += (dataBlocks>>1);
+ if((dataBlocks & 1) == 1) checkBlocks++;
+ return StandardOnionFECCodec.getInstance(dataBlocks, checkBlocks);
+ }
+ else return null;
+ }
+
+ /**
* Decode all missing *data* blocks.
* Requires that the total number of available blocks is equal to or greater than the length of
* the data blocks array. (i.e. it is >= k).
@@ -37,12 +65,16 @@
/**
* Encode all missing *check* blocks.
* Requires that all the data blocks be present.
- * @param dataBlockStatus The data blocks.
- * @param checkBlockStatus The check blocks.
+ * @param dataBlocks The data blocks.
+ * @param checkBlocks The check blocks.
* @param blockLength The block length in bytes.
* @param bf The BucketFactory to use to generate buckets.
* @throws IOException If there is an error in encoding caused by an I/O error (usually involving buckets).
*/
- public abstract void encode(BlockStatus[] dataBlockStatus, BlockStatus[] checkBlockStatus, int blockLength, BucketFactory bucketFactory);
+ public abstract void encode(SplitfileBlock[] dataBlocks, SplitfileBlock[] checkBlocks, int blockLength, BucketFactory bucketFactory);
+ /**
+ * How many check blocks?
+ */
+ public abstract int countCheckBlocks();
}
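For the standard onion codec, the new two-argument getCodec() derives the check block count as dataBlocks + ceil(dataBlocks/2), i.e. n = ceil(1.5k). A standalone check of that arithmetic (plain Java; not part of the commit):

    public class CheckBlockCount {
        // Mirrors the bit-twiddling in FECCodec.getCodec(short, int).
        static int countCheckBlocks(int dataBlocks) {
            int checkBlocks = dataBlocks + (dataBlocks >> 1);
            if((dataBlocks & 1) == 1) checkBlocks++;
            return checkBlocks;
        }

        public static void main(String[] args) {
            // 1 -> 2, 2 -> 3, 3 -> 5, 4 -> 6
            for(int k = 1; k <= 4; k++)
                System.out.println(k + " data -> " + countCheckBlocks(k) + " check");
            // 128 data blocks (the max segment size) -> 192 check blocks
            System.out.println("128 data -> " + countCheckBlocks(128) + " check");
        }
    }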
Modified: trunk/freenet/src/freenet/client/FetchResult.java
===================================================================
--- trunk/freenet/src/freenet/client/FetchResult.java	2005-11-09 15:03:58 UTC (rev 7501)
+++ trunk/freenet/src/freenet/client/FetchResult.java	2005-11-09 16:09:35 UTC (rev 7502)
@@ -18,6 +18,15 @@
data = fetched;
}
+ /**
+ * Create a FetchResult with a new Bucket of data, but everything else
+ * the same as the old one.
+ */
+ public FetchResult(FetchResult fr, Bucket output) {
+ this.data = output;
+ this.metadata = fr.metadata;
+ }
+
/** Get the MIME type of the fetched data.
* If unknown, returns application/octet-stream. */
public String getMimeType() {
Modified: trunk/freenet/src/freenet/client/Fetcher.java
===================================================================
--- trunk/freenet/src/freenet/client/Fetcher.java	2005-11-09 15:03:58 UTC (rev 7501)
+++ trunk/freenet/src/freenet/client/Fetcher.java	2005-11-09 16:09:35 UTC (rev 7502)
@@ -11,6 +11,7 @@
import freenet.support.Bucket;
import freenet.support.BucketTools;
import freenet.support.Logger;
+import freenet.support.compress.Compressor;
/** Class that does the actual fetching. Does not have to have a user friendly
* interface!
@@ -240,7 +241,19 @@
return runMetadata(dm, recursionLevel+1, key, metaStrings, metadata, container, thisKey, dontEnterImplicitArchives);
}
}
- return realRun(dm, recursionLevel, uri, dontEnterImplicitArchives);
+ FetchResult fr = realRun(dm, recursionLevel, uri, dontEnterImplicitArchives);
+ if(metadata.compressed) {
+ Compressor codec = Compressor.getCompressionAlgorithmByMetadataID(metadata.compressionCodec);
+ Bucket data = fr.data;
+ Bucket output;
+ try {
+ output = codec.decompress(data, ctx.bucketFactory);
+ } catch (IOException e) {
+ throw new FetchException(FetchException.BUCKET_ERROR, e);
+ }
+ return new FetchResult(fr, output);
+ }
+ return fr;
} else if(metadata.isSplitfile()) {
// Straight data splitfile.
// Might be used by parents for something else, in which case they will set dontEnterImplicitArchives.
@@ -265,6 +278,14 @@
SplitFetcher sf = new SplitFetcher(metadata, archiveContext, newCtx);
Bucket sfResult = sf.fetch(); // will throw in event of error
+ if(metadata.compressed) {
+ Compressor codec = Compressor.getCompressionAlgorithmByMetadataID(metadata.compressionCodec);
+ try {
+ sfResult = codec.decompress(sfResult, ctx.bucketFactory);
+ } catch (IOException e) {
+ throw new FetchException(FetchException.BUCKET_ERROR, e);
+ }
+ }
return new FetchResult(dm, sfResult);
} else {
Logger.error(this, "Don't know what to do with metadata: "+metadata);
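The decompression hooks added above delegate to the new Compressor.decompress() (see the Compressor and GzipCompressor changes below). For illustration, the same stream-copy shape using java.util.zip, with plain streams standing in for Buckets (a sketch, not commit code):

    import java.io.IOException;
    import java.io.InputStream;
    import java.io.OutputStream;
    import java.util.zip.GZIPInputStream;

    public class GunzipCopy {
        // Copy a gzipped input stream to an output stream, 4kB at a time.
        public static void gunzip(InputStream in, OutputStream out) throws IOException {
            GZIPInputStream gis = new GZIPInputStream(in);
            byte[] buffer = new byte[4096];
            int x;
            while((x = gis.read(buffer)) > 0)
                out.write(buffer, 0, x);
            gis.close();
        }
    }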
Modified: trunk/freenet/src/freenet/client/FileInserter.java
===================================================================
--- trunk/freenet/src/freenet/client/FileInserter.java	2005-11-09 15:03:58 UTC (rev 7501)
+++ trunk/freenet/src/freenet/client/FileInserter.java	2005-11-09 16:09:35 UTC (rev 7502)
@@ -97,7 +97,7 @@
}
// Too big, encode to a splitfile
- SplitInserter splitInsert = new SplitInserter(data, block.clientMetadata);
+ SplitInserter splitInsert = new SplitInserter(data, block.clientMetadata, bestCodec, ctx.splitfileAlgorithm, ctx);
return splitInsert.run();
}
Modified: trunk/freenet/src/freenet/client/HighLevelSimpleClientImpl.java
===================================================================
--- trunk/freenet/src/freenet/client/HighLevelSimpleClientImpl.java	2005-11-09 15:03:58 UTC (rev 7501)
+++ trunk/freenet/src/freenet/client/HighLevelSimpleClientImpl.java	2005-11-09 16:09:35 UTC (rev 7502)
@@ -65,7 +65,7 @@
}
public FreenetURI insert(InsertBlock insert) throws InserterException {
- InserterContext context = new InserterContext(client, bucketFactory);
+ InserterContext context = new InserterContext(client, bucketFactory, random);
FileInserter i = new FileInserter(context);
return i.run(insert, false);
}
Added: trunk/freenet/src/freenet/client/InsertSegment.java
===================================================================
--- trunk/freenet/src/freenet/client/InsertSegment.java	2005-11-09 15:03:58 UTC (rev 7501)
+++ trunk/freenet/src/freenet/client/InsertSegment.java	2005-11-09 16:09:35 UTC (rev 7502)
@@ -0,0 +1,16 @@
+package freenet.client;
+
+/**
+ * Segment of a splitfile, for insertion purposes.
+ */
+public class InsertSegment {
+
+ final short splitfileAlgorithm;
+ final SplitfileBlock[] origDataBlocks;
+
+ public InsertSegment(short splitfileAlgorithm, SplitfileBlock[] origDataBlocks) {
+ this.splitfileAlgorithm = splitfileAlgorithm;
+ this.origDataBlocks = origDataBlocks;
+ }
+
+}
Modified: trunk/freenet/src/freenet/client/InserterContext.java
===================================================================
--- trunk/freenet/src/freenet/client/InserterContext.java	2005-11-09 15:03:58 UTC (rev 7501)
+++ trunk/freenet/src/freenet/client/InserterContext.java	2005-11-09 16:09:35 UTC (rev 7502)
@@ -1,5 +1,6 @@
package freenet.client;
+import freenet.crypt.RandomSource;
import freenet.node.SimpleLowLevelClient;
import freenet.support.BucketFactory;
@@ -10,11 +11,15 @@
final BucketFactory bf;
/** If true, don't try to compress the data */
final boolean dontCompress;
+ final RandomSource random;
+ final short splitfileAlgorithm;
- public InserterContext(SimpleLowLevelClient client, BucketFactory bf) {
+ public InserterContext(SimpleLowLevelClient client, BucketFactory bf, RandomSource random) {
this.client = client;
this.bf = bf;
+ this.random = random;
dontCompress = false;
+ splitfileAlgorithm = Metadata.SPLITFILE_ONION_STANDARD;
}
}
Added: trunk/freenet/src/freenet/client/RetryTracker.java
===================================================================
--- trunk/freenet/src/freenet/client/RetryTracker.java	2005-11-09 15:03:58 UTC (rev 7501)
+++ trunk/freenet/src/freenet/client/RetryTracker.java	2005-11-09 16:09:35 UTC (rev 7502)
@@ -0,0 +1,236 @@
+package freenet.client;
+
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Vector;
+
+import freenet.crypt.RandomSource;
+
+/**
+ * Keeps a list of SplitfileBlocks for each retry level.
+ */
+public class RetryTracker {
+
+ static class Level {
+ final int level;
+ final Vector blocks;
+ final RetryTracker tracker;
+
+ Level(RetryTracker tracker, int l) {
+ level = l;
+ this.tracker = tracker;
+ blocks = new Vector();
+ }
+
+ /**
+ * Return a random block.
+ * Call synchronized on RetryTracker.
+ */
+ SplitfileBlock getBlock() {
+ int len = blocks.size();
+ int x = tracker.random.nextInt(len);
+ SplitfileBlock block = (SplitfileBlock) blocks.remove(x);
+ if(blocks.isEmpty())
+ tracker.removeLevel(level);
+ return block;
+ }
+
+ void add(SplitfileBlock block) {
+ block.setLevel(this); // nonfatalError/fatalError look the level up via the block
+ blocks.add(block);
+ }
+
+ /**
+ * Remove a specific block.
+ * Remove self if run out of blocks.
+ * Call synchronized on RetryTracker.
+ */
+ void remove(SplitfileBlock block) {
+ blocks.remove(block);
+ if(blocks.isEmpty())
+ tracker.removeLevel(level);
+ }
+ }
+
+ final HashMap levels;
+ final RandomSource random;
+ final int maxLevel;
+ final HashSet failedBlocksTooManyRetries;
+ final HashSet failedBlocksFatalErrors;
+ final HashSet runningBlocks;
+ final HashSet succeededBlocks;
+ private int curMaxLevel;
+ private int curMinLevel;
+
+ public RetryTracker(int maxLevel, RandomSource random) {
+ levels = new HashMap();
+ this.maxLevel = maxLevel;
+ this.random = random;
+ curMaxLevel = curMinLevel = 0;
+ failedBlocksTooManyRetries = new HashSet();
+ failedBlocksFatalErrors = new HashSet();
+ runningBlocks = new HashSet();
+ succeededBlocks = new HashSet();
+ }
+
+ /** Remove a level */
+ private synchronized void removeLevel(int level) {
+ Integer x = new Integer(level);
+ levels.remove(x);
+ if(curMinLevel == level) {
+ for(int i=curMinLevel;i<=curMaxLevel;i++) {
+ x = new Integer(i);
+ if(levels.get(x) != null) {
+ curMinLevel = i;
+ return;
+ }
+ }
+ curMinLevel = curMaxLevel = 0;
+ return;
+ }
+ if(curMaxLevel == level) {
+ for(int i=curMaxLevel;i>=curMinLevel;i--) {
+ x = new Integer(i);
+ if(levels.get(x) != null) {
+ curMaxLevel = i;
+ return;
+ }
+ }
+ curMinLevel = curMaxLevel = 0;
+ return;
+ }
+ }
+
+ /** Add a level */
+ private synchronized Level addLevel(int level, Integer x) {
+ if(level < 0) throw new IllegalArgumentException();
+ Level l = new Level(this, level);
+ levels.put(x, l);
+ if(level > curMaxLevel) curMaxLevel = level;
+ if(level < curMinLevel) curMinLevel = level;
+ return l;
+ }
+
+ /** Get an existing level, or add one if necessary */
+ private synchronized Level makeLevel(int level) {
+ Integer x = new Integer(level);
+ Level l = (Level) levels.get(x);
+ if(l == null) {
+ return addLevel(level, x);
+ }
+ else return l;
+ }
+
+ /**
+ * Add a block at retry level zero.
+ */
+ public synchronized void addBlock(SplitfileBlock block) {
+ Level l = makeLevel(0);
+ l.add(block);
+ }
+
+ /**
+ * A block got a nonfatal error and should be retried.
+ * Move it out of the running list and back into the relevant list, unless
+ * we have run out of retries.
+ */
+ public synchronized void nonfatalError(SplitfileBlock block) {
+ runningBlocks.remove(block);
+ Level l = block.getLevel();
+ if(l == null) throw new IllegalArgumentException();
+ if(l.tracker != this) throw new IllegalArgumentException("Belongs to wrong tracker");
+ int levelNumber = l.level;
+ l.remove(block);
+ levelNumber++;
+ if(levelNumber > maxLevel) {
+ failedBlocksTooManyRetries.add(block);
+ } else {
+ Level newLevel = makeLevel(levelNumber);
+ newLevel.add(block);
+ }
+ }
+
+ /**
+ * A block got a fatal error and should not be retried.
+ * Move it into the fatal error list.
+ */
+ public synchronized void fatalError(SplitfileBlock block) {
+ runningBlocks.remove(block);
+ Level l = block.getLevel();
+ if(l == null) throw new IllegalArgumentException();
+ if(l.tracker != this) throw new IllegalArgumentException("Belongs to wrong tracker");
+ l.remove(block);
+ failedBlocksFatalErrors.add(block);
+ }
+
+ public synchronized void success(SplitfileBlock block) {
+ runningBlocks.remove(block);
+ succeededBlocks.add(block);
+ }
+
+ /**
+ * Get the next block to try. This is a randomly selected block from the
+ * lowest retry level currently available. Move it into the running list.
+ */
+ public synchronized SplitfileBlock getBlock() {
+ Level l = (Level) levels.get(new Integer(curMinLevel));
+ if(l == null) return null; // nothing left to try
+ SplitfileBlock block = l.getBlock();
+ runningBlocks.add(block); // as documented: block is now running
+ return block;
+ }
+
+ /**
+ * Get all running blocks.
+ */
+ public synchronized SplitfileBlock[] runningBlocks() {
+ return (SplitfileBlock[])
+ runningBlocks.toArray(new SplitfileBlock[runningBlocks.size()]);
+ }
+
+ /**
+ * Get all blocks with fatal errors.
+ * SplitfileBlocks are assumed to remember their errors, so we don't.
+ */
+ public synchronized SplitfileBlock[] errorBlocks() {
+ return (SplitfileBlock[])
+ failedBlocksFatalErrors.toArray(new SplitfileBlock[failedBlocksFatalErrors.size()]);
+ }
+
+ /**
+ * Get all successfully downloaded blocks.
+ */
+ public synchronized SplitfileBlock[] succeededBlocks() {
+ return (SplitfileBlock[])
+ succeededBlocks.toArray(new SplitfileBlock[succeededBlocks.size()]);
+ }
+
+ /**
+ * Count the number of blocks which could not be fetched because we ran out
+ * of retries.
+ */
+ public synchronized int countFailedBlocks() {
+ return failedBlocksTooManyRetries.size();
+ }
+
+ /**
+ * Highest number of completed retries of any block so far.
+ */
+ public synchronized int highestRetries() {
+ return curMaxLevel;
+ }
+
+ /**
+ * Lowest number of completed retries of any block so far.
+ */
+ public synchronized int lowestRetries() {
+ return curMinLevel;
+ }
+
+ /**
+ * Are there more blocks to process?
+ */
+ public synchronized boolean moreBlocks() {
+ return !levels.isEmpty();
+ }
+}
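A short worked trace of the level bookkeeping (a and b are hypothetical SplitfileBlocks, random is a freenet.crypt.RandomSource; behaviour as implemented above):

    RetryTracker t = new RetryTracker(3, random); // up to 3 retries per block
    t.addBlock(a);                   // level 0
    t.addBlock(b);                   // level 0
    SplitfileBlock x = t.getBlock(); // random pick from level 0, now in runningBlocks
    t.nonfatalError(x);              // x moves to level 1; lowestRetries()==0, highestRetries()==1
    t.success(t.getBlock());         // the other block succeeds
    // Three more nonfatal errors on x (at levels 1, 2 and 3) push it past
    // maxLevel, after which countFailedBlocks()==1 and moreBlocks()==false.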
Modified: trunk/freenet/src/freenet/client/Segment.java
===================================================================
--- trunk/freenet/src/freenet/client/Segment.java	2005-11-09 15:03:58 UTC (rev 7501)
+++ trunk/freenet/src/freenet/client/Segment.java	2005-11-09 16:09:35 UTC (rev 7502)
@@ -19,7 +19,7 @@
*/
public class Segment implements Runnable {
- public class BlockStatus implements Runnable, SplitfileBlock {
+ public class BlockStatus extends SplitfileBlock implements Runnable {
/** Splitfile index - [0,k[ is the data blocks, [k,n[ is the check blocks */
final int index;
@@ -196,6 +196,8 @@
private final int recursionLevel;
/** Number of blocks which got fatal errors */
private int fatalErrorCount;
+ /** Retry tracker */
+ private RetryTracker tracker;
/**
* Create a Segment.
Modified: trunk/freenet/src/freenet/client/SplitFetcher.java
===================================================================
--- trunk/freenet/src/freenet/client/SplitFetcher.java	2005-11-09 15:03:58 UTC (rev 7501)
+++ trunk/freenet/src/freenet/client/SplitFetcher.java	2005-11-09 16:09:35 UTC (rev 7502)
@@ -2,6 +2,7 @@
import java.io.IOException;
import java.io.OutputStream;
+import java.util.Vector;
import com.onionnetworks.fec.FECCode;
import com.onionnetworks.fec.FECCodeFactory;
@@ -48,9 +49,7 @@
/** Currently fetching segment */
private Segment fetchingSegment;
/** Array of unstarted segments. Modify synchronized. */
- private Segment[] unstartedSegments;
- /** Number of unstarted segments. Ditto. */
- private int unstartedSegmentsCount;
+ private Vector unstartedSegments;
/** Override length. If this is positive, truncate the splitfile to this length. */
private long overrideLength;
/** Accept non-full splitfile chunks? */
@@ -100,8 +99,9 @@
segments[i] = new Segment(splitfileType, dataBlocks, checkBlocks, this, archiveContext, ctx, maxTempLength, splitUseLengths, blockLength);
}
}
- unstartedSegments = segments;
- unstartedSegmentsCount = segments.length;
+ unstartedSegments = new Vector();
+ for(int i=0;i<segments.length;i++)
+ unstartedSegments.add(segments[i]);
}
/**
@@ -132,7 +132,7 @@
if(s == null) {
// All segments have started
} else {
- start(s); // will keep unstartedSegments up to date
+ s.start();
}
}
if(allSegmentsFinished) {
@@ -147,24 +147,16 @@
}
}
- private synchronized void start(Segment start) {
- start.start();
- int j = 0;
- for(int i=0;i<unstartedSegmentsCount;i++) {
- Segment s = unstartedSegments[i];
- if(!s.isStarted()) {
- unstartedSegments[j] = unstartedSegments[i];
- j++;
- }
+ private Segment chooseUnstartedSegment() {
+ synchronized(unstartedSegments) {
+ if(unstartedSegments.isEmpty()) return null; // caller checks for null: all segments started
+ int x = fctx.random.nextInt(unstartedSegments.size());
+ Segment s = (Segment) unstartedSegments.get(x);
+ unstartedSegments.remove(x);
+ return s;
}
- unstartedSegmentsCount = j;
}
- private Segment chooseUnstartedSegment() {
- if(unstartedSegmentsCount == 0) return null;
- return unstartedSegments[fctx.random.nextInt(unstartedSegmentsCount)];
- }
-
/** Return the final status of the fetch. Throws an exception, or returns a
* Bucket containing the fetched data.
* @throws FetchException If the fetch failed for some reason.
Modified: trunk/freenet/src/freenet/client/SplitInserter.java
===================================================================
--- trunk/freenet/src/freenet/client/SplitInserter.java	2005-11-09 15:03:58 UTC (rev 7501)
+++ trunk/freenet/src/freenet/client/SplitInserter.java	2005-11-09 16:09:35 UTC (rev 7502)
@@ -1,25 +1,140 @@
package freenet.client;
+import java.util.Vector;
+
import freenet.keys.FreenetURI;
+import freenet.keys.NodeCHK;
import freenet.support.Bucket;
+import freenet.support.BucketTools;
+import freenet.support.compress.Compressor;
/**
* Insert a splitfile.
*/
public class SplitInserter {
- public SplitInserter(Bucket data, ClientMetadata clientMetadata) {
- // TODO Auto-generated constructor stub
+ final Bucket origData;
+ final long dataLength;
+ final ClientMetadata clientMetadata;
+ final short compressionCodec;
+ final short splitfileAlgorithm;
+ final InserterContext ctx;
+ SplitfileBlock[] origDataBlocks;
+
+ public SplitInserter(Bucket data, ClientMetadata clientMetadata, Compressor compressor, short splitfileAlgorithm, InserterContext ctx) {
+ this.origData = data;
+ this.clientMetadata = clientMetadata;
+ if(compressor == null)
+ compressionCodec = -1;
+ else
+ compressionCodec = compressor.codecNumberForMetadata();
+ this.splitfileAlgorithm = splitfileAlgorithm;
+ this.ctx = ctx;
+ this.dataLength = data.size();
}
+ InsertSegment encodingSegment;
+ InsertSegment[] segments;
+ final Vector unstartedSegments = new Vector();
+ boolean allSegmentsFinished = false;
+
/**
* Inserts the splitfile.
* @return The URI of the resulting file.
*/
public FreenetURI run() {
- throw new UnsupportedOperationException();
+ // Create the splitfile
+ int segmentSize = FECCodec.getCodecMaxSegmentSize(splitfileAlgorithm);
+
+ splitIntoBlocks();
+
+ splitIntoSegments(segmentSize);
+
+ // Encode the last segment (which is always shortest)
+
+ encodeSegment(segments.length-1);
+
+ // Then start the insertion thread
+
+ startInsertionThread();
+
+ // Then encode the rest
+
+ for(int i=0;i<segments.length-1;i++)
+ encodeSegment(i);
+
+ // Then wait for the insertion thread to finish
+
+ return waitForCompletion();
+ }
+
+ private void splitIntoBlocks() {
+ Bucket[] dataBuckets = BucketTools.split(origData, NodeCHK.BLOCK_SIZE);
+ origDataBlocks = new SplitfileBlock[dataBuckets.length];
+ for(int i=0;i<origDataBlocks.length;i++) {
+ origDataBlocks[i] = new BucketWrapper(dataBuckets[i], i);
+ }
+ }
+
+ /**
+ * Create the metadata document. Insert it. Return its URI.
+ */
+ private FreenetURI finalStatus() {
// TODO Auto-generated method stub
+ return null;
+ }
+
+ /**
+ * Group the blocks into segments.
+ */
+ private void splitIntoSegments(int segmentSize) {
+ int dataBlocks = origDataBlocks.length;
+
+ // First split the data up
+ if(dataBlocks < segmentSize || segmentSize == -1) {
+ // Single segment
+ InsertSegment onlySeg = new InsertSegment(splitfileAlgorithm, origDataBlocks);
+ unstartedSegments.add(onlySeg);
+ } else {
+ int j = 0;
+ for(int i=segmentSize;;i+=segmentSize) {
+ if(i > dataBlocks) i = dataBlocks;
+ SplitfileBlock[] seg = new SplitfileBlock[i-j];
+ System.arraycopy(origDataBlocks, j, seg, 0, i-j);
+ unstartedSegments.add(new InsertSegment(splitfileAlgorithm, seg));
+ j = i;
+ if(i == dataBlocks) break;
+ }
+ }
+ segments = (InsertSegment[]) unstartedSegments.toArray(new InsertSegment[unstartedSegments.size()]);
+ }
+
+ public static class BucketWrapper extends SplitfileBlock {
+
+ Bucket data;
+ int number;
+ public BucketWrapper(Bucket data, int number) {
+ this.data = data;
+ this.number = number;
+ }
+
+ public int getNumber() {
+ return number;
+ }
+
+ public boolean hasData() {
+ return data != null;
+ }
+
+ public Bucket getData() {
+ return data;
+ }
+
+ public void setData(Bucket data) {
+ this.data = data;
+ }
+
}
}
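To make the segmentation concrete: with the onion codec's 128-block maximum segment size, a 300-block file splits as below; the tail segment is the short one, which is why run() encodes the last segment first. A standalone sketch of the same loop (example figures, not commit code):

    public class SegmentSizes {
        public static void main(String[] args) {
            int dataBlocks = 300, segmentSize = 128; // hypothetical file of 300 CHK-sized blocks
            int j = 0;
            for(int i = segmentSize;; i += segmentSize) {
                if(i > dataBlocks) i = dataBlocks;
                System.out.println("segment of " + (i - j) + " blocks");
                j = i;
                if(i == dataBlocks) break;
            }
            // prints: 128, 128, 44
        }
    }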
Modified: trunk/freenet/src/freenet/client/SplitfileBlock.java
===================================================================
--- trunk/freenet/src/freenet/client/SplitfileBlock.java	2005-11-09 15:03:58 UTC (rev 7501)
+++ trunk/freenet/src/freenet/client/SplitfileBlock.java	2005-11-09 16:09:35 UTC (rev 7502)
@@ -1,20 +1,30 @@
package freenet.client;
+import freenet.client.RetryTracker.Level;
import freenet.support.Bucket;
/** Simple interface for a splitfile block */
-public interface SplitfileBlock {
+public abstract class SplitfileBlock {
/** Get block number. [0,k[ = data blocks, [k, n[ = check blocks */
- int getNumber();
+ abstract int getNumber();
/** Has data? */
- boolean hasData();
+ abstract boolean hasData();
/** Get data */
- Bucket getData();
+ abstract Bucket getData();
/** Set data */
- void setData(Bucket data);
+ abstract void setData(Bucket data);
+
+ private Level level;
+ final Level getLevel() {
+ return level;
+ }
+
+ final void setLevel(Level l) {
+ level = l;
+ }
}
Modified: trunk/freenet/src/freenet/client/StandardOnionFECCodec.java
===================================================================
--- trunk/freenet/src/freenet/client/StandardOnionFECCodec.java	2005-11-09 15:03:58 UTC (rev 7501)
+++ trunk/freenet/src/freenet/client/StandardOnionFECCodec.java	2005-11-09 16:09:35 UTC (rev 7502)
@@ -178,4 +178,8 @@
// TODO Auto-generated method stub
}
-}
\ No newline at end of file
+
+ public int countCheckBlocks() {
+ return n-k;
+ }
+}
Modified: trunk/freenet/src/freenet/support/compress/Compressor.java
===================================================================
--- trunk/freenet/src/freenet/support/compress/Compressor.java	2005-11-09 15:03:58 UTC (rev 7501)
+++ trunk/freenet/src/freenet/support/compress/Compressor.java	2005-11-09 16:09:35 UTC (rev 7502)
@@ -16,6 +16,8 @@
public abstract Bucket compress(Bucket data, BucketFactory bf) throws IOException;
+ public abstract Bucket decompress(Bucket data, BucketFactory bucketFactory) throws IOException;
+
public short codecNumberForMetadata() {
return Metadata.COMPRESS_GZIP;
}
Modified: trunk/freenet/src/freenet/support/compress/GzipCompressor.java
===================================================================
--- trunk/freenet/src/freenet/support/compress/GzipCompressor.java	2005-11-09 15:03:58 UTC (rev 7501)
+++ trunk/freenet/src/freenet/support/compress/GzipCompressor.java	2005-11-09 16:09:35 UTC (rev 7502)
@@ -4,6 +4,7 @@
import java.io.InputStream;
import java.io.OutputStream;
import java.util.zip.DataFormatException;
+import java.util.zip.GZIPInputStream;
import java.util.zip.GZIPOutputStream;
import java.util.zip.Inflater;
@@ -25,10 +26,28 @@
if(x == 0) throw new IOException("Returned zero from
read()");
gos.write(buffer, 0, x);
}
+ is.close();
gos.close();
return output;
}
+ public Bucket decompress(Bucket data, BucketFactory bf) throws IOException {
+ Bucket output = bf.makeBucket(-1);
+ InputStream is = data.getInputStream();
+ OutputStream os = output.getOutputStream();
+ GZIPInputStream gis = new GZIPInputStream(is);
+ byte[] buffer = new byte[4096];
+ while(true) {
+ int x = gis.read(buffer);
+ if(x <= -1) break;
+ if(x == 0) throw new IOException("Returned zero from
read()");
+ os.write(buffer, 0, x);
+ }
+ os.close();
+ gis.close();
+ return output;
+ }
+
public int decompress(byte[] dbuf, int i, int j, byte[] output) throws DecompressException {
Inflater decompressor = new Inflater();
decompressor.setInput(dbuf, i, j);