Author: nextgens
Date: 2007-03-24 02:18:15 +0000 (Sat, 24 Mar 2007)
New Revision: 12300
Modified:
trunk/freenet/src/freenet/client/StandardOnionFECCodec.java
trunk/freenet/src/freenet/client/async/SplitFileFetcherSegment.java
Log:
Second part of the patch: all the FEC encoding is now serialized.
Next step: decoding.
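
For context, here is a minimal, hypothetical sketch of the queueing pattern this patch introduces: callers hand an encode job to addToQueue() together with a callback, a single FEC runner thread drains the queue so encodes never run in parallel, and onEncodedSegment() fires when a job finishes. Only the names addToQueue(), _awaitingJobs, FECRunner, fecRunnerThread and onEncodedSegment() come from the diff below; the FECJob holder class, the EncoderCallback name and all method bodies are assumptions for illustration, not the actual implementation.

import java.util.LinkedList;

// Sketch only: mirrors the serialization pattern, not the real StandardOnionFECCodec code.
public class FECEncodeQueueSketch {

	/** Callback in the spirit of StandardOnionFECCodecEncoderCallback. */
	public interface EncoderCallback {
		void onEncodedSegment();
	}

	/** One queued encode request (hypothetical holder class). */
	private static class FECJob {
		final Runnable encode;          // the actual FEC work, e.g. a codec.encode(...) call
		final EncoderCallback callback; // notified once the encode has run
		FECJob(Runnable encode, EncoderCallback callback) {
			this.encode = encode;
			this.callback = callback;
		}
	}

	private final LinkedList<FECJob> awaitingJobs = new LinkedList<FECJob>();
	private Thread runnerThread;

	/** Producer side: queue a job and lazily start the single runner thread. */
	public synchronized void addToQueue(Runnable encode, EncoderCallback callback) {
		awaitingJobs.addLast(new FECJob(encode, callback));
		if (runnerThread == null) {
			runnerThread = new Thread(new FECRunner(), "FEC encoder");
			runnerThread.setDaemon(true);
			runnerThread.start();
		}
		notifyAll();
	}

	/** Consumer side: drain the queue one job at a time, serializing all encodes. */
	private class FECRunner implements Runnable {
		public void run() {
			while (true) {
				FECJob job;
				synchronized (FECEncodeQueueSketch.this) {
					while (awaitingJobs.isEmpty()) {
						try {
							FECEncodeQueueSketch.this.wait();
						} catch (InterruptedException e) {
							return;
						}
					}
					job = awaitingJobs.removeFirst();
				}
				job.encode.run();                // the serialized FEC encode
				job.callback.onEncodedSegment(); // let the segment heal/free its buckets
			}
		}
	}
}
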
Modified: trunk/freenet/src/freenet/client/StandardOnionFECCodec.java
===================================================================
--- trunk/freenet/src/freenet/client/StandardOnionFECCodec.java 2007-03-24 01:49:08 UTC (rev 12299)
+++ trunk/freenet/src/freenet/client/StandardOnionFECCodec.java 2007-03-24 02:18:15 UTC (rev 12300)
@@ -250,6 +250,9 @@
}
}
+ /**
+ * @deprecated
+ */
public void encode(Bucket[] dataBlockStatus, Bucket[] checkBlockStatus, int blockLength, BucketFactory bf) throws IOException {
logMINOR = Logger.shouldLog(Logger.MINOR, getClass());
if(logMINOR)
@@ -275,6 +278,9 @@
currentThread.setPriority(currentThreadPriority);
}
+ /**
+ * @deprecated
+ */
public void encode(SplitfileBlock[] dataBlockStatus, SplitfileBlock[] checkBlockStatus, int blockLength, BucketFactory bf) throws IOException {
Bucket[] dataBlocks = new Bucket[dataBlockStatus.length];
Bucket[] checkBlocks = new Bucket[checkBlockStatus.length];
@@ -448,6 +454,20 @@
}
}
+ public void addToQueue(SplitfileBlock[] dataBlockStatus, SplitfileBlock[] checkBlockStatus, int blockLength, BucketFactory bucketFactory, StandardOnionFECCodecEncoderCallback callback){
+ Bucket[] dataBlocks = new Bucket[dataBlockStatus.length];
+ Bucket[] checkBlocks = new Bucket[checkBlockStatus.length];
+ for(int i=0;i<dataBlocks.length;i++)
+ dataBlocks[i] = dataBlockStatus[i].getData();
+ for(int i=0;i<checkBlocks.length;i++)
+ checkBlocks[i] = checkBlockStatus[i].getData();
+ addToQueue(dataBlocks, checkBlocks, blockLength, bucketFactory, callback);
+ for(int i=0;i<dataBlocks.length;i++)
+ dataBlockStatus[i].setData(dataBlocks[i]);
+ for(int i=0;i<checkBlocks.length;i++)
+ checkBlockStatus[i].setData(checkBlocks[i]);
+ }
+
private final LinkedList _awaitingJobs = new LinkedList();
private final FECRunner fecRunner = new FECRunner();
private Thread fecRunnerThread;
Modified: trunk/freenet/src/freenet/client/async/SplitFileFetcherSegment.java
===================================================================
--- trunk/freenet/src/freenet/client/async/SplitFileFetcherSegment.java 2007-03-24 01:49:08 UTC (rev 12299)
+++ trunk/freenet/src/freenet/client/async/SplitFileFetcherSegment.java 2007-03-24 02:18:15 UTC (rev 12300)
@@ -15,6 +15,8 @@
import freenet.client.Metadata;
import freenet.client.MetadataParseException;
import freenet.client.SplitfileBlock;
+import freenet.client.StandardOnionFECCodec;
+import freenet.client.StandardOnionFECCodec.StandardOnionFECCodecEncoderCallback;
import freenet.keys.CHKBlock;
import freenet.keys.ClientCHK;
import freenet.support.Logger;
@@ -179,7 +181,7 @@
}
}
- class Decoder implements Runnable {
+ class Decoder implements Runnable, StandardOnionFECCodecEncoderCallback {
public void run() {
@@ -231,41 +233,39 @@
// Encode any check blocks we don't have
if(codec != null) {
- try {
- codec.encode(dataBuckets, checkBuckets, 32768, fetchContext.bucketFactory);
- } catch (IOException e) {
- Logger.error(this, "Bucket error while healing: "+e, e);
+ StandardOnionFECCodec fec = (StandardOnionFECCodec) codec;
+ fec.addToQueue(dataBuckets, checkBuckets, 32768, fetchContext.bucketFactory, this);
+ }
+ }
+
+ public void onEncodedSegment() {
+ // Now insert *ALL* blocks on which we had at least one failure, and didn't eventually succeed
+ for(int i=0;i<dataBuckets.length;i++) {
+ boolean heal = false;
+ if(dataRetries[i] > 0)
+ heal = true;
+ if(heal) {
+ queueHeal(dataBuckets[i].getData());
+ } else {
+ dataBuckets[i].data.free();
+ dataBuckets[i].data = null;
}
-
- // Now insert *ALL* blocks on which we had at least one failure, and didn't eventually succeed
- for(int i=0;i<dataBuckets.length;i++) {
- boolean heal = false;
- if(dataRetries[i] > 0)
- heal = true;
- if(heal) {
- queueHeal(dataBuckets[i].getData());
- } else {
- dataBuckets[i].data.free();
- dataBuckets[i].data = null;
- }
- dataBuckets[i] = null;
- dataKeys[i] = null;
+ dataBuckets[i] = null;
+ dataKeys[i] = null;
+ }
+ for(int i=0;i<checkBuckets.length;i++) {
+ boolean heal = false;
+ if(checkRetries[i] > 0)
+ heal = true;
+ if(heal) {
+ queueHeal(checkBuckets[i].getData());
+ } else {
+ checkBuckets[i].data.free();
}
- for(int i=0;i<checkBuckets.length;i++) {
- boolean heal = false;
- if(checkRetries[i] > 0)
- heal = true;
- if(heal) {
- queueHeal(checkBuckets[i].getData());
- } else {
- checkBuckets[i].data.free();
- }
- checkBuckets[i] = null;
- checkKeys[i] = null;
- }
+ checkBuckets[i] = null;
+ checkKeys[i] = null;
}
}
-
}
private void queueHeal(Bucket data) {