Author: toad
Date: 2007-02-17 01:15:15 +0000 (Sat, 17 Feb 2007)
New Revision: 11827
Added:
trunk/freenet/src/freenet/client/async/SplitFileFetcherSubSegment.java
Modified:
trunk/freenet/src/freenet/client/async/SplitFileFetcher.java
trunk/freenet/src/freenet/client/async/SplitFileFetcherSegment.java
trunk/freenet/src/freenet/clients/http/StatisticsToadlet.java
trunk/freenet/src/freenet/support/SectoredRandomGrabArray.java
Log:
More work on sub-segments: each SplitFileFetcherSegment now keeps per-block retry counters and a Vector of SplitFileFetcherSubSegments, one per retry level, each registered on the scheduler as a single SendableGet, instead of one ClientGetState per block. Also guard against a zero time delta in StatisticsToadlet's bandwidth rates and add more MINOR logging to SectoredRandomGrabArray.
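
The central data structure here is the per-segment Vector of sub-segments, one per retry level, looked up (and lazily created) whenever a block has to be scheduled or retried. A minimal standalone sketch of that find-or-create pattern, using simplified hypothetical types rather than the real scheduler classes:

    import java.util.Vector;

    class RetryLevels {
        static class Level {
            final int retryCount;
            final Vector blockNums = new Vector();
            Level(int retryCount) { this.retryCount = retryCount; }
        }

        private final Vector levels = new Vector();

        // Mirrors SplitFileFetcherSegment.getSubSegment(): linear scan for
        // the level with this retry count, creating it if absent.
        synchronized Level get(int retryCount) {
            for(int i=0;i<levels.size();i++) {
                Level l = (Level) levels.get(i);
                if(l.retryCount == retryCount) return l;
            }
            Level l = new Level(retryCount);
            levels.add(l);
            return l;
        }
    }
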
Modified: trunk/freenet/src/freenet/client/async/SplitFileFetcher.java
===================================================================
--- trunk/freenet/src/freenet/client/async/SplitFileFetcher.java	2007-02-17 00:45:51 UTC (rev 11826)
+++ trunk/freenet/src/freenet/client/async/SplitFileFetcher.java	2007-02-17 01:15:15 UTC (rev 11827)
@@ -9,14 +9,13 @@
import freenet.client.ArchiveContext;
import freenet.client.ClientMetadata;
+import freenet.client.FetchContext;
import freenet.client.FetchException;
import freenet.client.FetchResult;
-import freenet.client.FetchContext;
import freenet.client.Metadata;
import freenet.client.MetadataParseException;
import freenet.keys.CHKBlock;
import freenet.keys.ClientCHK;
-import freenet.keys.FreenetURI;
import freenet.support.Fields;
import freenet.support.Logger;
import freenet.support.api.Bucket;
@@ -111,7 +110,9 @@
} else throw new MetadataParseException("Unknown splitfile format: "+splitfileType);
this.maxTempLength = fetchContext.maxTempLength;
if(Logger.shouldLog(Logger.MINOR, this))
- Logger.minor(this, "Algorithm: "+splitfileType+", blocks per segment: "+blocksPerSegment+", check blocks per segment: "+checkBlocksPerSegment+", segments: "+segmentCount);
+ Logger.minor(this, "Algorithm: "+splitfileType+", blocks per segment: "+blocksPerSegment+
+ ", check blocks per segment: "+checkBlocksPerSegment+", segments: "+segmentCount+
+ ", data blocks: "+splitfileDataBlocks.length+", check blocks: "+splitfileCheckBlocks.length);
segments = new SplitFileFetcherSegment[segmentCount]; // initially null on all entries
if(segmentCount == 1) {
// splitfile* will be overwritten, this is bad
Modified: trunk/freenet/src/freenet/client/async/SplitFileFetcherSegment.java
===================================================================
--- trunk/freenet/src/freenet/client/async/SplitFileFetcherSegment.java	2007-02-17 00:45:51 UTC (rev 11826)
+++ trunk/freenet/src/freenet/client/async/SplitFileFetcherSegment.java	2007-02-17 01:15:15 UTC (rev 11827)
@@ -5,6 +5,7 @@
import java.io.IOException;
import java.io.OutputStream;
+import java.util.Vector;
import freenet.client.ArchiveContext;
import freenet.client.FECCodec;
@@ -25,16 +26,17 @@
* A single segment within a SplitFileFetcher.
* This in turn controls a large number of SingleFileFetchers.
*/
-public class SplitFileFetcherSegment implements GetCompletionCallback {
+public class SplitFileFetcherSegment {
private static boolean logMINOR;
final short splitfileType;
final ClientCHK[] dataKeys;
final ClientCHK[] checkKeys;
- final ClientGetState[] dataBlockStatus;
- final ClientGetState[] checkBlockStatus;
final MinimalSplitfileBlock[] dataBuckets;
final MinimalSplitfileBlock[] checkBuckets;
+ final int[] dataRetries;
+ final int[] checkRetries;
+ final Vector subSegments;
final int minFetched;
final SplitFileFetcher parentFetcher;
final ArchiveContext archiveContext;
@@ -53,7 +55,8 @@
private int fatallyFailedBlocks;
private int failedBlocks;
private int fetchedBlocks;
- private final FailureCodeTracker errors;
+ final FailureCodeTracker errors;
+ private boolean finishing;
public SplitFileFetcherSegment(short splitfileType, ClientCHK[] splitfileDataBlocks, ClientCHK[] splitfileCheckBlocks, SplitFileFetcher fetcher, ArchiveContext archiveContext, FetchContext fetchContext, long maxTempLength, int recursionLevel) throws MetadataParseException, FetchException {
logMINOR = Logger.shouldLog(Logger.MINOR, this);
@@ -70,8 +73,6 @@
} else throw new MetadataParseException("Unknown splitfile type"+splitfileType);
finished = false;
decodedData = null;
- dataBlockStatus = new ClientGetState[dataKeys.length];
- checkBlockStatus = new ClientGetState[checkKeys.length];
dataBuckets = new MinimalSplitfileBlock[dataKeys.length];
checkBuckets = new MinimalSplitfileBlock[checkKeys.length];
for(int i=0;i<dataBuckets.length;i++) {
@@ -79,6 +80,9 @@
}
for(int i=0;i<checkBuckets.length;i++)
checkBuckets[i] = new MinimalSplitfileBlock(i+dataBuckets.length);
+ dataRetries = new int[dataKeys.length];
+ checkRetries = new int[checkKeys.length];
+ subSegments = new Vector();
this.fetchContext = fetchContext;
maxBlockLength = maxTempLength;
blockFetchContext = new FetchContext(fetchContext, FetchContext.SPLITFILE_DEFAULT_BLOCK_MASK, true);
@@ -91,9 +95,13 @@
}
public synchronized boolean isFinished() {
- return finished;
+ return finished || parentFetcher.parent.isCancelled();
}
+ public synchronized boolean isFinishing() {
+ return isFinished() || finishing;
+ }
+
/** Throw a FetchException, if we have one. Else do nothing. */
public synchronized void throwError() throws FetchException {
if(failureException != null)
@@ -124,21 +132,14 @@
return fetchedBlocks;
}
- /** How many blocks have currently running requests? */
- public int runningBlocks() {
- // FIXME implement or throw out
- return 0;
- }
-
/** How many blocks failed permanently due to fatal errors? */
public synchronized int fatallyFailedBlocks() {
return fatallyFailedBlocks;
}
- public synchronized void onSuccess(FetchResult result, ClientGetState state) {
+ public synchronized void onSuccess(FetchResult result, int blockNo) {
logMINOR = Logger.shouldLog(Logger.MINOR, this);
- if(finished) return;
- int blockNo = (int) state.getToken();
+ if(isFinished()) return;
if(blockNo < dataKeys.length) {
if(dataKeys[blockNo] == null) {
Logger.error(this, "Block already finished:
"+blockNo);
@@ -165,15 +166,8 @@
synchronized(this) {
if(startedDecode) return;
startedDecode = true;
+ finishing = true;
}
- for(int i=0;i<dataBlockStatus.length;i++) {
- ClientGetState f = dataBlockStatus[i];
- if(f != null) f.cancel();
- }
- for(int i=0;i<checkBlockStatus.length;i++) {
- ClientGetState f = checkBlockStatus[i];
- if(f != null) f.cancel();
- }
Runnable r = new Decoder();
Thread t = new Thread(r, "Decoder for "+this);
t.setDaemon(true);
@@ -204,7 +198,7 @@
decodedData = fetchContext.bucketFactory.makeBucket(-1);
if(logMINOR) Logger.minor(this, "Copying data from data blocks");
OutputStream os = decodedData.getOutputStream();
- for(int i=0;i<dataBlockStatus.length;i++) {
+ for(int i=0;i<dataBuckets.length;i++) {
SplitfileBlock status = dataBuckets[i];
Bucket data = status.getData();
BucketTools.copyTo(data, os, Long.MAX_VALUE);
@@ -239,13 +233,10 @@
}
// Now insert *ALL* blocks on which we had at least one failure, and didn't eventually succeed
- for(int i=0;i<dataBlockStatus.length;i++) {
+ for(int i=0;i<dataBuckets.length;i++) {
boolean heal = false;
- if(!dataBlocksSucceeded[i]) {
- SimpleSingleFileFetcher sf = (SimpleSingleFileFetcher) dataBlockStatus[i];
- if(sf.getRetryCount() > 0)
- heal = true;
- }
+ if(dataRetries[i] > 0)
+ heal = true;
if(heal) {
queueHeal(dataBuckets[i].getData());
} else {
@@ -253,23 +244,18 @@
dataBuckets[i].data = null;
}
dataBuckets[i] = null;
- dataBlockStatus[i] = null;
dataKeys[i] = null;
}
- for(int i=0;i<checkBlockStatus.length;i++) {
+ for(int i=0;i<checkBuckets.length;i++) {
boolean heal = false;
- if(!checkBlocksSucceeded[i]) {
- SimpleSingleFileFetcher sf = (SimpleSingleFileFetcher) checkBlockStatus[i];
- if(sf.getRetryCount() > 0)
- heal = true;
- }
+ if(checkRetries[i] > 0)
+ heal = true;
if(heal) {
queueHeal(checkBuckets[i].getData());
} else {
checkBuckets[i].data.free();
}
checkBuckets[i] = null;
- checkBlockStatus[i] = null;
checkKeys[i] = null;
}
}
@@ -283,38 +269,71 @@
}
/** This is after any retries and therefore is either out-of-retries or fatal */
- public synchronized void onFailure(FetchException e, ClientGetState state) {
+ public void onFatalFailure(FetchException e, int blockNo) {
logMINOR = Logger.shouldLog(Logger.MINOR, this);
- int blockNo = (int) state.getToken();
- if(blockNo < dataKeys.length) {
- if(dataKeys[blockNo] == null) {
- Logger.error(this, "Block already finished:
"+blockNo);
+ synchronized(this) {
+ if(blockNo < dataKeys.length) {
+ if(dataKeys[blockNo] == null) {
+ Logger.error(this, "Block already
finished: "+blockNo);
+ return;
+ }
+ dataKeys[blockNo] = null;
+ } else if(blockNo < checkKeys.length + dataKeys.length) {
+ blockNo -= dataKeys.length;
+ if(checkKeys[blockNo] == null) {
+ Logger.error(this, "Check block already
finished: "+blockNo);
+ return;
+ }
+ checkKeys[blockNo] = null;
+ } else
+ Logger.error(this, "Unrecognized block number:
"+blockNo, new Exception("error"));
+ // :(
+ if(logMINOR) Logger.minor(this, "Permanently failed block: "+blockNo+" on "+this);
+ if(e.isFatal())
+ fatallyFailedBlocks++;
+ else
+ failedBlocks++;
+ if(failedBlocks + fatallyFailedBlocks <= (dataKeys.length + checkKeys.length - minFetched)) return;
+ }
+ fail(new FetchException(FetchException.SPLITFILE_ERROR, errors));
+ }
+
+ /** A request has failed non-fatally, so the block may be retried */
+ public void onNonFatalFailure(FetchException e, int blockNo) {
+ int tries;
+ synchronized(this) {
+ if(blockNo < dataKeys.length) {
+ tries = dataRetries[blockNo]++;
+ if(dataRetries[blockNo] > blockFetchContext.maxNonSplitfileRetries) {
+ onFatalFailure(e, blockNo);
+ return;
+ }
+ } else {
+ blockNo -= dataKeys.length;
+ tries = checkRetries[blockNo]++;
+ if(checkRetries[blockNo] > blockFetchContext.maxNonSplitfileRetries) {
+ onFatalFailure(e, blockNo);
+ return;
+ }
}
- dataKeys[blockNo] = null;
- } else if(blockNo < checkKeys.length + dataKeys.length) {
- blockNo -= dataKeys.length;
- if(checkKeys[blockNo] == null) {
- Logger.error(this, "Check block already
finished: "+blockNo);
- return;
+ }
+ // If we are here we are going to retry
+ getSubSegment(tries).add(blockNo);
+ }
+
+ private SplitFileFetcherSubSegment getSubSegment(int retryCount) {
+ SplitFileFetcherSubSegment sub;
+ synchronized(this) {
+ for(int i=0;i<subSegments.size();i++) {
+ sub = (SplitFileFetcherSubSegment) subSegments.get(i);
+ if(sub.retryCount == retryCount) return sub;
}
- checkKeys[blockNo] = null;
- } else
- Logger.error(this, "Unrecognized block number:
"+blockNo, new Exception("error"));
- // :(
- if(logMINOR) Logger.minor(this, "Permanently failed block: "+state+" on "+this);
- if(e.isFatal())
- fatallyFailedBlocks++;
- else
- failedBlocks++;
- // FIXME this may not be accurate across all the retries?
- if(e.errorCodes != null)
- errors.merge(e.errorCodes);
- else
- errors.inc(new Integer(e.mode), state == null ? 1 : ((SingleFileFetcher)state).getRetryCount());
- if(failedBlocks + fatallyFailedBlocks > (dataKeys.length + checkKeys.length - minFetched)) {
- fail(new FetchException(FetchException.SPLITFILE_ERROR, errors));
+ sub = new SplitFileFetcherSubSegment(this, retryCount);
+ subSegments.add(sub);
}
+ sub.schedule();
+ return sub;
}
private void fail(FetchException e) {
@@ -326,10 +345,7 @@
Logger.error(this, "Failing with "+e+" but
already started decode", e);
return;
}
- for(int i=0;i<dataBlockStatus.length;i++) {
- ClientGetState f = dataBlockStatus[i];
- if(f != null)
- f.cancel();
+ for(int i=0;i<dataBuckets.length;i++) {
MinimalSplitfileBlock b = dataBuckets[i];
if(b != null) {
Bucket d = b.getData();
@@ -337,10 +353,7 @@
}
dataBuckets[i] = null;
}
- for(int i=0;i<checkBlockStatus.length;i++) {
- ClientGetState f = checkBlockStatus[i];
- if(f != null)
- f.cancel();
+ for(int i=0;i<checkBuckets.length;i++) {
MinimalSplitfileBlock b = checkBuckets[i];
if(b != null) {
Bucket d = b.getData();
@@ -354,35 +367,11 @@
public void schedule() {
try {
- for(int i=0;i<dataKeys.length;i++) {
- if(dataKeys[i] == null) {
- // Already fetched?
- continue;
- }
- if(dataBlockStatus[i] != null) {
- Logger.error(this, "Scheduling twice?
dataBlockStatus["+i+"] = "+dataBlockStatus[i]);
- } else {
- dataBlockStatus[i] =
- new SimpleSingleFileFetcher(dataKeys[i], blockFetchContext.maxNonSplitfileRetries, blockFetchContext, parentFetcher.parent, this, true, i);
- }
- }
- for(int i=0;i<checkKeys.length;i++) {
- if(checkKeys[i] == null) {
- // Already fetched?
- continue;
- }
- if(checkBlockStatus[i] != null) {
- Logger.error(this, "Scheduling twice?
dataBlockStatus["+i+"] = "+checkBlockStatus[i]);
- } else checkBlockStatus[i] =
- new SimpleSingleFileFetcher(checkKeys[i], blockFetchContext.maxNonSplitfileRetries, blockFetchContext, parentFetcher.parent, this, true, i);
- }
- for(int i=0;i<dataKeys.length;i++) {
- if(dataBlockStatus[i] != null)
- dataBlockStatus[i].schedule();
- }
- for(int i=0;i<checkKeys.length;i++)
- if(checkBlockStatus[i] != null)
- checkBlockStatus[i].schedule();
+ SplitFileFetcherSubSegment seg = getSubSegment(0);
+ for(int i=0;i<dataRetries.length+checkRetries.length;i++)
+ seg.add(i);
+
+ seg.schedule();
} catch (Throwable t) {
Logger.error(this, "Caught "+t+" scheduling "+this, t);
fail(new FetchException(FetchException.INTERNAL_ERROR, t));
@@ -402,10 +391,19 @@
}
public ClientCHK getBlockKey(int blockNum) {
- if(blockNum > dataKeys.length)
- return checkKeys[blockNum - dataKeys.length];
- else
+ if(blockNum < dataKeys.length)
return dataKeys[blockNum];
+ else
+ return checkKeys[blockNum - dataKeys.length];
}
+ public synchronized void removeSeg(SplitFileFetcherSubSegment segment) {
+ for(int i=0;i<subSegments.size();i++) {
+ if(segment.equals(subSegments.get(i))) {
+ subSegments.remove(i);
+ i--;
+ }
+ }
+ }
+
}
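
Note on the failure accounting above: with d data blocks, c check blocks and minFetched blocks needed for FEC decode, the segment is hopeless once more than d + c - minFetched blocks have failed; onFatalFailure() returns early while the count is still at or below that bound and only then calls fail(). The same check in isolation (hypothetical helper, not part of the commit):

    class FailureThreshold {
        // e.g. d=128, c=64, minFetched=128: up to 64 failures are survivable.
        static boolean segmentIsHopeless(int failedBlocks, int fatallyFailedBlocks,
                int dataBlocks, int checkBlocks, int minFetched) {
            return failedBlocks + fatallyFailedBlocks
                > dataBlocks + checkBlocks - minFetched;
        }
    }
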
Added: trunk/freenet/src/freenet/client/async/SplitFileFetcherSubSegment.java
===================================================================
--- trunk/freenet/src/freenet/client/async/SplitFileFetcherSubSegment.java	(rev 0)
+++ trunk/freenet/src/freenet/client/async/SplitFileFetcherSubSegment.java	2007-02-17 01:15:15 UTC (rev 11827)
@@ -0,0 +1,208 @@
+package freenet.client.async;
+
+import java.io.IOException;
+import java.util.Vector;
+
+import freenet.client.ClientMetadata;
+import freenet.client.FetchContext;
+import freenet.client.FetchException;
+import freenet.client.FetchResult;
+import freenet.keys.ClientKey;
+import freenet.keys.ClientKeyBlock;
+import freenet.keys.KeyDecodeException;
+import freenet.node.LowLevelGetException;
+import freenet.node.SendableGet;
+import freenet.support.Logger;
+import freenet.support.api.Bucket;
+
+/**
+ * A sub-segment of a segment of a splitfile being fetched.
+ * Collects together all requests within that segment at a given retry level.
+ * Registered on the ClientRequestScheduler instead of individual SimpleSingleFileFetchers.
+ * When the CRS asks it to run a request, it returns one, and it only unregisters if there are no more requests in this category.
+ */
+public class SplitFileFetcherSubSegment extends SendableGet {
+
+ final int retryCount;
+ final SplitFileFetcherSegment segment;
+ final Vector blockNums;
+ final FetchContext ctx;
+
+ SplitFileFetcherSubSegment(SplitFileFetcherSegment segment, int retryCount) {
+ super(segment.parentFetcher.parent);
+ this.segment = segment;
+ this.retryCount = retryCount;
+ ctx = segment.blockFetchContext;
+ blockNums = new Vector();
+ }
+
+ public boolean dontCache() {
+ return !ctx.cacheLocalRequests;
+ }
+
+ public FetchContext getContext() {
+ return ctx;
+ }
+
+ public int chooseKey() {
+ if(segment.isFinishing()) return -1;
+ return removeRandomBlockNum();
+ }
+
+ public ClientKey getKey(int token) {
+ if(segment.isFinishing()) return null;
+ return segment.getBlockKey(token);
+ }
+
+ public synchronized int[] allKeys() {
+ int[] nums = new int[blockNums.size()];
+ for(int i=0;i<nums.length;i++)
+ nums[i] = ((Integer) blockNums.get(i)).intValue();
+ return nums;
+ }
+
+ private synchronized int removeRandomBlockNum() {
+ if(blockNums.isEmpty()) return -1;
+ int x = ctx.random.nextInt(blockNums.size());
+ return ((Integer) blockNums.remove(x)).intValue();
+ }
+
+ public boolean ignoreStore() {
+ return ctx.ignoreStore;
+ }
+
+ // Translate it, then call the real onFailure
+ // FIXME refactor this out to a common method; see SimpleSingleFileFetcher
+ public void onFailure(LowLevelGetException e, int token) {
+ switch(e.code) {
+ case LowLevelGetException.DATA_NOT_FOUND:
+ onFailure(new FetchException(FetchException.DATA_NOT_FOUND), token);
+ return;
+ case LowLevelGetException.DATA_NOT_FOUND_IN_STORE:
+ onFailure(new FetchException(FetchException.DATA_NOT_FOUND), token);
+ return;
+ case LowLevelGetException.DECODE_FAILED:
+ onFailure(new FetchException(FetchException.BLOCK_DECODE_ERROR), token);
+ return;
+ case LowLevelGetException.INTERNAL_ERROR:
+ onFailure(new FetchException(FetchException.INTERNAL_ERROR), token);
+ return;
+ case LowLevelGetException.REJECTED_OVERLOAD:
+ onFailure(new FetchException(FetchException.REJECTED_OVERLOAD), token);
+ return;
+ case LowLevelGetException.ROUTE_NOT_FOUND:
+ onFailure(new FetchException(FetchException.ROUTE_NOT_FOUND), token);
+ return;
+ case LowLevelGetException.TRANSFER_FAILED:
+ onFailure(new FetchException(FetchException.TRANSFER_FAILED), token);
+ return;
+ case LowLevelGetException.VERIFY_FAILED:
+ onFailure(new FetchException(FetchException.BLOCK_DECODE_ERROR), token);
+ return;
+ case LowLevelGetException.CANCELLED:
+ onFailure(new FetchException(FetchException.CANCELLED), token);
+ return;
+ default:
+ Logger.error(this, "Unknown LowLevelGetException code: "+e.code);
+ onFailure(new FetchException(FetchException.INTERNAL_ERROR), token);
+ return;
+ }
+ }
+
+ // Real onFailure
+ protected void onFailure(FetchException e, int token) {
+ boolean forceFatal = false;
+ if(parent.isCancelled()) {
+ if(Logger.shouldLog(Logger.MINOR, this))
+ Logger.minor(this, "Failing: cancelled");
+ e = new FetchException(FetchException.CANCELLED);
+ forceFatal = true;
+ }
+ segment.errors.inc(e.getMode());
+ if(e.isFatal() || forceFatal) {
+ parent.fatallyFailedBlock();
+ segment.onFatalFailure(e, token);
+ } else {
+ parent.failedBlock();
+ segment.onNonFatalFailure(e, token);
+ }
+ }
+
+ public void onSuccess(ClientKeyBlock block, boolean fromStore, int token) {
+ Bucket data = extract(block, token);
+ if(data == null) return; // failed
+ if(!block.isMetadata()) {
+ onSuccess(new FetchResult((ClientMetadata)null, data), token);
+ } else {
+ onFailure(new FetchException(FetchException.INVALID_METADATA, "Metadata where expected data"), token);
+ }
+ }
+
+ /** Will be overridden by SingleFileFetcher */
+ protected void onSuccess(FetchResult data, int blockNo) {
+ if(parent.isCancelled()) {
+ data.asBucket().free();
+ onFailure(new FetchException(FetchException.CANCELLED), blockNo);
+ return;
+ }
+ segment.onSuccess(data, blockNo);
+ }
+
+ /** Convert a ClientKeyBlock to a Bucket. If an error occurs, report it via onFailure
+ * and return null.
+ */
+ protected Bucket extract(ClientKeyBlock block, int token) {
+ Bucket data;
+ try {
+ data = block.decode(ctx.bucketFactory, (int)(Math.min(ctx.maxOutputLength, Integer.MAX_VALUE)), false);
+ } catch (KeyDecodeException e1) {
+ if(Logger.shouldLog(Logger.MINOR, this))
+ Logger.minor(this, "Decode failure: "+e1, e1);
+ onFailure(new FetchException(FetchException.BLOCK_DECODE_ERROR, e1.getMessage()), token);
+ return null;
+ } catch (IOException e) {
+ Logger.error(this, "Could not capture data - disk
full?: "+e, e);
+ onFailure(new
FetchException(FetchException.BUCKET_ERROR, e), token);
+ return null;
+ }
+ return data;
+ }
+
+ public Object getClient() {
+ return segment.parentFetcher.parent.getClient();
+ }
+
+ public ClientRequester getClientRequest() {
+ return segment.parentFetcher.parent;
+ }
+
+ public short getPriorityClass() {
+ return segment.parentFetcher.parent.priorityClass;
+ }
+
+ public int getRetryCount() {
+ return retryCount;
+ }
+
+ public synchronized boolean canRemove() {
+ if(blockNums.isEmpty()) {
+ segment.removeSeg(this);
+ return true;
+ } else return false;
+ }
+
+ public boolean isCancelled() {
+ return segment.isFinished();
+ }
+
+ public boolean isSSK() {
+ // Not allowed in splitfiles
+ return false;
+ }
+
+ public synchronized void add(int blockNo) {
+ Integer i = new Integer(blockNo);
+ blockNums.add(i);
+ }
+
+}
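
The SendableGet contract as used above: the scheduler calls chooseKey() to draw a random outstanding block number, getKey() to resolve it to a ClientKey, and canRemove() to decide when the sub-segment may be unregistered (at which point it has already removed itself from its segment). A hypothetical driver loop showing the intended call order; the real ClientRequestScheduler is more involved:

    void runOne(SplitFileFetcherSubSegment sub) {
        int token = sub.chooseKey(); // random block number, or -1 if none left/finishing
        if(token == -1) return;
        ClientKey key = sub.getKey(token); // null if the segment is finishing
        if(key == null) return;
        // ... fetch the key at the node level, then report back via
        // sub.onSuccess(block, fromStore, token) or sub.onFailure(e, token) ...
        if(sub.canRemove()) {
            // Sub-segment is empty; drop it from the scheduler too.
        }
    }
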
Modified: trunk/freenet/src/freenet/clients/http/StatisticsToadlet.java
===================================================================
--- trunk/freenet/src/freenet/clients/http/StatisticsToadlet.java	2007-02-17 00:45:51 UTC (rev 11826)
+++ trunk/freenet/src/freenet/clients/http/StatisticsToadlet.java	2007-02-17 01:15:15 UTC (rev 11827)
@@ -375,16 +375,18 @@
bandwidthList.addChild("li", "Total Input:\u00a0" + SizeUtil.formatSize(total[1]) + " (" + SizeUtil.formatSize(total_input_rate, true) + "ps)");
long[] rate = node.getNodeIOStats();
long delta = (rate[5] - rate[2]) / 1000;
- long output_rate = (rate[3] - rate[0]) / delta;
- long input_rate = (rate[4] - rate[1]) / delta;
- SubConfig nodeConfig = node.config.get("node");
- int outputBandwidthLimit = nodeConfig.getInt("outputBandwidthLimit");
- int inputBandwidthLimit = nodeConfig.getInt("inputBandwidthLimit");
- if(inputBandwidthLimit == -1) {
- inputBandwidthLimit = outputBandwidthLimit * 4;
+ if(delta > 0) {
+ long output_rate = (rate[3] - rate[0]) / delta;
+ long input_rate = (rate[4] - rate[1]) / delta;
+ SubConfig nodeConfig = node.config.get("node");
+ int outputBandwidthLimit = nodeConfig.getInt("outputBandwidthLimit");
+ int inputBandwidthLimit = nodeConfig.getInt("inputBandwidthLimit");
+ if(inputBandwidthLimit == -1) {
+ inputBandwidthLimit = outputBandwidthLimit * 4;
+ }
+ bandwidthList.addChild("li", "Output Rate:\u00a0" + SizeUtil.formatSize(output_rate, true) + "ps (of\u00a0"+SizeUtil.formatSize(outputBandwidthLimit, true)+"ps)");
+ bandwidthList.addChild("li", "Input Rate:\u00a0" + SizeUtil.formatSize(input_rate, true) + "ps (of\u00a0"+SizeUtil.formatSize(inputBandwidthLimit, true)+"ps)");
}
- bandwidthList.addChild("li", "Output Rate:\u00a0" + SizeUtil.formatSize(output_rate, true) + "ps (of\u00a0"+SizeUtil.formatSize(outputBandwidthLimit, true)+"ps)");
- bandwidthList.addChild("li", "Input Rate:\u00a0" + SizeUtil.formatSize(input_rate, true) + "ps (of\u00a0"+SizeUtil.formatSize(inputBandwidthLimit, true)+"ps)");
nextTableCell = overviewTableRow.addChild("td");
// store size box
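
The delta > 0 guard above matters because delta is whole seconds between two samples of getNodeIOStats(); when both samples land in the same second, the old code divided by zero. The same guard in isolation (hypothetical helper, not part of the commit):

    class RateCalc {
        // Returns bytes/sec between two samples, or -1 when the interval is
        // too short to compute a meaningful rate (avoids division by zero).
        static long bytesPerSecond(long bytesThen, long bytesNow,
                long millisThen, long millisNow) {
            long deltaSeconds = (millisNow - millisThen) / 1000;
            if(deltaSeconds <= 0) return -1;
            return (bytesNow - bytesThen) / deltaSeconds;
        }
    }
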
Modified: trunk/freenet/src/freenet/support/SectoredRandomGrabArray.java
===================================================================
--- trunk/freenet/src/freenet/support/SectoredRandomGrabArray.java	2007-02-17 00:45:51 UTC (rev 11826)
+++ trunk/freenet/src/freenet/support/SectoredRandomGrabArray.java	2007-02-17 01:15:15 UTC (rev 11827)
@@ -58,14 +58,19 @@
}
public synchronized RandomGrabArrayItem removeRandom() {
+ boolean logMINOR = Logger.shouldLog(Logger.MINOR, this);
while(true) {
if(grabArrays.length == 0) return null;
int x = rand.nextInt(grabArrays.length);
RemoveRandomWithClient rga = grabArrays[x];
- if(Logger.shouldLog(Logger.MINOR, this))
+ if(logMINOR)
Logger.minor(this, "Picked "+x+" of
"+grabArrays.length+" : "+rga+" : "+rga.getClient());
RandomGrabArrayItem item = rga.removeRandom();
+ if(logMINOR)
+ Logger.minor(this, "RGA has picked "+item);
if(rga.isEmpty() || (item == null)) {
+ if(logMINOR)
+ Logger.minor(this, "Removing "+x);
Object client = rga.getClient();
grabArraysByClient.remove(client);
RemoveRandomWithClient[] newArray = new RemoveRandomWithClient[grabArrays.length-1];
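
For context, removeRandom() here retries in a loop: pick a random per-client grab array, take a random item from it, and prune the array (copying into a smaller array, as the truncated hunk above begins to show) when it is empty or yields nothing. A simplified, self-contained model of that loop shape (hypothetical types, not the real RandomGrabArray classes):

    import java.util.Random;

    class RemoveRandomModel {
        interface Grabber { Object removeRandom(); boolean isEmpty(); }

        private Grabber[] grabArrays;
        private final Random rand = new Random();

        RemoveRandomModel(Grabber[] arrays) { this.grabArrays = arrays; }

        synchronized Object removeRandom() {
            while(true) {
                if(grabArrays.length == 0) return null;
                int x = rand.nextInt(grabArrays.length);
                Grabber rga = grabArrays[x];
                Object item = rga.removeRandom();
                if(rga.isEmpty() || item == null) {
                    // Prune slot x by copying into a smaller array.
                    Grabber[] newArray = new Grabber[grabArrays.length - 1];
                    System.arraycopy(grabArrays, 0, newArray, 0, x);
                    System.arraycopy(grabArrays, x + 1, newArray, x,
                            grabArrays.length - x - 1);
                    grabArrays = newArray;
                    if(item != null) return item;
                    continue; // nothing from this array: try another
                }
                return item;
            }
        }
    }
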