Author: toad
Date: 2008-06-13 17:19:35 +0000 (Fri, 13 Jun 2008)
New Revision: 20319
Modified:
branches/db4o/freenet/src/freenet/client/async/ClientRequestSchedulerCore.java
branches/db4o/freenet/src/freenet/client/async/SingleFileInserter.java
branches/db4o/freenet/src/freenet/client/async/SplitFileInserter.java
branches/db4o/freenet/src/freenet/client/async/SplitFileInserterSegment.java
Log:
More passing of context around: thread ClientContext through the scheduler and splitfile inserter resume paths
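
The pattern in every hunk below is the same: a ClientContext parameter is threaded down the call chain, and runtime services such as the random source are read from it (context.random) rather than from the stored InsertContext (ctx.random), presumably so that objects persisted via db4o need not hold such references. The following is a minimal sketch of that idea only; apart from the name ClientContext and its random field, every class and method here is a hypothetical stand-in, not the real Freenet API:

    // Sketch of threading a runtime context through a call chain.
    // ClientContext below is a stripped-down stand-in; ExampleScheduler,
    // removeFirst and removeFirstInner are hypothetical names.
    import java.util.Random;

    final class ClientContext {
        // Runtime-only services live on the context, not on persisted objects.
        final Random random;
        ClientContext(Random random) {
            this.random = random;
        }
    }

    final class ExampleScheduler {
        // The caller passes the runtime ClientContext down explicitly
        // instead of the method reading it from a stored field.
        boolean removeFirst(ClientContext context) {
            return removeFirstInner(context);
        }

        private boolean removeFirstInner(ClientContext context) {
            // Any decision needing runtime state uses the passed-in context.
            return context.random.nextBoolean();
        }
    }

A caller would construct the context once at startup and hand it to every schedule/resume call, e.g. new ExampleScheduler().removeFirst(new ClientContext(new Random())).
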
Modified: branches/db4o/freenet/src/freenet/client/async/ClientRequestSchedulerCore.java
===================================================================
--- branches/db4o/freenet/src/freenet/client/async/ClientRequestSchedulerCore.java	2008-06-13 17:16:32 UTC (rev 20318)
+++ branches/db4o/freenet/src/freenet/client/async/ClientRequestSchedulerCore.java	2008-06-13 17:19:35 UTC (rev 20319)
@@ -178,7 +178,7 @@
 	// thread removes it cos its empty) ... and in addToGrabArray etc we already sync on this.
 	// The worry is ... is there any nested locking outside of the hierarchy?
 	ChosenRequest removeFirst(int fuzz, RandomSource random, OfferedKeysList[] offeredKeys, RequestStarter starter, ClientRequestSchedulerNonPersistent schedTransient, boolean transientOnly, short maxPrio, int retryCount, ClientContext context) {
-		SendableRequest req = removeFirstInner(fuzz, random, offeredKeys, starter, schedTransient, transientOnly, maxPrio, retryCount);
+		SendableRequest req = removeFirstInner(fuzz, random, offeredKeys, starter, schedTransient, transientOnly, maxPrio, retryCount, context);
 		Object token = req.chooseKey(this, req.persistent() ? container : null, context);
 		if(token == null) {
 			return null;
@@ -208,7 +208,7 @@
 		}
 	}
 	
-	SendableRequest removeFirstInner(int fuzz, RandomSource random, OfferedKeysList[] offeredKeys, RequestStarter starter, ClientRequestSchedulerNonPersistent schedTransient, boolean transientOnly, short maxPrio, int retryCount) {
+	SendableRequest removeFirstInner(int fuzz, RandomSource random, OfferedKeysList[] offeredKeys, RequestStarter starter, ClientRequestSchedulerNonPersistent schedTransient, boolean transientOnly, short maxPrio, int retryCount, ClientContext context) {
 		// Priorities start at 0
 		if(logMINOR) Logger.minor(this, "removeFirst()");
 		boolean tryOfferedKeys = offeredKeys != null && random.nextBoolean();
@@ -225,7 +225,7 @@
 		for(;choosenPriorityClass <= RequestStarter.MINIMUM_PRIORITY_CLASS;choosenPriorityClass++) {
 			if(logMINOR) Logger.minor(this, "Using priority "+choosenPriorityClass);
 			if(tryOfferedKeys) {
-				if(offeredKeys[choosenPriorityClass].hasValidKeys(this, null))
+				if(offeredKeys[choosenPriorityClass].hasValidKeys(this, null, context))
 					return offeredKeys[choosenPriorityClass];
 			}
 			SortedVectorByNumber perm = null;
Modified: branches/db4o/freenet/src/freenet/client/async/SingleFileInserter.java
===================================================================
--- branches/db4o/freenet/src/freenet/client/async/SingleFileInserter.java	2008-06-13 17:16:32 UTC (rev 20318)
+++ branches/db4o/freenet/src/freenet/client/async/SingleFileInserter.java	2008-06-13 17:19:35 UTC (rev 20319)
@@ -94,7 +94,7 @@
 				// If we succeed, we bypass both compression and FEC encoding!
 				try {
 					SplitHandler sh = new SplitHandler();
-					sh.start(fs, false);
+					sh.start(fs, false, context);
 					cb.onTransition(this, sh, container);
 					sh.schedule(container, context);
 					return;
@@ -338,7 +338,7 @@
 		 * @throws InsertException Thrown if some other error prevents the insert
 		 * from starting.
 		 */
-		void start(SimpleFieldSet fs, boolean forceMetadata) throws ResumeException, InsertException {
+		void start(SimpleFieldSet fs, boolean forceMetadata, ClientContext context) throws ResumeException, InsertException {
 			boolean meta = metadata || forceMetadata;
@@ -348,7 +348,7 @@
 			if(sfiFS == null)
 				throw new ResumeException("No SplitFileInserter");
 			ClientPutState newSFI, newMetaPutter = null;
-			newSFI = new SplitFileInserter(parent, this, forceMetadata ? null : block.clientMetadata, ctx, getCHKOnly, meta, token, insertAsArchiveManifest, sfiFS);
+			newSFI = new SplitFileInserter(parent, this, forceMetadata ? null : block.clientMetadata, ctx, getCHKOnly, meta, token, insertAsArchiveManifest, sfiFS, context);
 			if(logMINOR) Logger.minor(this, "Starting "+newSFI+" for "+this);
 			fs.removeSubset("SplitFileInserter");
 			SimpleFieldSet metaFS = fs.subset("MetadataPutter");
@@ -358,10 +358,10 @@
 				if(type.equals("SplitFileInserter")) {
 					// FIXME insertAsArchiveManifest ?!?!?!
 					newMetaPutter = 
-						new SplitFileInserter(parent, this, null, ctx, getCHKOnly, true, token, insertAsArchiveManifest, metaFS);
+						new SplitFileInserter(parent, this, null, ctx, getCHKOnly, true, token, insertAsArchiveManifest, metaFS, context);
 				} else if(type.equals("SplitHandler")) {
 					newMetaPutter = new SplitHandler();
-					((SplitHandler)newMetaPutter).start(metaFS, true);
+					((SplitHandler)newMetaPutter).start(metaFS, true, context);
 				}
 			} catch (ResumeException e) {
 				newMetaPutter = null;
Modified: branches/db4o/freenet/src/freenet/client/async/SplitFileInserter.java
===================================================================
--- branches/db4o/freenet/src/freenet/client/async/SplitFileInserter.java	2008-06-13 17:16:32 UTC (rev 20318)
+++ branches/db4o/freenet/src/freenet/client/async/SplitFileInserter.java	2008-06-13 17:19:35 UTC (rev 20319)
@@ -108,7 +108,7 @@
 		parent.onMajorProgress();
 	}
 	
-	public SplitFileInserter(BaseClientPutter parent, PutCompletionCallback cb, ClientMetadata clientMetadata, InsertContext ctx, boolean getCHKOnly, boolean metadata, Object token, boolean insertAsArchiveManifest, SimpleFieldSet fs) throws ResumeException {
+	public SplitFileInserter(BaseClientPutter parent, PutCompletionCallback cb, ClientMetadata clientMetadata, InsertContext ctx, boolean getCHKOnly, boolean metadata, Object token, boolean insertAsArchiveManifest, SimpleFieldSet fs, ClientContext context) throws ResumeException {
 		logMINOR = Logger.shouldLog(Logger.MINOR, this);
 		this.parent = parent;
 		this.insertAsArchiveManifest = insertAsArchiveManifest;
@@ -185,7 +185,7 @@
 			SimpleFieldSet segment = segFS.subset(index);
 			segFS.removeSubset(index);
 			if(segment == null) throw new ResumeException("No segment "+i);
-			segments[i] = new SplitFileInserterSegment(this, segment, splitfileAlgorithm, ctx, getCHKOnly, i);
+			segments[i] = new SplitFileInserterSegment(this, segment, splitfileAlgorithm, ctx, getCHKOnly, i, context);
 			dataBlocks += segments[i].countDataBlocks();
 			checkBlocks += segments[i].countCheckBlocks();
 		}
Modified: branches/db4o/freenet/src/freenet/client/async/SplitFileInserterSegment.java
===================================================================
--- branches/db4o/freenet/src/freenet/client/async/SplitFileInserterSegment.java	2008-06-13 17:16:32 UTC (rev 20318)
+++ branches/db4o/freenet/src/freenet/client/async/SplitFileInserterSegment.java	2008-06-13 17:19:35 UTC (rev 20319)
@@ -95,7 +95,7 @@
 	 */
 	public SplitFileInserterSegment(SplitFileInserter parent,
 			SimpleFieldSet fs, short splitfileAlgorithm, InsertContext ctx,
-			boolean getCHKOnly, int segNo) throws ResumeException {
+			boolean getCHKOnly, int segNo, ClientContext context) throws ResumeException {
 		this.parent = parent;
 		this.getCHKOnly = getCHKOnly;
 		this.blockInsertContext = ctx;
@@ -191,7 +191,7 @@
 			if (bucketFS != null) {
 				try {
 					checkBlocks[i] = SerializableToFieldSetBucketUtil
-							.create(bucketFS, ctx.random,
+							.create(bucketFS, context.random,
 									ctx.persistentFileTracker);
 					if (logMINOR)
 						Logger.minor(this, "Check block " + i + " : "
@@ -281,7 +281,7 @@
 			} else {
 				try {
 					dataBlocks[i] = SerializableToFieldSetBucketUtil.create(
-							bucketFS, ctx.random, ctx.persistentFileTracker);
+							bucketFS, context.random, ctx.persistentFileTracker);
 					if (logMINOR)
 						Logger.minor(this, "Data block " + i + " : "
								+ dataBlocks[i]);