Author: toad
Date: 2006-01-20 19:03:11 +0000 (Fri, 20 Jan 2006)
New Revision: 7885
Added:
branches/async-client-layer/
branches/async-client-layer/devnotes/specs/fcp.txt
branches/async-client-layer/src/freenet/client/BlockFetcher.java
branches/async-client-layer/src/freenet/client/FailureCodeTracker.java
branches/async-client-layer/src/freenet/client/FetchException.java
branches/async-client-layer/src/freenet/client/Fetcher.java
branches/async-client-layer/src/freenet/client/FetcherContext.java
branches/async-client-layer/src/freenet/client/HighLevelSimpleClient.java
branches/async-client-layer/src/freenet/client/HighLevelSimpleClientImpl.java
branches/async-client-layer/src/freenet/client/InserterContext.java
branches/async-client-layer/src/freenet/client/InserterException.java
branches/async-client-layer/src/freenet/client/Segment.java
branches/async-client-layer/src/freenet/client/SplitFetcher.java
branches/async-client-layer/src/freenet/client/StartableSplitfileBlock.java
branches/async-client-layer/src/freenet/clients/http/FproxyToadlet.java
branches/async-client-layer/src/freenet/node/Node.java
branches/async-client-layer/src/freenet/node/QueuedDataRequest.java
branches/async-client-layer/src/freenet/node/QueueingSimpleLowLevelClient.java
branches/async-client-layer/src/freenet/node/RealNodeRequestInsertTest.java
branches/async-client-layer/src/freenet/node/RequestHandler.java
branches/async-client-layer/src/freenet/node/RequestStarterClient.java
branches/async-client-layer/src/freenet/node/SimpleLowLevelClient.java
branches/async-client-layer/src/freenet/node/TextModeClientInterface.java
branches/async-client-layer/src/freenet/node/Version.java
branches/async-client-layer/src/freenet/node/fcp/
branches/async-client-layer/src/freenet/node/fcp/ClientGet.java
branches/async-client-layer/src/freenet/node/fcp/ClientGetMessage.java
branches/async-client-layer/src/freenet/node/fcp/ClientHelloMessage.java
branches/async-client-layer/src/freenet/node/fcp/ClientPut.java
branches/async-client-layer/src/freenet/node/fcp/ClientPutMessage.java
branches/async-client-layer/src/freenet/node/fcp/FCPConnectionHandler.java
branches/async-client-layer/src/freenet/node/fcp/FCPConnectionInputHandler.java
branches/async-client-layer/src/freenet/node/fcp/FCPMessage.java
branches/async-client-layer/src/freenet/node/fcp/GetFailedMessage.java
branches/async-client-layer/src/freenet/node/fcp/PutFailedMessage.java
branches/async-client-layer/src/freenet/node/fcp/PutSuccessfulMessage.java
branches/async-client-layer/src/freenet/support/BucketTools.java
branches/async-client-layer/src/freenet/support/SimpleFieldSet.java
branches/async-client-layer/src/freenet/support/io/LineReader.java
branches/async-client-layer/src/freenet/support/io/LineReadingInputStream.java
Removed:
branches/async-client-layer/src/freenet/client/BlockFetcher.java
branches/async-client-layer/src/freenet/client/FailureCodeTracker.java
branches/async-client-layer/src/freenet/client/FetchException.java
branches/async-client-layer/src/freenet/client/Fetcher.java
branches/async-client-layer/src/freenet/client/FetcherContext.java
branches/async-client-layer/src/freenet/client/HighLevelSimpleClient.java
branches/async-client-layer/src/freenet/client/HighLevelSimpleClientImpl.java
branches/async-client-layer/src/freenet/client/InserterContext.java
branches/async-client-layer/src/freenet/client/InserterException.java
branches/async-client-layer/src/freenet/client/Segment.java
branches/async-client-layer/src/freenet/client/SplitFetcher.java
branches/async-client-layer/src/freenet/client/SplitfileBlock.java
branches/async-client-layer/src/freenet/clients/http/FproxyToadlet.java
branches/async-client-layer/src/freenet/node/Node.java
branches/async-client-layer/src/freenet/node/QueuedDataRequest.java
branches/async-client-layer/src/freenet/node/QueueingSimpleLowLevelClient.java
branches/async-client-layer/src/freenet/node/RealNodeRequestInsertTest.java
branches/async-client-layer/src/freenet/node/RequestHandler.java
branches/async-client-layer/src/freenet/node/RequestStarterClient.java
branches/async-client-layer/src/freenet/node/SimpleLowLevelClient.java
branches/async-client-layer/src/freenet/node/TextModeClientInterface.java
branches/async-client-layer/src/freenet/node/Version.java
branches/async-client-layer/src/freenet/node/fcp/ClientGet.java
branches/async-client-layer/src/freenet/node/fcp/ClientGetMessage.java
branches/async-client-layer/src/freenet/node/fcp/ClientHelloMessage.java
branches/async-client-layer/src/freenet/node/fcp/FCPConnectionHandler.java
branches/async-client-layer/src/freenet/node/fcp/FCPConnectionInputHandler.java
branches/async-client-layer/src/freenet/node/fcp/FCPMessage.java
branches/async-client-layer/src/freenet/support/BucketTools.java
branches/async-client-layer/src/freenet/support/SimpleFieldSet.java
branches/async-client-layer/src/freenet/support/io/LineReadingInputStream.java
Modified:
branches/async-client-layer/src/freenet/client/ArchiveHandler.java
branches/async-client-layer/src/freenet/client/ArchiveStoreContext.java
branches/async-client-layer/src/freenet/client/ClientMetadata.java
branches/async-client-layer/src/freenet/client/FECCodec.java
branches/async-client-layer/src/freenet/client/InsertSegment.java
branches/async-client-layer/src/freenet/client/Metadata.java
branches/async-client-layer/src/freenet/client/RetryTracker.java
branches/async-client-layer/src/freenet/client/RetryTrackerCallback.java
branches/async-client-layer/src/freenet/client/SplitInserter.java
branches/async-client-layer/src/freenet/client/StdSplitfileBlock.java
Log:
New branch:
Major refactoring of the client layer. Objectives:
1. More efficiency when handling large numbers of fetches/puts (fewer
threads; no need to fetch one whole file at a time, etc.).
2. An architecture suitable for storing and restarting fetches/puts, i.e.
fetches/puts that persist across node restarts.
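For a sense of what objective 2 implies, here is a minimal, hypothetical
sketch of an asynchronous fetch API. The interface and method names below
are illustrative assumptions, not classes from this branch:

	import freenet.client.FetchException;
	import freenet.client.FetchResult;
	import freenet.client.FetcherContext;
	import freenet.keys.FreenetURI;

	// Hypothetical callback interface: a fetch reports completion or
	// failure asynchronously instead of blocking a thread per request.
	interface ClientGetCallback {
		void onSuccess(FetchResult result); // data plus client metadata
		void onFailure(FetchException e);   // terminal failure; see isFatal() below
	}

	// Hypothetical asynchronous entry point: returns immediately; a queued
	// request could be serialized and restarted after a node restart.
	interface AsyncClient {
		void fetch(FreenetURI uri, FetcherContext ctx, ClientGetCallback cb);
	}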
Copied: branches/async-client-layer (from rev 7871, trunk/freenet)
Copied: branches/async-client-layer/devnotes/specs/fcp.txt (from rev 7876,
trunk/freenet/devnotes/specs/fcp.txt)
Modified: branches/async-client-layer/src/freenet/client/ArchiveHandler.java
===================================================================
--- trunk/freenet/src/freenet/client/ArchiveHandler.java 2006-01-18
00:45:46 UTC (rev 7871)
+++ branches/async-client-layer/src/freenet/client/ArchiveHandler.java
2006-01-20 19:03:11 UTC (rev 7885)
@@ -6,7 +6,7 @@
* The public face (to Fetcher, for example) of ArchiveStoreContext.
* Just has methods for fetching stuff.
*/
-interface ArchiveHandler {
+public interface ArchiveHandler {
/**
* Get the metadata for this ZIP manifest, as a Bucket.
@@ -36,4 +36,9 @@
throws ArchiveFailureException, ArchiveRestartException,
MetadataParseException, FetchException;
+ /**
+ * Get the archive type.
+ */
+ public abstract short getArchiveType();
+
}
\ No newline at end of file
Modified:
branches/async-client-layer/src/freenet/client/ArchiveStoreContext.java
===================================================================
--- trunk/freenet/src/freenet/client/ArchiveStoreContext.java 2006-01-18
00:45:46 UTC (rev 7871)
+++ branches/async-client-layer/src/freenet/client/ArchiveStoreContext.java
2006-01-20 19:03:11 UTC (rev 7885)
@@ -12,8 +12,11 @@
* subject to the above.
*
* Always take the lock on ArchiveStoreContext before the lock on
ArchiveManager, NOT the other way around.
+ *
+ * Not normally to be used directly by external packages, but public for
+ * ArchiveManager.extractToCache. FIXME.
*/
-class ArchiveStoreContext implements ArchiveHandler {
+public class ArchiveStoreContext implements ArchiveHandler {
private ArchiveManager manager;
private FreenetURI key;
@@ -65,6 +68,7 @@
// Not in cache
if(fetchContext == null) return null;
+ fetchContext = new FetcherContext(fetchContext,
FetcherContext.SET_RETURN_ARCHIVES);
Fetcher fetcher = new Fetcher(key, fetchContext,
archiveContext);
FetchResult result = fetcher.realRun(dm,
recursionLevel, key, dontEnterImplicitArchives, fetchContext.localRequestOnly);
manager.extractToCache(key, archiveType, result.data,
archiveContext, this);
@@ -130,5 +134,9 @@
myItems.remove(item);
}
}
+
+ public short getArchiveType() {
+ return archiveType;
+ }
}
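The key change in the fetch path above is wrapping the caller's context via
the mask copy-constructor before running the Fetcher, presumably so the
fetch returns the archive itself as data rather than recursing into it
(compare the returnZIPManifests flag in the new FetcherContext below). A
short sketch of the pattern, using only names visible in this commit:

	// Copy an existing context, altering behaviour according to a mask ID.
	// SET_RETURN_ARCHIVES is one of the mask constants this branch adds to
	// FetcherContext (alongside IDENTICAL_MASK and the splitfile masks).
	FetcherContext archiveCtx =
		new FetcherContext(fetchContext, FetcherContext.SET_RETURN_ARCHIVES);
	Fetcher fetcher = new Fetcher(key, archiveCtx, archiveContext);
	FetchResult result = fetcher.realRun(dm, recursionLevel, key,
		dontEnterImplicitArchives, archiveCtx.localRequestOnly);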
Deleted: branches/async-client-layer/src/freenet/client/BlockFetcher.java
===================================================================
--- trunk/freenet/src/freenet/client/BlockFetcher.java 2006-01-18 00:45:46 UTC
(rev 7871)
+++ branches/async-client-layer/src/freenet/client/BlockFetcher.java
2006-01-20 19:03:11 UTC (rev 7885)
@@ -1,148 +0,0 @@
-/**
- *
- */
-package freenet.client;
-
-import freenet.keys.FreenetURI;
-import freenet.support.Bucket;
-import freenet.support.Logger;
-
-public class BlockFetcher extends StdSplitfileBlock {
-
- private final Segment segment;
- final FreenetURI uri;
- final boolean dontEnterImplicitArchives;
- int completedTries;
- boolean actuallyFetched;
-
- public BlockFetcher(Segment segment, RetryTracker tracker, FreenetURI
freenetURI, int index, boolean dontEnterImplicitArchives) {
- super(tracker, index, null);
- this.segment = segment;
- uri = freenetURI;
- completedTries = 0;
- fetchedData = null;
- actuallyFetched = false;
- this.dontEnterImplicitArchives = dontEnterImplicitArchives;
- }
-
- public String getName() {
- return "BlockFetcher for "+getNumber();
- }
-
- public void run() {
- Logger.minor(this, "Running: "+this);
- // Already added to runningFetches.
- // But need to make sure we are removed when we exit.
- try {
- realRun();
- } catch (Throwable t) {
- fatalError(t, FetchException.INTERNAL_ERROR);
- } finally {
- completedTries++;
- }
- }
-
- public String toString() {
- return super.toString()+" tries="+completedTries+" uri="+uri;
- }
-
- private void realRun() {
- // Do the fetch
- Fetcher f = new Fetcher(uri, this.segment.blockFetchContext);
- try {
- FetchResult fr = f.realRun(new ClientMetadata(),
segment.recursionLevel, uri,
- (!this.segment.nonFullBlocksAllowed) ||
dontEnterImplicitArchives, segment.blockFetchContext.localRequestOnly ||
completedTries == 0);
- actuallyFetched = true;
- fetchedData = fr.data;
- Logger.minor(this, "Fetched "+fetchedData.size()+"
bytes on "+this);
- tracker.success(this);
- } catch (MetadataParseException e) {
- fatalError(e, FetchException.INVALID_METADATA);
- } catch (FetchException e) {
- int code = e.getMode();
- switch(code) {
- case FetchException.ARCHIVE_FAILURE:
- case FetchException.BLOCK_DECODE_ERROR:
- case FetchException.HAS_MORE_METASTRINGS:
- case FetchException.INVALID_METADATA:
- case FetchException.NOT_IN_ARCHIVE:
- case FetchException.TOO_DEEP_ARCHIVE_RECURSION:
- case FetchException.TOO_MANY_ARCHIVE_RESTARTS:
- case FetchException.TOO_MANY_METADATA_LEVELS:
- case FetchException.TOO_MANY_REDIRECTS:
- case FetchException.TOO_MUCH_RECURSION:
- case FetchException.UNKNOWN_METADATA:
- case FetchException.UNKNOWN_SPLITFILE_METADATA:
- // Fatal, probably an error on insert
- fatalError(e, code);
- return;
-
- case FetchException.DATA_NOT_FOUND:
- case FetchException.ROUTE_NOT_FOUND:
- case FetchException.REJECTED_OVERLOAD:
- case FetchException.TRANSFER_FAILED:
- // Non-fatal
- nonfatalError(e, code);
- return;
-
- case FetchException.BUCKET_ERROR:
- case FetchException.INTERNAL_ERROR:
- // Maybe fatal
- nonfatalError(e, code);
- return;
- }
- } catch (ArchiveFailureException e) {
- fatalError(e, FetchException.ARCHIVE_FAILURE);
- } catch (ArchiveRestartException e) {
- Logger.error(this, "Got an ArchiveRestartException in a
splitfile - WTF?");
- fatalError(e, FetchException.ARCHIVE_FAILURE);
- }
- }
-
- private void fatalError(Throwable e, int code) {
- Logger.error(this, "Giving up on block: "+this+": "+e, e);
- tracker.fatalError(this, code);
- }
-
- private void nonfatalError(Exception e, int code) {
- Logger.minor(this, "Non-fatal error on "+this+": "+e);
- tracker.nonfatalError(this, code);
- }
-
- public boolean succeeded() {
- return fetchedData != null;
- }
-
- /**
- * Queue a healing block for insert.
- * Will be implemented using the download manager.
- * FIXME: implement!
- */
- public void queueHeal() {
- // TODO Auto-generated method stub
-
- }
-
- public void kill() {
- // Do nothing, for now
- }
-
- public FreenetURI getURI() {
- return uri;
- }
-
- public void setData(Bucket data) {
- actuallyFetched = false;
- super.setData(data);
- }
-
- protected void checkStartable() {
- if(fetchedData != null) {
- throw new IllegalStateException("Already have data");
- }
- }
-
- public int getRetryCount() {
- return completedTries;
- }
-}
\ No newline at end of file
Copied: branches/async-client-layer/src/freenet/client/BlockFetcher.java (from
rev 7873, trunk/freenet/src/freenet/client/BlockFetcher.java)
Modified: branches/async-client-layer/src/freenet/client/ClientMetadata.java
===================================================================
--- trunk/freenet/src/freenet/client/ClientMetadata.java 2006-01-18
00:45:46 UTC (rev 7871)
+++ branches/async-client-layer/src/freenet/client/ClientMetadata.java
2006-01-20 19:03:11 UTC (rev 7885)
@@ -13,7 +13,7 @@
}
/** Create an empty ClientMetadata instance */
- ClientMetadata() {
+ public ClientMetadata() {
mimeType = null;
}
Modified: branches/async-client-layer/src/freenet/client/FECCodec.java
===================================================================
--- trunk/freenet/src/freenet/client/FECCodec.java 2006-01-18 00:45:46 UTC
(rev 7871)
+++ branches/async-client-layer/src/freenet/client/FECCodec.java
2006-01-20 19:03:11 UTC (rev 7885)
@@ -11,7 +11,7 @@
* @author root
*
*/
-abstract class FECCodec {
+public abstract class FECCodec {
/**
* Get a codec where we know both the number of data blocks and the
number
Deleted: branches/async-client-layer/src/freenet/client/FailureCodeTracker.java
===================================================================
--- trunk/freenet/src/freenet/client/FailureCodeTracker.java 2006-01-18
00:45:46 UTC (rev 7871)
+++ branches/async-client-layer/src/freenet/client/FailureCodeTracker.java
2006-01-20 19:03:11 UTC (rev 7885)
@@ -1,66 +0,0 @@
-package freenet.client;
-
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.Iterator;
-
-/**
- * Essentially a map of integer to incrementible integer.
- * FIXME maybe move this to support, give it a better name?
- */
-public class FailureCodeTracker {
-
- public final boolean insert;
-
- public FailureCodeTracker(boolean insert) {
- this.insert = insert;
- }
-
- public class Item {
- int x;
- }
-
- final HashMap map = new HashMap();
-
- public synchronized void inc(int k) {
- Integer key = new Integer(k);
- Item i = (Item) map.get(key);
- if(i == null)
- map.put(key, i = new Item());
- i.x++;
- }
-
- public synchronized void inc(Integer key, int val) {
- Item i = (Item) map.get(key);
- if(i == null)
- map.put(key, i = new Item());
- i.x+=val;
- }
-
- public synchronized String toVerboseString() {
- StringBuffer sb = new StringBuffer();
- Collection values = map.keySet();
- Iterator i = values.iterator();
- while(i.hasNext()) {
- Integer x = (Integer) i.next();
- Item val = (Item) map.get(x);
- String s = insert ?
InserterException.getMessage(x.intValue()) :
FetchException.getMessage(x.intValue());
- sb.append(val.x);
- sb.append('\t');
- sb.append(s);
- sb.append('\n');
- }
- return sb.toString();
- }
-
- public synchronized FailureCodeTracker merge(FailureCodeTracker
accumulatedFatalErrorCodes) {
- Iterator keys = map.keySet().iterator();
- while(keys.hasNext()) {
- Integer k = (Integer) keys.next();
- Item item = (Item) map.get(k);
- inc(k, item.x);
- }
- return this;
- }
-
-}
Copied: branches/async-client-layer/src/freenet/client/FailureCodeTracker.java
(from rev 7873, trunk/freenet/src/freenet/client/FailureCodeTracker.java)
Deleted: branches/async-client-layer/src/freenet/client/FetchException.java
===================================================================
--- trunk/freenet/src/freenet/client/FetchException.java 2006-01-18
00:45:46 UTC (rev 7871)
+++ branches/async-client-layer/src/freenet/client/FetchException.java
2006-01-20 19:03:11 UTC (rev 7885)
@@ -1,176 +0,0 @@
-package freenet.client;
-
-import java.io.IOException;
-
-import freenet.support.Logger;
-
-/**
- * Generic exception thrown by a Fetcher. All other exceptions are converted
to one of
- * these to tell the client.
- */
-public class FetchException extends Exception {
-
- private static final long serialVersionUID = -1106716067841151962L;
-
- public final int mode;
-
- /** For collection errors */
- public final FailureCodeTracker errorCodes;
-
- /** Get the failure mode. */
- public int getMode() {
- return mode;
- }
-
- public FetchException(int m) {
- super(getMessage(m));
- mode = m;
- errorCodes = null;
- Logger.minor(this, "FetchException("+getMessage(mode)+")",
this);
- }
-
- public FetchException(MetadataParseException e) {
- super(getMessage(INVALID_METADATA)+": "+e.getMessage());
- mode = INVALID_METADATA;
- errorCodes = null;
- initCause(e);
- Logger.minor(this, "FetchException("+getMessage(mode)+"):
"+e,e);
- }
-
- public FetchException(ArchiveFailureException e) {
- super(getMessage(INVALID_METADATA)+": "+e.getMessage());
- mode = ARCHIVE_FAILURE;
- errorCodes = null;
- initCause(e);
- Logger.minor(this, "FetchException("+getMessage(mode)+"):
"+e,e);
- }
-
- public FetchException(int mode, Throwable t) {
- super(getMessage(mode)+": "+t.getMessage());
- this.mode = mode;
- errorCodes = null;
- initCause(t);
- Logger.minor(this, "FetchException("+getMessage(mode)+"):
"+t.getMessage(),t);
- }
-
- public FetchException(int mode, FailureCodeTracker errorCodes) {
- super(getMessage(mode));
- this.mode = mode;
- this.errorCodes = errorCodes;
- Logger.minor(this, "FetchException("+getMessage(mode)+")");
-
- }
-
- public FetchException(int mode, String msg) {
- super(getMessage(mode)+": "+msg);
- errorCodes = null;
- this.mode = mode;
- Logger.minor(this, "FetchException("+getMessage(mode)+"):
"+msg,this);
- }
-
- public static String getMessage(int mode) {
- switch(mode) {
- case TOO_DEEP_ARCHIVE_RECURSION:
- return "Too many levels of recursion into archives";
- case UNKNOWN_SPLITFILE_METADATA:
- return "Don't know what to do with splitfile";
- case TOO_MANY_REDIRECTS:
- return "Too many redirects - loop?";
- case UNKNOWN_METADATA:
- return "Don't know what to do with metadata";
- case INVALID_METADATA:
- return "Failed to parse metadata";
- case ARCHIVE_FAILURE:
- return "Failure in extracting files from an archive";
- case BLOCK_DECODE_ERROR:
- return "Failed to decode a splitfile block";
- case TOO_MANY_METADATA_LEVELS:
- return "Too many levels of split metadata";
- case TOO_MANY_ARCHIVE_RESTARTS:
- return "Request was restarted too many times due to
archives changing";
- case TOO_MUCH_RECURSION:
- return "Too many redirects (too much recursion)"; //
FIXME: ???
- case NOT_IN_ARCHIVE:
- return "File not in archive";
- case HAS_MORE_METASTRINGS:
- return "Not a manifest";
- case BUCKET_ERROR:
- return "Internal temp files error, maybe disk full or
permissions problem?";
- case DATA_NOT_FOUND:
- return "Data not found";
- case ROUTE_NOT_FOUND:
- return "Route not found - could not find enough nodes
to be sure the data doesn't exist";
- case REJECTED_OVERLOAD:
- return "A node was overloaded or timed out";
- case INTERNAL_ERROR:
- return "Internal error, probably a bug";
- case TRANSFER_FAILED:
- return "Found the file, but lost it while receiving the
data";
- case SPLITFILE_ERROR:
- return "Splitfile error";
- case INVALID_URI:
- return "Invalid URI";
- case TOO_BIG:
- return "Too big";
- case TOO_BIG_METADATA:
- return "Metadata too big";
- case TOO_MANY_BLOCKS_PER_SEGMENT:
- return "Too many blocks per segment";
- case NOT_ENOUGH_METASTRINGS:
- return "No default document; give more metastrings in
URI";
- default:
- return "Unknown fetch error code: "+mode;
- }
- }
-
- // FIXME many of these are not used any more
-
- /** Too many levels of recursion into archives */
- public static final int TOO_DEEP_ARCHIVE_RECURSION = 1;
- /** Don't know what to do with splitfile */
- public static final int UNKNOWN_SPLITFILE_METADATA = 2;
- /** Too many redirects */
- public static final int TOO_MANY_REDIRECTS = 16;
- /** Don't know what to do with metadata */
- public static final int UNKNOWN_METADATA = 3;
- /** Got a MetadataParseException */
- public static final int INVALID_METADATA = 4;
- /** Got an ArchiveFailureException */
- public static final int ARCHIVE_FAILURE = 5;
- /** Failed to decode a block */
- public static final int BLOCK_DECODE_ERROR = 6;
- /** Too many split metadata levels */
- public static final int TOO_MANY_METADATA_LEVELS = 7;
- /** Too many archive restarts */
- public static final int TOO_MANY_ARCHIVE_RESTARTS = 8;
- /** Too deep recursion */
- public static final int TOO_MUCH_RECURSION = 9;
- /** Tried to access an archive file but not in an archive */
- public static final int NOT_IN_ARCHIVE = 10;
- /** Has more metastrings, can't fulfill them */
- public static final int HAS_MORE_METASTRINGS = 11;
- /** Failed to read from or write to a bucket; a kind of internal error
*/
- public static final int BUCKET_ERROR = 12;
- /** Data not found */
- public static final int DATA_NOT_FOUND = 13;
- /** Route not found */
- public static final int ROUTE_NOT_FOUND = 14;
- /** Downstream overload */
- public static final int REJECTED_OVERLOAD = 15;
- /** An internal error occurred */
- public static final int INTERNAL_ERROR = 17;
- /** The node found the data but the transfer failed */
- public static final int TRANSFER_FAILED = 18;
- /** Splitfile error. This should be a SplitFetchException. */
- public static final int SPLITFILE_ERROR = 19;
- /** Invalid URI. */
- public static final int INVALID_URI = 20;
- /** Too big */
- public static final int TOO_BIG = 21;
- /** Metadata too big */
- public static final int TOO_BIG_METADATA = 22;
- /** Splitfile has too big segments */
- public static final int TOO_MANY_BLOCKS_PER_SEGMENT = 23;
- /** Not enough meta strings in URI given and no default document */
- public static final int NOT_ENOUGH_METASTRINGS = 24;
-}
Copied: branches/async-client-layer/src/freenet/client/FetchException.java
(from rev 7873, trunk/freenet/src/freenet/client/FetchException.java)
===================================================================
--- trunk/freenet/src/freenet/client/FetchException.java 2006-01-18
16:38:29 UTC (rev 7873)
+++ branches/async-client-layer/src/freenet/client/FetchException.java
2006-01-20 19:03:11 UTC (rev 7885)
@@ -0,0 +1,245 @@
+package freenet.client;
+
+import freenet.support.Logger;
+
+/**
+ * Generic exception thrown by a Fetcher. All other exceptions are converted
to one of
+ * these to tell the client.
+ */
+public class FetchException extends Exception {
+
+ private static final long serialVersionUID = -1106716067841151962L;
+
+ public final int mode;
+
+ /** For collection errors */
+ public final FailureCodeTracker errorCodes;
+
+ public final String extraMessage;
+
+ /** Get the failure mode. */
+ public int getMode() {
+ return mode;
+ }
+
+ public FetchException(int m) {
+ super(getMessage(m));
+ extraMessage = null;
+ mode = m;
+ errorCodes = null;
+ Logger.minor(this, "FetchException("+getMessage(mode)+")",
this);
+ }
+
+ public FetchException(MetadataParseException e) {
+ super(getMessage(INVALID_METADATA)+": "+e.getMessage());
+ extraMessage = e.getMessage();
+ mode = INVALID_METADATA;
+ errorCodes = null;
+ initCause(e);
+ Logger.minor(this, "FetchException("+getMessage(mode)+"):
"+e,e);
+ }
+
+ public FetchException(ArchiveFailureException e) {
+ super(getMessage(ARCHIVE_FAILURE)+": "+e.getMessage());
+ extraMessage = e.getMessage();
+ mode = ARCHIVE_FAILURE;
+ errorCodes = null;
+ initCause(e);
+ Logger.minor(this, "FetchException("+getMessage(mode)+"):
"+e,e);
+ }
+
+ public FetchException(ArchiveRestartException e) {
+ super(getMessage(ARCHIVE_RESTART)+": "+e.getMessage());
+ extraMessage = e.getMessage();
+ mode = ARCHIVE_RESTART;
+ errorCodes = null;
+ initCause(e);
+ Logger.minor(this, "FetchException("+getMessage(mode)+"): "+e,e);
+ }
+
+ public FetchException(int mode, Throwable t) {
+ super(getMessage(mode)+": "+t.getMessage());
+ extraMessage = t.getMessage();
+ this.mode = mode;
+ errorCodes = null;
+ initCause(t);
+ Logger.minor(this, "FetchException("+getMessage(mode)+"):
"+t.getMessage(),t);
+ }
+
+ public FetchException(int mode, FailureCodeTracker errorCodes) {
+ super(getMessage(mode));
+ extraMessage = null;
+ this.mode = mode;
+ this.errorCodes = errorCodes;
+ Logger.minor(this, "FetchException("+getMessage(mode)+")");
+
+ }
+
+ public FetchException(int mode, String msg) {
+ super(getMessage(mode)+": "+msg);
+ extraMessage = msg;
+ errorCodes = null;
+ this.mode = mode;
+ Logger.minor(this, "FetchException("+getMessage(mode)+"):
"+msg,this);
+ }
+
+ public static String getMessage(int mode) {
+ switch(mode) {
+ case TOO_DEEP_ARCHIVE_RECURSION:
+ return "Too many levels of recursion into archives";
+ case UNKNOWN_SPLITFILE_METADATA:
+ return "Don't know what to do with splitfile";
+ case TOO_MANY_REDIRECTS:
+ return "Too many redirects - loop?";
+ case UNKNOWN_METADATA:
+ return "Don't know what to do with metadata";
+ case INVALID_METADATA:
+ return "Failed to parse metadata";
+ case ARCHIVE_FAILURE:
+ return "Failure in extracting files from an archive";
+ case BLOCK_DECODE_ERROR:
+ return "Failed to decode a splitfile block";
+ case TOO_MANY_METADATA_LEVELS:
+ return "Too many levels of split metadata";
+ case TOO_MANY_ARCHIVE_RESTARTS:
+ return "Request was restarted too many times due to
archives changing";
+ case TOO_MUCH_RECURSION:
+ return "Too many redirects (too much recursion)"; //
FIXME: ???
+ case NOT_IN_ARCHIVE:
+ return "File not in archive";
+ case HAS_MORE_METASTRINGS:
+ return "Not a manifest";
+ case BUCKET_ERROR:
+ return "Internal temp files error, maybe disk full or
permissions problem?";
+ case DATA_NOT_FOUND:
+ return "Data not found";
+ case ROUTE_NOT_FOUND:
+ return "Route not found - could not find enough nodes
to be sure the data doesn't exist";
+ case REJECTED_OVERLOAD:
+ return "A node was overloaded or timed out";
+ case INTERNAL_ERROR:
+ return "Internal error, probably a bug";
+ case TRANSFER_FAILED:
+ return "Found the file, but lost it while receiving the
data";
+ case SPLITFILE_ERROR:
+ return "Splitfile error";
+ case INVALID_URI:
+ return "Invalid URI";
+ case TOO_BIG:
+ return "Too big";
+ case TOO_BIG_METADATA:
+ return "Metadata too big";
+ case TOO_MANY_BLOCKS_PER_SEGMENT:
+ return "Too many blocks per segment";
+ case NOT_ENOUGH_METASTRINGS:
+ return "No default document; give more metastrings in
URI";
+ case CANCELLED:
+ return "Cancelled by caller";
+ case ARCHIVE_RESTART:
+ return "Archive restart";
+ default:
+ return "Unknown fetch error code: "+mode;
+ }
+ }
+
+ // FIXME many of these are not used any more
+
+ /** Too many levels of recursion into archives */
+ public static final int TOO_DEEP_ARCHIVE_RECURSION = 1;
+ /** Don't know what to do with splitfile */
+ public static final int UNKNOWN_SPLITFILE_METADATA = 2;
+ /** Too many redirects */
+ public static final int TOO_MANY_REDIRECTS = 16;
+ /** Don't know what to do with metadata */
+ public static final int UNKNOWN_METADATA = 3;
+ /** Got a MetadataParseException */
+ public static final int INVALID_METADATA = 4;
+ /** Got an ArchiveFailureException */
+ public static final int ARCHIVE_FAILURE = 5;
+ /** Failed to decode a block */
+ public static final int BLOCK_DECODE_ERROR = 6;
+ /** Too many split metadata levels */
+ public static final int TOO_MANY_METADATA_LEVELS = 7;
+ /** Too many archive restarts */
+ public static final int TOO_MANY_ARCHIVE_RESTARTS = 8;
+ /** Too deep recursion */
+ public static final int TOO_MUCH_RECURSION = 9;
+ /** Tried to access an archive file but not in an archive */
+ public static final int NOT_IN_ARCHIVE = 10;
+ /** Has more metastrings, can't fulfill them */
+ public static final int HAS_MORE_METASTRINGS = 11;
+ /** Failed to read from or write to a bucket; a kind of internal error
*/
+ public static final int BUCKET_ERROR = 12;
+ /** Data not found */
+ public static final int DATA_NOT_FOUND = 13;
+ /** Route not found */
+ public static final int ROUTE_NOT_FOUND = 14;
+ /** Downstream overload */
+ public static final int REJECTED_OVERLOAD = 15;
+ /** An internal error occurred */
+ public static final int INTERNAL_ERROR = 17;
+ /** The node found the data but the transfer failed */
+ public static final int TRANSFER_FAILED = 18;
+ /** Splitfile error. This should be a SplitFetchException. */
+ public static final int SPLITFILE_ERROR = 19;
+ /** Invalid URI. */
+ public static final int INVALID_URI = 20;
+ /** Too big */
+ public static final int TOO_BIG = 21;
+ /** Metadata too big */
+ public static final int TOO_BIG_METADATA = 22;
+ /** Splitfile has too big segments */
+ public static final int TOO_MANY_BLOCKS_PER_SEGMENT = 23;
+ /** Not enough meta strings in URI given and no default document */
+ public static final int NOT_ENOUGH_METASTRINGS = 24;
+ /** Explicitly cancelled */
+ public static final int CANCELLED = 25;
+ /** Archive restart */
+ public static final int ARCHIVE_RESTART = 26;
+
+ /** Is this error fatal, i.e. is there no point in retrying? */
+ public boolean isFatal() {
+ switch(mode) {
+ // Problems with the data as inserted. No point retrying.
+ case FetchException.ARCHIVE_FAILURE:
+ case FetchException.BLOCK_DECODE_ERROR:
+ case FetchException.HAS_MORE_METASTRINGS:
+ case FetchException.INVALID_METADATA:
+ case FetchException.NOT_IN_ARCHIVE:
+ case FetchException.TOO_DEEP_ARCHIVE_RECURSION:
+ case FetchException.TOO_MANY_ARCHIVE_RESTARTS:
+ case FetchException.TOO_MANY_METADATA_LEVELS:
+ case FetchException.TOO_MANY_REDIRECTS:
+ case FetchException.TOO_MUCH_RECURSION:
+ case FetchException.UNKNOWN_METADATA:
+ case FetchException.UNKNOWN_SPLITFILE_METADATA:
+ case FetchException.TOO_BIG:
+ return true;
+
+ // Low level errors, can be retried
+ case FetchException.DATA_NOT_FOUND:
+ case FetchException.ROUTE_NOT_FOUND:
+ case FetchException.REJECTED_OVERLOAD:
+ case FetchException.TRANSFER_FAILED:
+ return false;
+
+ case FetchException.BUCKET_ERROR:
+ case FetchException.INTERNAL_ERROR:
+ // Maybe fatal
+ return false;
+
+ case FetchException.SPLITFILE_ERROR:
+ // Fatal, because there are internal retries
+ return true;
+
+ // Weird ones
+ case FetchException.CANCELLED:
+ case FetchException.ARCHIVE_RESTART:
+ // Fatal
+ return true;
+
+ default:
+ Logger.error(this, "Do not know if error code is fatal:
"+getMessage(mode));
+ return false; // assume it isn't
+ }
+ }
+}
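The new isFatal() centralizes the fatal-versus-retryable classification
that the deleted BlockFetcher above performed with its own switch over
error codes. A hedged usage sketch; the retry scaffolding is illustrative,
and run() is assumed to keep the signature of the old Fetcher (the branch's
copied Fetcher is not shown in this diff):

	try {
		FetchResult result = fetcher.run();
		// Success: hand result.data and its client metadata to the caller.
	} catch (FetchException e) {
		if(e.isFatal()) {
			// Problem with the data as inserted (or CANCELLED /
			// ARCHIVE_RESTART): retrying cannot help.
			throw e;
		}
		// Low-level failure (DATA_NOT_FOUND, ROUTE_NOT_FOUND,
		// REJECTED_OVERLOAD, TRANSFER_FAILED): the caller may retry,
		// subject to its retry limits.
	}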
Deleted: branches/async-client-layer/src/freenet/client/Fetcher.java
===================================================================
--- trunk/freenet/src/freenet/client/Fetcher.java 2006-01-18 00:45:46 UTC
(rev 7871)
+++ branches/async-client-layer/src/freenet/client/Fetcher.java 2006-01-20
19:03:11 UTC (rev 7885)
@@ -1,375 +0,0 @@
-package freenet.client;
-
-import java.io.IOException;
-import java.net.MalformedURLException;
-import java.util.Iterator;
-import java.util.LinkedList;
-
-import freenet.client.events.DecodedBlockEvent;
-import freenet.client.events.FetchedMetadataEvent;
-import freenet.client.events.GotBlockEvent;
-import freenet.keys.ClientKey;
-import freenet.keys.ClientKeyBlock;
-import freenet.keys.FreenetURI;
-import freenet.keys.KeyDecodeException;
-import freenet.node.LowLevelGetException;
-import freenet.support.Bucket;
-import freenet.support.BucketTools;
-import freenet.support.Logger;
-import freenet.support.compress.CompressionOutputSizeException;
-import freenet.support.compress.Compressor;
-
-/** Class that does the actual fetching. Does not have to have a user friendly
- * interface!
- */
-class Fetcher {
-
- /** The original URI to be fetched. */
- final FreenetURI origURI;
- /** The settings for the fetch e.g. max file size */
- final FetcherContext ctx;
- /** The archive context object to be passed down the entire request.
This is
- * recreated if we get an ArchiveRestartException. It does loop
detection, partly
- * in order to prevent rare deadlocks.
- */
- ArchiveContext archiveContext;
-
- /**
- * Local-only constructor, with ArchiveContext, for recursion via e.g.
archives.
- */
- Fetcher(FreenetURI uri, FetcherContext fctx, ArchiveContext actx) {
- if(uri == null) throw new NullPointerException();
- origURI = uri;
- ctx = fctx;
- archiveContext = actx;
- }
-
- /**
- * Create a Fetcher. Public constructor, for when starting a new
request chain.
- * @param uri The key to fetch.
- * @param ctx The settings for the fetch.
- */
- public Fetcher(FreenetURI uri, FetcherContext ctx) {
- this(uri, ctx, new ArchiveContext());
- }
-
- /**
- * Fetch the key. Called by clients.
- * @return The key requested's data and client metadata.
- * @throws FetchException If we cannot fetch the key for some reason.
Various
- * other exceptions are used internally; they are converted to a
FetchException
- * by this driver routine.
- */
- public FetchResult run() throws FetchException {
- for(int i=0;i<ctx.maxArchiveRestarts;i++) {
- try {
- ClientMetadata dm = new ClientMetadata();
- ClientKey key;
- try {
- key = ClientKey.getBaseKey(origURI);
- } catch (MalformedURLException e2) {
- throw new
FetchException(FetchException.INVALID_URI, "Invalid URI: "+origURI);
- }
- LinkedList metaStrings =
origURI.listMetaStrings();
-
- FetchResult fr = realRun(dm, 0, key,
metaStrings, ctx.dontEnterImplicitArchives, ctx.localRequestOnly);
-
- if(metaStrings.isEmpty()) return fr;
- // Still got some meta-strings
- throw new
FetchException(FetchException.HAS_MORE_METASTRINGS);
-
- } catch (ArchiveRestartException e) {
- archiveContext = new ArchiveContext();
- continue;
- } catch (MetadataParseException e) {
- throw new FetchException(e);
- } catch (ArchiveFailureException e) {
-
if(e.getMessage().equals(ArchiveFailureException.TOO_MANY_LEVELS))
- throw new
FetchException(FetchException.TOO_DEEP_ARCHIVE_RECURSION);
- throw new FetchException(e);
- }
- }
- throw new
FetchException(FetchException.TOO_MANY_ARCHIVE_RESTARTS);
- }
-
- FetchResult realRun(ClientMetadata dm, int recursionLevel, FreenetURI
uri, boolean dontEnterImplicitArchives, boolean localOnly)
- throws FetchException, MetadataParseException, ArchiveFailureException,
ArchiveRestartException {
- ClientKey key;
- try {
- key = ClientKey.getBaseKey(origURI);
- } catch (MalformedURLException e2) {
- throw new FetchException(FetchException.INVALID_URI,
"Invalid URI: "+origURI);
- }
- LinkedList metaStrings = origURI.listMetaStrings();
-
- return realRun(dm, recursionLevel, key, metaStrings,
dontEnterImplicitArchives, localOnly);
- }
-
- /**
- * Fetch a key, within an overall fetch process. Called by self in
recursion, and
- * called by driver function @see run() .
- * @param dm The client metadata object to accumulate client metadata
in.
- * @param recursionLevel The recursion level. Incremented every time we
enter
- * realRun(). If it goes above a certain limit, we throw a
FetchException.
- * @param uri The URI to fetch.
- * @return The data, complete with client metadata.
- * @throws FetchException If we could not fetch the data.
- * @throws MetadataParseException If we could not parse the metadata.
- * @throws ArchiveFailureException If we could not extract data from an
archive.
- * @throws ArchiveRestartException
- */
- FetchResult realRun(ClientMetadata dm, int recursionLevel, ClientKey
key, LinkedList metaStrings, boolean dontEnterImplicitArchives, boolean
localOnly)
- throws FetchException, MetadataParseException, ArchiveFailureException,
ArchiveRestartException {
- Logger.minor(this, "Running fetch for: "+key);
- recursionLevel++;
- if(recursionLevel > ctx.maxRecursionLevel)
- throw new
FetchException(FetchException.TOO_MUCH_RECURSION, ""+recursionLevel+" should be
< "+ctx.maxRecursionLevel);
-
- // Do the fetch
- ClientKeyBlock block;
- try {
- block = ctx.client.getKey(key, localOnly,
ctx.starterClient, ctx.cacheLocalRequests);
- } catch (LowLevelGetException e) {
- switch(e.code) {
- case LowLevelGetException.DATA_NOT_FOUND:
- throw new
FetchException(FetchException.DATA_NOT_FOUND);
- case LowLevelGetException.DATA_NOT_FOUND_IN_STORE:
- throw new
FetchException(FetchException.DATA_NOT_FOUND);
- case LowLevelGetException.DECODE_FAILED:
- throw new
FetchException(FetchException.BLOCK_DECODE_ERROR);
- case LowLevelGetException.INTERNAL_ERROR:
- throw new
FetchException(FetchException.INTERNAL_ERROR);
- case LowLevelGetException.REJECTED_OVERLOAD:
- throw new
FetchException(FetchException.REJECTED_OVERLOAD);
- case LowLevelGetException.ROUTE_NOT_FOUND:
- throw new
FetchException(FetchException.ROUTE_NOT_FOUND);
- case LowLevelGetException.TRANSFER_FAILED:
- throw new
FetchException(FetchException.TRANSFER_FAILED);
- case LowLevelGetException.VERIFY_FAILED:
- throw new
FetchException(FetchException.BLOCK_DECODE_ERROR);
- default:
- Logger.error(this, "Unknown
LowLevelGetException code: "+e.code);
- throw new
FetchException(FetchException.INTERNAL_ERROR);
- }
- }
-
- ctx.eventProducer.produceEvent(new GotBlockEvent(key));
-
- Bucket data;
- try {
- data = block.decode(ctx.bucketFactory, (int)
(Math.min(ctx.maxTempLength, Integer.MAX_VALUE)));
- } catch (KeyDecodeException e1) {
- throw new
FetchException(FetchException.BLOCK_DECODE_ERROR, e1.getMessage());
- } catch (IOException e) {
- Logger.error(this, "Could not capture data - disk
full?: "+e, e);
- throw new FetchException(FetchException.BUCKET_ERROR,
e);
- }
-
- ctx.eventProducer.produceEvent(new DecodedBlockEvent(key));
-
- if(!block.isMetadata()) {
- // Just return the data
- return new FetchResult(dm, data);
- }
-
- // Otherwise we need to parse the metadata
-
- if(data.size() > ctx.maxMetadataSize)
- throw new
FetchException(FetchException.TOO_BIG_METADATA);
- Metadata metadata;
- try {
- metadata =
Metadata.construct(BucketTools.toByteArray(data));
- } catch (IOException e) {
- throw new FetchException(FetchException.BUCKET_ERROR,
e);
- }
-
- ctx.eventProducer.produceEvent(new FetchedMetadataEvent());
-
- return runMetadata(dm, recursionLevel, key, metaStrings,
metadata, null, key.getURI(), dontEnterImplicitArchives, localOnly);
- }
-
- /**
- * Fetch data, from metadata.
- * @param recursionLevel The recursion level, from above. Not
incremented here, as we will
- * go through realRun() if the key changes, so the number of passes
here is severely limited.
- * @param key The key being fetched.
- * @param metaStrings List of unused meta strings (to be used by
manifests).
- * @param metadata The parsed metadata to process.
- * @param container The container in which this metadata is found.
- * @throws MetadataParseException If we could not parse metadata from a
sub-document. Will be
- * converted to a FetchException above.
- * @throws ArchiveFailureException If extracting data from an archive
failed.
- * @throws FetchException If the fetch failed for some reason.
- * @throws ArchiveRestartException
- */
- private FetchResult runMetadata(ClientMetadata dm, int recursionLevel,
ClientKey key, LinkedList metaStrings,
- Metadata metadata, ArchiveHandler container, FreenetURI
thisKey, boolean dontEnterImplicitArchives, boolean localOnly)
- throws MetadataParseException, FetchException, ArchiveFailureException,
ArchiveRestartException {
-
- if(metadata.isSimpleManifest()) {
- String name;
- if(metaStrings.isEmpty())
- name = null;
- else
- name = (String) metaStrings.removeFirst();
- // Since metadata is a document, we just replace
metadata here
- if(name == null) {
- metadata = metadata.getDefaultDocument();
- if(metadata == null)
- throw new
FetchException(FetchException.NOT_ENOUGH_METASTRINGS);
- } else {
- metadata = metadata.getDocument(name);
- thisKey = thisKey.pushMetaString(name);
- if(metadata == null)
- throw new
FetchException(FetchException.NOT_IN_ARCHIVE);
- }
- return runMetadata(dm, recursionLevel, key,
metaStrings, metadata, container, thisKey, dontEnterImplicitArchives,
localOnly);
- } else if(metadata.isArchiveManifest()) {
- container = ctx.archiveManager.makeHandler(thisKey,
metadata.getArchiveType(), false);
- Bucket metadataBucket =
container.getMetadata(archiveContext, ctx, dm, recursionLevel, true);
- try {
- metadata = Metadata.construct(metadataBucket);
- } catch (IOException e) {
- throw new
FetchException(FetchException.BUCKET_ERROR);
- }
- return runMetadata(dm, recursionLevel+1, key,
metaStrings, metadata, container, thisKey, dontEnterImplicitArchives,
localOnly);
- } else if(metadata.isArchiveInternalRedirect()) {
- if(container == null)
- throw new
FetchException(FetchException.NOT_IN_ARCHIVE);
- else {
- /* Implicit archive handling:
- * Sooner or later we reach a
SimpleFileRedirect to data, a Splitfile to data,
- * or an ArchiveInternalRedirect to data.
- *
- * In this case, if it is an archive type, if
implicit archive handling is enabled, and if
- * we have more meta-strings, we can try to
enter it.
- */
- if((!dontEnterImplicitArchives) &&
ArchiveManager.isUsableArchiveType(dm.getMIMEType()) &&
(!metaStrings.isEmpty())) {
- // Possible implicit archive inside
archive?
- container =
ctx.archiveManager.makeHandler(thisKey,
ArchiveManager.getArchiveType(dm.getMIMEType()), false);
- Bucket metadataBucket =
container.getMetadata(archiveContext, ctx, dm, recursionLevel, true);
- try {
- metadata =
Metadata.construct(metadataBucket);
- } catch (IOException e) {
- throw new
FetchException(FetchException.BUCKET_ERROR);
- }
- return runMetadata(dm,
recursionLevel+1, key, metaStrings, metadata, container, thisKey,
dontEnterImplicitArchives, localOnly);
- }
- Bucket result =
container.get(metadata.getZIPInternalName(), archiveContext, ctx, dm,
recursionLevel, true);
-
dm.mergeNoOverwrite(metadata.getClientMetadata());
- return new FetchResult(dm, result);
- }
- } else if(metadata.isMultiLevelMetadata()) {
- // Doesn't have to be a splitfile; could be from a ZIP
or a plain file.
- metadata.setSimpleRedirect();
- FetchResult res = runMetadata(dm, recursionLevel, key,
metaStrings, metadata, container, thisKey, true, localOnly);
- try {
- metadata = Metadata.construct(res.data);
- } catch (MetadataParseException e) {
- throw new
FetchException(FetchException.INVALID_METADATA, e);
- } catch (IOException e) {
- throw new
FetchException(FetchException.BUCKET_ERROR, e);
- }
- return runMetadata(dm, recursionLevel, key,
metaStrings, metadata, container, thisKey, dontEnterImplicitArchives,
localOnly);
- } else if(metadata.isSingleFileRedirect()) {
- FreenetURI uri = metadata.getSingleTarget();
- dm.mergeNoOverwrite(metadata.getClientMetadata());
- if((!dontEnterImplicitArchives) &&
ArchiveManager.isUsableArchiveType(dm.getMIMEType()) &&
(!metaStrings.isEmpty())) {
- // Is probably an implicit archive.
- ClientKey target;
- try {
- target = ClientKey.getBaseKey(uri);
- } catch (MalformedURLException e1) {
- throw new
FetchException(FetchException.INVALID_URI, "Invalid URI: "+uri);
- }
- // Probably a usable archive as-is. We may not
have to fetch it.
- container = ctx.archiveManager.makeHandler(uri,
ArchiveManager.getArchiveType(dm.getMIMEType()), true);
- if(container != null) {
- Bucket metadataBucket =
container.getMetadata(archiveContext, ctx, dm, recursionLevel, true);
- try {
- metadata =
Metadata.construct(metadataBucket);
- } catch (IOException e) {
- throw new
FetchException(FetchException.BUCKET_ERROR);
- }
- return runMetadata(dm,
recursionLevel+1, key, metaStrings, metadata, container, thisKey,
dontEnterImplicitArchives, localOnly);
- } // else just fetch it, create context later
- }
-
-
- ClientKey newKey;
- try {
- newKey = ClientKey.getBaseKey(uri);
- } catch (MalformedURLException e2) {
- throw new
FetchException(FetchException.INVALID_URI, "Invalid URI: "+uri);
- }
-
- LinkedList newMetaStrings = uri.listMetaStrings();
-
- // Move any new meta strings to beginning of our list
of remaining meta strings
- while(!newMetaStrings.isEmpty()) {
- Object o = newMetaStrings.removeLast();
- metaStrings.addFirst(o);
- }
-
- FetchResult fr = realRun(dm, recursionLevel, newKey,
metaStrings, dontEnterImplicitArchives, localOnly);
- if(metadata.isCompressed()) {
- Compressor codec =
Compressor.getCompressionAlgorithmByMetadataID(metadata.compressionCodec);
- Bucket data = fr.data;
- Bucket output;
- try {
- long maxLen = ctx.maxTempLength;
- if(maxLen < 0) maxLen = Long.MAX_VALUE;
- output = codec.decompress(data,
ctx.bucketFactory, maxLen);
- } catch (IOException e) {
- throw new
FetchException(FetchException.BUCKET_ERROR, e);
- } catch (CompressionOutputSizeException e) {
- throw new
FetchException(FetchException.TOO_BIG);
- }
- return new FetchResult(fr, output);
- }
- return fr;
- } else if(metadata.isSplitfile()) {
- // Straight data splitfile.
- // Might be used by parents for something else, in
which case they will set dontEnterImplicitArchives.
- dm.mergeNoOverwrite(metadata.getClientMetadata()); //
even splitfiles can have mime types!
- if((!dontEnterImplicitArchives) &&
ArchiveManager.isUsableArchiveType(dm.getMIMEType()) &&
(!metaStrings.isEmpty())) {
- // We know target is not metadata.
- container =
ctx.archiveManager.makeHandler(thisKey,
ArchiveManager.getArchiveType(dm.getMIMEType()), false);
- Bucket metadataBucket =
container.getMetadata(archiveContext, ctx, dm, recursionLevel, true);
- try {
- metadata =
Metadata.construct(metadataBucket);
- } catch (IOException e) {
- throw new
FetchException(FetchException.BUCKET_ERROR, e);
- }
- return runMetadata(dm, recursionLevel+1, key,
metaStrings, metadata, container, thisKey, dontEnterImplicitArchives,
localOnly);
- }
-
- FetcherContext newCtx;
- if(metadata.splitUseLengths)
- newCtx = new FetcherContext(ctx,
FetcherContext.SPLITFILE_USE_LENGTHS_MASK);
- else
- newCtx = new FetcherContext(ctx,
FetcherContext.SPLITFILE_DEFAULT_MASK);
-
- SplitFetcher sf = new SplitFetcher(metadata,
archiveContext, newCtx, recursionLevel);
- Bucket sfResult = sf.fetch(); // will throw in event of
error
- if(metadata.isCompressed()) {
- Logger.minor(this, "Is compressed:
"+metadata.compressionCodec);
- Compressor codec =
Compressor.getCompressionAlgorithmByMetadataID(metadata.compressionCodec);
- try {
- long maxLen = ctx.maxTempLength;
- if(maxLen < 0) maxLen = Long.MAX_VALUE;
- sfResult = codec.decompress(sfResult,
ctx.bucketFactory, maxLen);
- } catch (IOException e) {
- throw new
FetchException(FetchException.BUCKET_ERROR, e);
- } catch (CompressionOutputSizeException e) {
- throw new
FetchException(FetchException.TOO_BIG);
- }
- } else
- Logger.minor(this, "Not compressed
("+metadata.compressionCodec+")");
- return new FetchResult(dm, sfResult);
- } else {
- Logger.error(this, "Don't know what to do with
metadata: "+metadata);
- throw new
FetchException(FetchException.UNKNOWN_METADATA);
- }
- }
-}
Copied: branches/async-client-layer/src/freenet/client/Fetcher.java (from rev
7873, trunk/freenet/src/freenet/client/Fetcher.java)
Deleted: branches/async-client-layer/src/freenet/client/FetcherContext.java
===================================================================
--- trunk/freenet/src/freenet/client/FetcherContext.java 2006-01-18
00:45:46 UTC (rev 7871)
+++ branches/async-client-layer/src/freenet/client/FetcherContext.java
2006-01-20 19:03:11 UTC (rev 7885)
@@ -1,157 +0,0 @@
-package freenet.client;
-
-import freenet.client.events.ClientEventProducer;
-import freenet.crypt.RandomSource;
-import freenet.node.RequestStarterClient;
-import freenet.node.SimpleLowLevelClient;
-import freenet.support.BucketFactory;
-
-/** Context for a Fetcher. Contains all the settings a Fetcher needs to know
about. */
-public class FetcherContext implements Cloneable {
-
- static final int SPLITFILE_DEFAULT_BLOCK_MASK = 1;
- static final int SPLITFILE_DEFAULT_MASK = 2;
- static final int SPLITFILE_USE_LENGTHS_MASK = 3;
- /** Low-level client to send low-level requests to. */
- final SimpleLowLevelClient client;
- final long maxOutputLength;
- final long maxTempLength;
- final ArchiveManager archiveManager;
- final BucketFactory bucketFactory;
- final int maxRecursionLevel;
- final int maxArchiveRestarts;
- final boolean dontEnterImplicitArchives;
- final int maxSplitfileThreads;
- final int maxSplitfileBlockRetries;
- final int maxNonSplitfileRetries;
- final RandomSource random;
- final boolean allowSplitfiles;
- final boolean followRedirects;
- final boolean localRequestOnly;
- final ClientEventProducer eventProducer;
- /** Whether to allow non-full blocks, or blocks which are not direct
CHKs, in splitfiles.
- * Set by the splitfile metadata and the mask constructor, so we don't
need to pass it in. */
- final boolean splitfileUseLengths;
- final int maxMetadataSize;
- final int maxDataBlocksPerSegment;
- final int maxCheckBlocksPerSegment;
- final RequestStarterClient starterClient;
- final boolean cacheLocalRequests;
-
-
- public FetcherContext(SimpleLowLevelClient client, long curMaxLength,
- long curMaxTempLength, int maxMetadataSize, int
maxRecursionLevel, int maxArchiveRestarts,
- boolean dontEnterImplicitArchives, int
maxSplitfileThreads,
- int maxSplitfileBlockRetries, int
maxNonSplitfileRetries,
- boolean allowSplitfiles, boolean followRedirects,
boolean localRequestOnly,
- int maxDataBlocksPerSegment, int
maxCheckBlocksPerSegment,
- RandomSource random, ArchiveManager archiveManager,
BucketFactory bucketFactory,
- ClientEventProducer producer, RequestStarterClient
starter, boolean cacheLocalRequests) {
- this.client = client;
- this.maxOutputLength = curMaxLength;
- this.maxTempLength = curMaxTempLength;
- this.maxMetadataSize = maxMetadataSize;
- this.archiveManager = archiveManager;
- this.bucketFactory = bucketFactory;
- this.maxRecursionLevel = maxRecursionLevel;
- this.maxArchiveRestarts = maxArchiveRestarts;
- this.dontEnterImplicitArchives = dontEnterImplicitArchives;
- this.random = random;
- this.maxSplitfileThreads = maxSplitfileThreads;
- this.maxSplitfileBlockRetries = maxSplitfileBlockRetries;
- this.maxNonSplitfileRetries = maxNonSplitfileRetries;
- this.allowSplitfiles = allowSplitfiles;
- this.followRedirects = followRedirects;
- this.localRequestOnly = localRequestOnly;
- this.splitfileUseLengths = false;
- this.eventProducer = producer;
- this.maxDataBlocksPerSegment = maxDataBlocksPerSegment;
- this.maxCheckBlocksPerSegment = maxCheckBlocksPerSegment;
- this.starterClient = starter;
- this.cacheLocalRequests = cacheLocalRequests;
- }
-
- public FetcherContext(FetcherContext ctx, int maskID) {
- if(maskID == SPLITFILE_DEFAULT_BLOCK_MASK) {
- this.client = ctx.client;
- this.maxOutputLength = ctx.maxOutputLength;
- this.maxMetadataSize = ctx.maxMetadataSize;
- this.maxTempLength = ctx.maxTempLength;
- this.archiveManager = ctx.archiveManager;
- this.bucketFactory = ctx.bucketFactory;
- this.maxRecursionLevel = 1;
- this.maxArchiveRestarts = 0;
- this.dontEnterImplicitArchives = true;
- this.random = ctx.random;
- this.maxSplitfileThreads = 0;
- this.maxSplitfileBlockRetries = 0;
- this.maxNonSplitfileRetries =
ctx.maxNonSplitfileRetries;
- this.allowSplitfiles = false;
- this.followRedirects = false;
- this.localRequestOnly = ctx.localRequestOnly;
- this.splitfileUseLengths = false;
- this.eventProducer = ctx.eventProducer;
- this.maxDataBlocksPerSegment = 0;
- this.maxCheckBlocksPerSegment = 0;
- this.starterClient = ctx.starterClient;
- this.cacheLocalRequests = ctx.cacheLocalRequests;
- } else if(maskID == SPLITFILE_DEFAULT_MASK) {
- this.client = ctx.client;
- this.maxOutputLength = ctx.maxOutputLength;
- this.maxTempLength = ctx.maxTempLength;
- this.maxMetadataSize = ctx.maxMetadataSize;
- this.archiveManager = ctx.archiveManager;
- this.bucketFactory = ctx.bucketFactory;
- this.maxRecursionLevel = ctx.maxRecursionLevel;
- this.maxArchiveRestarts = ctx.maxArchiveRestarts;
- this.dontEnterImplicitArchives =
ctx.dontEnterImplicitArchives;
- this.random = ctx.random;
- this.maxSplitfileThreads = ctx.maxSplitfileThreads;
- this.maxSplitfileBlockRetries =
ctx.maxSplitfileBlockRetries;
- this.maxNonSplitfileRetries =
ctx.maxNonSplitfileRetries;
- this.allowSplitfiles = ctx.allowSplitfiles;
- this.followRedirects = ctx.followRedirects;
- this.localRequestOnly = ctx.localRequestOnly;
- this.splitfileUseLengths = false;
- this.eventProducer = ctx.eventProducer;
- this.maxDataBlocksPerSegment =
ctx.maxDataBlocksPerSegment;
- this.maxCheckBlocksPerSegment =
ctx.maxCheckBlocksPerSegment;
- this.starterClient = ctx.starterClient;
- this.cacheLocalRequests = ctx.cacheLocalRequests;
- } else if(maskID == SPLITFILE_USE_LENGTHS_MASK) {
- this.client = ctx.client;
- this.maxOutputLength = ctx.maxOutputLength;
- this.maxTempLength = ctx.maxTempLength;
- this.maxMetadataSize = ctx.maxMetadataSize;
- this.archiveManager = ctx.archiveManager;
- this.bucketFactory = ctx.bucketFactory;
- this.maxRecursionLevel = ctx.maxRecursionLevel;
- this.maxArchiveRestarts = ctx.maxArchiveRestarts;
- this.dontEnterImplicitArchives =
ctx.dontEnterImplicitArchives;
- this.random = ctx.random;
- this.maxSplitfileThreads = ctx.maxSplitfileThreads;
- this.maxSplitfileBlockRetries =
ctx.maxSplitfileBlockRetries;
- this.maxNonSplitfileRetries =
ctx.maxNonSplitfileRetries;
- this.allowSplitfiles = ctx.allowSplitfiles;
- this.followRedirects = ctx.followRedirects;
- this.localRequestOnly = ctx.localRequestOnly;
- this.splitfileUseLengths = true;
- this.eventProducer = ctx.eventProducer;
- this.maxDataBlocksPerSegment =
ctx.maxDataBlocksPerSegment;
- this.maxCheckBlocksPerSegment =
ctx.maxCheckBlocksPerSegment;
- this.starterClient = ctx.starterClient;
- this.cacheLocalRequests = ctx.cacheLocalRequests;
- } else throw new IllegalArgumentException();
- }
-
- /** Make public, but just call parent for a field for field copy */
- public Object clone() {
- try {
- return super.clone();
- } catch (CloneNotSupportedException e) {
- // Impossible
- throw new Error(e);
- }
- }
-
-}
Copied: branches/async-client-layer/src/freenet/client/FetcherContext.java
(from rev 7873, trunk/freenet/src/freenet/client/FetcherContext.java)
===================================================================
--- trunk/freenet/src/freenet/client/FetcherContext.java 2006-01-18
16:38:29 UTC (rev 7873)
+++ branches/async-client-layer/src/freenet/client/FetcherContext.java
2006-01-20 19:03:11 UTC (rev 7885)
@@ -0,0 +1,223 @@
+package freenet.client;
+
+import freenet.client.events.ClientEventProducer;
+import freenet.crypt.RandomSource;
+import freenet.node.RequestStarterClient;
+import freenet.node.SimpleLowLevelClient;
+import freenet.support.BucketFactory;
+
+/** Context for a Fetcher. Contains all the settings a Fetcher needs to know
about. */
+public class FetcherContext implements Cloneable {
+
+ public static final int IDENTICAL_MASK = 0;
+ public static final int SPLITFILE_DEFAULT_BLOCK_MASK = 1;
+ public static final int SPLITFILE_DEFAULT_MASK = 2;
+ public static final int SPLITFILE_USE_LENGTHS_MASK = 3;
+ public static final int SET_RETURN_ARCHIVES = 4;
+ /** Low-level client to send low-level requests to. */
+ final SimpleLowLevelClient client;
+ public long maxOutputLength;
+ public long maxTempLength;
+ public final ArchiveManager archiveManager;
+ public final BucketFactory bucketFactory;
+ public int maxRecursionLevel;
+ public int maxArchiveRestarts;
+ public boolean dontEnterImplicitArchives;
+ public int maxSplitfileThreads;
+ public int maxSplitfileBlockRetries;
+ public int maxNonSplitfileRetries;
+ public final RandomSource random;
+ public boolean allowSplitfiles;
+ public boolean followRedirects;
+ public boolean localRequestOnly;
+ public boolean ignoreStore;
+ public final ClientEventProducer eventProducer;
+ /** Whether to allow non-full blocks, or blocks which are not direct
CHKs, in splitfiles.
+ * Set by the splitfile metadata and the mask constructor, so we don't
need to pass it in. */
+ public boolean splitfileUseLengths;
+ public int maxMetadataSize;
+ public int maxDataBlocksPerSegment;
+ public int maxCheckBlocksPerSegment;
+ public final RequestStarterClient starterClient;
+ public boolean cacheLocalRequests;
+ private boolean cancelled;
+ /** If true, and we get a ZIP manifest, and we have no meta-strings
left, then
+ * return the manifest contents as data. */
+ public boolean returnZIPManifests;
+
+ public final boolean isCancelled() {
+ return cancelled;
+ }
+
+ public FetcherContext(SimpleLowLevelClient client, long curMaxLength,
+ long curMaxTempLength, int maxMetadataSize, int maxRecursionLevel, int maxArchiveRestarts,
+ boolean dontEnterImplicitArchives, int maxSplitfileThreads,
+ int maxSplitfileBlockRetries, int maxNonSplitfileRetries,
+ boolean allowSplitfiles, boolean followRedirects, boolean localRequestOnly,
+ int maxDataBlocksPerSegment, int maxCheckBlocksPerSegment,
+ RandomSource random, ArchiveManager archiveManager, BucketFactory bucketFactory,
+ ClientEventProducer producer, RequestStarterClient starter, boolean cacheLocalRequests) {
+ this.client = client;
+ this.maxOutputLength = curMaxLength;
+ this.maxTempLength = curMaxTempLength;
+ this.maxMetadataSize = maxMetadataSize;
+ this.archiveManager = archiveManager;
+ this.bucketFactory = bucketFactory;
+ this.maxRecursionLevel = maxRecursionLevel;
+ this.maxArchiveRestarts = maxArchiveRestarts;
+ this.dontEnterImplicitArchives = dontEnterImplicitArchives;
+ this.random = random;
+ this.maxSplitfileThreads = maxSplitfileThreads;
+ this.maxSplitfileBlockRetries = maxSplitfileBlockRetries;
+ this.maxNonSplitfileRetries = maxNonSplitfileRetries;
+ this.allowSplitfiles = allowSplitfiles;
+ this.followRedirects = followRedirects;
+ this.localRequestOnly = localRequestOnly;
+ this.splitfileUseLengths = false;
+ this.eventProducer = producer;
+ this.maxDataBlocksPerSegment = maxDataBlocksPerSegment;
+ this.maxCheckBlocksPerSegment = maxCheckBlocksPerSegment;
+ this.starterClient = starter;
+ this.cacheLocalRequests = cacheLocalRequests;
+ }
+
+ public FetcherContext(FetcherContext ctx, int maskID) {
+ if(maskID == IDENTICAL_MASK) {
+ this.client = ctx.client;
+ this.maxOutputLength = ctx.maxOutputLength;
+ this.maxMetadataSize = ctx.maxMetadataSize;
+ this.maxTempLength = ctx.maxTempLength;
+ this.archiveManager = ctx.archiveManager;
+ this.bucketFactory = ctx.bucketFactory;
+ this.maxRecursionLevel = ctx.maxRecursionLevel;
+ this.maxArchiveRestarts = ctx.maxArchiveRestarts;
+ this.dontEnterImplicitArchives = ctx.dontEnterImplicitArchives;
+ this.random = ctx.random;
+ this.maxSplitfileThreads = ctx.maxSplitfileThreads;
+ this.maxSplitfileBlockRetries = ctx.maxSplitfileBlockRetries;
+ this.maxNonSplitfileRetries = ctx.maxNonSplitfileRetries;
+ this.allowSplitfiles = ctx.allowSplitfiles;
+ this.followRedirects = ctx.followRedirects;
+ this.localRequestOnly = ctx.localRequestOnly;
+ this.splitfileUseLengths = ctx.splitfileUseLengths;
+ this.eventProducer = ctx.eventProducer;
+ this.maxDataBlocksPerSegment = ctx.maxDataBlocksPerSegment;
+ this.maxCheckBlocksPerSegment = ctx.maxCheckBlocksPerSegment;
+ this.starterClient = ctx.starterClient;
+ this.cacheLocalRequests = ctx.cacheLocalRequests;
+ this.returnZIPManifests = ctx.returnZIPManifests;
+ } else if(maskID == SPLITFILE_DEFAULT_BLOCK_MASK) {
+ this.client = ctx.client;
+ this.maxOutputLength = ctx.maxOutputLength;
+ this.maxMetadataSize = ctx.maxMetadataSize;
+ this.maxTempLength = ctx.maxTempLength;
+ this.archiveManager = ctx.archiveManager;
+ this.bucketFactory = ctx.bucketFactory;
+ this.maxRecursionLevel = 1;
+ this.maxArchiveRestarts = 0;
+ this.dontEnterImplicitArchives = true;
+ this.random = ctx.random;
+ this.maxSplitfileThreads = 0;
+ this.maxSplitfileBlockRetries = 0;
+ this.maxNonSplitfileRetries = ctx.maxNonSplitfileRetries;
+ this.allowSplitfiles = false;
+ this.followRedirects = false;
+ this.localRequestOnly = ctx.localRequestOnly;
+ this.splitfileUseLengths = false;
+ this.eventProducer = ctx.eventProducer;
+ this.maxDataBlocksPerSegment = 0;
+ this.maxCheckBlocksPerSegment = 0;
+ this.starterClient = ctx.starterClient;
+ this.cacheLocalRequests = ctx.cacheLocalRequests;
+ this.returnZIPManifests = false;
+ } else if(maskID == SPLITFILE_DEFAULT_MASK) {
+ this.client = ctx.client;
+ this.maxOutputLength = ctx.maxOutputLength;
+ this.maxTempLength = ctx.maxTempLength;
+ this.maxMetadataSize = ctx.maxMetadataSize;
+ this.archiveManager = ctx.archiveManager;
+ this.bucketFactory = ctx.bucketFactory;
+ this.maxRecursionLevel = ctx.maxRecursionLevel;
+ this.maxArchiveRestarts = ctx.maxArchiveRestarts;
+ this.dontEnterImplicitArchives = ctx.dontEnterImplicitArchives;
+ this.random = ctx.random;
+ this.maxSplitfileThreads = ctx.maxSplitfileThreads;
+ this.maxSplitfileBlockRetries = ctx.maxSplitfileBlockRetries;
+ this.maxNonSplitfileRetries = ctx.maxNonSplitfileRetries;
+ this.allowSplitfiles = ctx.allowSplitfiles;
+ this.followRedirects = ctx.followRedirects;
+ this.localRequestOnly = ctx.localRequestOnly;
+ this.splitfileUseLengths = false;
+ this.eventProducer = ctx.eventProducer;
+ this.maxDataBlocksPerSegment = ctx.maxDataBlocksPerSegment;
+ this.maxCheckBlocksPerSegment = ctx.maxCheckBlocksPerSegment;
+ this.starterClient = ctx.starterClient;
+ this.cacheLocalRequests = ctx.cacheLocalRequests;
+ this.returnZIPManifests = ctx.returnZIPManifests;
+ } else if(maskID == SPLITFILE_USE_LENGTHS_MASK) {
+ this.client = ctx.client;
+ this.maxOutputLength = ctx.maxOutputLength;
+ this.maxTempLength = ctx.maxTempLength;
+ this.maxMetadataSize = ctx.maxMetadataSize;
+ this.archiveManager = ctx.archiveManager;
+ this.bucketFactory = ctx.bucketFactory;
+ this.maxRecursionLevel = ctx.maxRecursionLevel;
+ this.maxArchiveRestarts = ctx.maxArchiveRestarts;
+ this.dontEnterImplicitArchives = ctx.dontEnterImplicitArchives;
+ this.random = ctx.random;
+ this.maxSplitfileThreads = ctx.maxSplitfileThreads;
+ this.maxSplitfileBlockRetries = ctx.maxSplitfileBlockRetries;
+ this.maxNonSplitfileRetries = ctx.maxNonSplitfileRetries;
+ this.allowSplitfiles = ctx.allowSplitfiles;
+ this.followRedirects = ctx.followRedirects;
+ this.localRequestOnly = ctx.localRequestOnly;
+ this.splitfileUseLengths = true;
+ this.eventProducer = ctx.eventProducer;
+ this.maxDataBlocksPerSegment = ctx.maxDataBlocksPerSegment;
+ this.maxCheckBlocksPerSegment = ctx.maxCheckBlocksPerSegment;
+ this.starterClient = ctx.starterClient;
+ this.cacheLocalRequests = ctx.cacheLocalRequests;
+ this.returnZIPManifests = ctx.returnZIPManifests;
+ } else if (maskID == SET_RETURN_ARCHIVES) {
+ this.client = ctx.client;
+ this.maxOutputLength = ctx.maxOutputLength;
+ this.maxMetadataSize = ctx.maxMetadataSize;
+ this.maxTempLength = ctx.maxTempLength;
+ this.archiveManager = ctx.archiveManager;
+ this.bucketFactory = ctx.bucketFactory;
+ this.maxRecursionLevel = ctx.maxRecursionLevel;
+ this.maxArchiveRestarts = ctx.maxArchiveRestarts;
+ this.dontEnterImplicitArchives = ctx.dontEnterImplicitArchives;
+ this.random = ctx.random;
+ this.maxSplitfileThreads = ctx.maxSplitfileThreads;
+ this.maxSplitfileBlockRetries = ctx.maxSplitfileBlockRetries;
+ this.maxNonSplitfileRetries = ctx.maxNonSplitfileRetries;
+ this.allowSplitfiles = ctx.allowSplitfiles;
+ this.followRedirects = ctx.followRedirects;
+ this.localRequestOnly = ctx.localRequestOnly;
+ this.splitfileUseLengths = ctx.splitfileUseLengths;
+ this.eventProducer = ctx.eventProducer;
+ this.maxDataBlocksPerSegment = ctx.maxDataBlocksPerSegment;
+ this.maxCheckBlocksPerSegment = ctx.maxCheckBlocksPerSegment;
+ this.starterClient = ctx.starterClient;
+ this.cacheLocalRequests = ctx.cacheLocalRequests;
+ this.returnZIPManifests = true;
+ }
+ else throw new IllegalArgumentException();
+ }
+
+ public void cancel() {
+ this.cancelled = true;
+ }
+
+ /** Make public, but just call the parent for a field-for-field copy */
+ public Object clone() {
+ try {
+ return super.clone();
+ } catch (CloneNotSupportedException e) {
+ // Impossible
+ throw new Error(e);
+ }
+ }
+
+}
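The mask IDs above let a parent fetch derive a restricted child context without re-passing every setting. A minimal usage sketch (not part of this commit; assumes a fully constructed parent context ctx):

    // Child context for fetching the individual blocks of a splitfile:
    // single-level, no redirects, no nested splitfiles (see the
    // SPLITFILE_DEFAULT_BLOCK_MASK branch above).
    FetcherContext blockCtx = new FetcherContext(ctx, FetcherContext.SPLITFILE_DEFAULT_BLOCK_MASK);
    // Same settings as the parent, but ZIP manifest contents are returned as data.
    FetcherContext zipCtx = new FetcherContext(ctx, FetcherContext.SET_RETURN_ARCHIVES);
    blockCtx.cancel(); // fetches polling blockCtx.isCancelled() will stop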
Deleted: branches/async-client-layer/src/freenet/client/HighLevelSimpleClient.java
===================================================================
--- trunk/freenet/src/freenet/client/HighLevelSimpleClient.java 2006-01-18 00:45:46 UTC (rev 7871)
+++ branches/async-client-layer/src/freenet/client/HighLevelSimpleClient.java 2006-01-20 19:03:11 UTC (rev 7885)
@@ -1,47 +0,0 @@
-package freenet.client;
-
-import java.util.HashMap;
-
-import freenet.client.events.ClientEventListener;
-import freenet.keys.FreenetURI;
-import freenet.node.RequestStarterClient;
-
-public interface HighLevelSimpleClient {
-
- /**
- * Set the maximum length of the fetched data.
- */
- public void setMaxLength(long maxLength);
-
- /**
- * Set the maximum length of any intermediate data, e.g. ZIP manifests.
- */
- public void setMaxIntermediateLength(long maxIntermediateLength);
-
- /**
- * Blocking fetch of a URI
- * @throws FetchException If there is an error fetching the data
- */
- public FetchResult fetch(FreenetURI uri) throws FetchException;
-
- /**
- * Blocking insert.
- * @throws InserterException If there is an error inserting the data
- */
- public FreenetURI insert(InsertBlock insert, boolean getCHKOnly) throws InserterException;
-
- /**
- * Blocking insert of a redirect.
- */
- public FreenetURI insertRedirect(FreenetURI insertURI, FreenetURI target) throws InserterException;
-
- /**
- * Blocking insert of multiple files as a manifest (or zip manifest, etc).
- */
- public FreenetURI insertManifest(FreenetURI insertURI, HashMap bucketsByName, String defaultName) throws InserterException;
-
- /**
- * Add a ClientEventListener.
- */
- public void addGlobalHook(ClientEventListener listener);
-}
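For orientation, a blocking fetch through this interface looks roughly like the following (a sketch only; the URI string, the way hlsc is obtained, and the error handling are placeholders, not code from this commit):

    HighLevelSimpleClient hlsc = ...; // obtained from the node
    hlsc.setMaxLength(1024 * 1024);   // refuse anything over 1 MiB
    try {
        FetchResult result = hlsc.fetch(new FreenetURI("CHK@..."));
        // result carries the fetched data and its client metadata
    } catch (FetchException e) {
        // inspect e for the failure mode
    }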
Copied: branches/async-client-layer/src/freenet/client/HighLevelSimpleClient.java (from rev 7874, trunk/freenet/src/freenet/client/HighLevelSimpleClient.java)
Deleted: branches/async-client-layer/src/freenet/client/HighLevelSimpleClientImpl.java
===================================================================
--- trunk/freenet/src/freenet/client/HighLevelSimpleClientImpl.java 2006-01-18 00:45:46 UTC (rev 7871)
+++ branches/async-client-layer/src/freenet/client/HighLevelSimpleClientImpl.java 2006-01-20 19:03:11 UTC (rev 7885)
@@ -1,149 +0,0 @@
-package freenet.client;
-
-import java.io.IOException;
-import java.net.MalformedURLException;
-import java.util.HashMap;
-
-import freenet.client.events.ClientEventListener;
-import freenet.client.events.ClientEventProducer;
-import freenet.client.events.EventLogger;
-import freenet.client.events.SimpleEventProducer;
-import freenet.crypt.RandomSource;
-import freenet.keys.ClientCHK;
-import freenet.keys.ClientKey;
-import freenet.keys.FreenetURI;
-import freenet.keys.InsertableClientSSK;
-import freenet.node.RequestStarterClient;
-import freenet.node.SimpleLowLevelClient;
-import freenet.support.Bucket;
-import freenet.support.BucketFactory;
-import freenet.support.BucketTools;
-import freenet.support.Logger;
-
-public class HighLevelSimpleClientImpl implements HighLevelSimpleClient {
-
- private final SimpleLowLevelClient client;
- private final ArchiveManager archiveManager;
- private final BucketFactory bucketFactory;
- /** One CEP for all requests and inserts */
- private final ClientEventProducer globalEventProducer;
- private long curMaxLength;
- private long curMaxTempLength;
- private int curMaxMetadataLength;
- private final RandomSource random;
- private final RequestStarterClient requestStarter;
- private final RequestStarterClient insertStarter;
- /** See comments in Node */
- private final boolean cacheLocalRequests;
- static final int MAX_RECURSION = 10;
- static final int MAX_ARCHIVE_RESTARTS = 2;
- static final boolean DONT_ENTER_IMPLICIT_ARCHIVES = true;
- /** Number of threads used by a splitfile fetch */
- static final int SPLITFILE_THREADS = 20;
- /** Number of retries allowed per block in a splitfile. Must be at least 1 as
- * on the first try we just check the datastore.
- */
- static final int SPLITFILE_BLOCK_RETRIES = 5;
- /** Number of retries allowed on non-splitfile fetches. Unlike above, we always
- * go to network. */
- static final int NON_SPLITFILE_RETRIES = 2;
- /** Whether to fetch splitfiles. Don't turn this off! */
- static final boolean FETCH_SPLITFILES = true;
- /** Whether to follow redirects etc. If false, we only fetch a plain block of data.
- * Don't turn this off either! */
- static final boolean FOLLOW_REDIRECTS = true;
- /** If set, only check the local datastore, don't send an actual request out.
- * Don't turn this off either. */
- static final boolean LOCAL_REQUESTS_ONLY = false;
- static final int SPLITFILE_INSERT_THREADS = 20;
- /** Number of retries on inserts */
- static final int INSERT_RETRIES = 10;
- /** Number of RNFs on insert that make a success, or -1 on large networks */
- static final int CONSECUTIVE_RNFS_ASSUME_SUCCESS = 2;
- static final int CONSECUTIVE_RNFS_ASSUME_SUCCESS = 2;
- // going by memory usage only; 4kB per stripe
- static final int MAX_SPLITFILE_BLOCKS_PER_SEGMENT = 1024;
- static final int MAX_SPLITFILE_CHECK_BLOCKS_PER_SEGMENT = 1536;
- static final int SPLITFILE_BLOCKS_PER_SEGMENT = 128;
- static final int SPLITFILE_CHECK_BLOCKS_PER_SEGMENT = 64;
-
-
- public HighLevelSimpleClientImpl(SimpleLowLevelClient client, ArchiveManager mgr, BucketFactory bf, RandomSource r, RequestStarterClient requestStarterClient, RequestStarterClient insertStarterClient, boolean cacheLocalRequests) {
- this.client = client;
- archiveManager = mgr;
- bucketFactory = bf;
- random = r;
- this.globalEventProducer = new SimpleEventProducer();
- globalEventProducer.addEventListener(new EventLogger(Logger.MINOR));
- curMaxLength = Long.MAX_VALUE;
- curMaxTempLength = Long.MAX_VALUE;
- curMaxMetadataLength = 1024 * 1024;
- this.requestStarter = requestStarterClient;
- this.insertStarter = insertStarterClient;
- this.cacheLocalRequests = cacheLocalRequests;
- }
-
- public void setMaxLength(long maxLength) {
- curMaxLength = maxLength;
- }
-
- public void setMaxIntermediateLength(long maxIntermediateLength) {
- curMaxTempLength = maxIntermediateLength;
- }
-
- /**
- * Fetch a key. Either returns the data, or throws an exception.
- */
- public FetchResult fetch(FreenetURI uri) throws FetchException {
- if(uri == null) throw new NullPointerException();
- FetcherContext context = new FetcherContext(client, curMaxLength, curMaxTempLength, curMaxMetadataLength,
- MAX_RECURSION, MAX_ARCHIVE_RESTARTS, DONT_ENTER_IMPLICIT_ARCHIVES,
- SPLITFILE_THREADS, SPLITFILE_BLOCK_RETRIES, NON_SPLITFILE_RETRIES,
- FETCH_SPLITFILES, FOLLOW_REDIRECTS, LOCAL_REQUESTS_ONLY,
- MAX_SPLITFILE_BLOCKS_PER_SEGMENT, MAX_SPLITFILE_CHECK_BLOCKS_PER_SEGMENT,
- random, archiveManager, bucketFactory, globalEventProducer, requestStarter, cacheLocalRequests);
- Fetcher f = new Fetcher(uri, context);
- return f.run();
- }
-
- public FreenetURI insert(InsertBlock insert, boolean getCHKOnly) throws InserterException {
- InserterContext context = new InserterContext(client, bucketFactory, random, INSERT_RETRIES, CONSECUTIVE_RNFS_ASSUME_SUCCESS,
- SPLITFILE_INSERT_THREADS, SPLITFILE_BLOCKS_PER_SEGMENT, SPLITFILE_CHECK_BLOCKS_PER_SEGMENT, globalEventProducer, insertStarter, cacheLocalRequests);
- FileInserter i = new FileInserter(context);
- return i.run(insert, false, getCHKOnly, false, null);
- }
-
- public FreenetURI insert(InsertBlock insert, boolean getCHKOnly, boolean metadata) throws InserterException {
- InserterContext context = new InserterContext(client, bucketFactory, random, INSERT_RETRIES, CONSECUTIVE_RNFS_ASSUME_SUCCESS,
- SPLITFILE_INSERT_THREADS, SPLITFILE_BLOCKS_PER_SEGMENT, SPLITFILE_CHECK_BLOCKS_PER_SEGMENT, globalEventProducer, insertStarter, cacheLocalRequests);
- FileInserter i = new FileInserter(context);
- return i.run(insert, metadata, getCHKOnly, false, null);
- }
-
- public FreenetURI insertRedirect(FreenetURI insertURI, FreenetURI targetURI) throws InserterException {
- Metadata m = new Metadata(Metadata.SIMPLE_REDIRECT, targetURI, new ClientMetadata());
- Bucket b;
- try {
- b = BucketTools.makeImmutableBucket(bucketFactory, m.writeToByteArray());
- } catch (IOException e) {
- Logger.error(this, "Bucket error: "+e);
- throw new InserterException(InserterException.INTERNAL_ERROR, e, null);
- }
- ClientKey k;
- InsertBlock block = new InsertBlock(b, null, insertURI);
- InserterContext context = new InserterContext(client, bucketFactory, random, INSERT_RETRIES, CONSECUTIVE_RNFS_ASSUME_SUCCESS,
- SPLITFILE_INSERT_THREADS, SPLITFILE_BLOCKS_PER_SEGMENT, SPLITFILE_CHECK_BLOCKS_PER_SEGMENT, globalEventProducer, insertStarter, cacheLocalRequests);
- FileInserter i = new FileInserter(context);
- return i.run(block, true, false, false, null);
- }
-
- public FreenetURI insertManifest(FreenetURI insertURI, HashMap bucketsByName, String defaultName) throws InserterException {
- InserterContext context = new InserterContext(client, bucketFactory, random, INSERT_RETRIES, CONSECUTIVE_RNFS_ASSUME_SUCCESS,
- SPLITFILE_INSERT_THREADS, SPLITFILE_BLOCKS_PER_SEGMENT, SPLITFILE_CHECK_BLOCKS_PER_SEGMENT, globalEventProducer, insertStarter, cacheLocalRequests);
- MultiFileInserter mfi = new MultiFileInserter(insertURI, bucketsByName, context, defaultName);
- return mfi.run();
- }
-
- public void addGlobalHook(ClientEventListener listener) {
- globalEventProducer.addEventListener(listener);
- }
-}
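The constructor above registers an EventLogger on the global event producer; client code can hook the same event stream (sketch, assuming an hlsc instance obtained elsewhere):

    // Every event from all fetches and inserts made through this client
    // will also reach this listener.
    hlsc.addGlobalHook(new EventLogger(Logger.MINOR));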
Copied: branches/async-client-layer/src/freenet/client/HighLevelSimpleClientImpl.java (from rev 7874, trunk/freenet/src/freenet/client/HighLevelSimpleClientImpl.java)
Modified: branches/async-client-layer/src/freenet/client/InsertSegment.java
===================================================================
--- trunk/freenet/src/freenet/client/InsertSegment.java 2006-01-18 00:45:46 UTC (rev 7871)
+++ branches/async-client-layer/src/freenet/client/InsertSegment.java 2006-01-20 19:03:11 UTC (rev 7885)
@@ -12,22 +12,22 @@
public class InsertSegment {
final FECCodec codec;
- final SplitfileBlock[] origDataBlocks;
+ final StartableSplitfileBlock[] origDataBlocks;
final int blockLength;
final BucketFactory bf;
/** Check blocks. Will be created by encode(...). */
- final SplitfileBlock[] checkBlocks;
+ final StartableSplitfileBlock[] checkBlocks;
final boolean getCHKOnly;
// just for debugging
final int segNo;
- public InsertSegment(short splitfileAlgo, SplitfileBlock[] origDataBlocks, int blockLength, BucketFactory bf, boolean getCHKOnly, int segNo) {
+ public InsertSegment(short splitfileAlgo, StartableSplitfileBlock[] origDataBlocks, int blockLength, BucketFactory bf, boolean getCHKOnly, int segNo) {
 this.origDataBlocks = origDataBlocks;
 codec = FECCodec.getCodec(splitfileAlgo, origDataBlocks.length);
 if(codec != null)
- checkBlocks = new SplitfileBlock[codec.countCheckBlocks()];
+ checkBlocks = new StartableSplitfileBlock[codec.countCheckBlocks()];
 else
- checkBlocks = new SplitfileBlock[0];
+ checkBlocks = new StartableSplitfileBlock[0];
this.blockLength = blockLength;
this.bf = bf;
this.getCHKOnly = getCHKOnly;
Deleted: branches/async-client-layer/src/freenet/client/InserterContext.java
===================================================================
--- trunk/freenet/src/freenet/client/InserterContext.java 2006-01-18 00:45:46 UTC (rev 7871)
+++ branches/async-client-layer/src/freenet/client/InserterContext.java 2006-01-20 19:03:11 UTC (rev 7885)
@@ -1,46 +0,0 @@
-package freenet.client;
-
-import freenet.client.events.ClientEventProducer;
-import freenet.crypt.RandomSource;
-import freenet.node.RequestStarterClient;
-import freenet.node.SimpleLowLevelClient;
-import freenet.support.BucketFactory;
-
-/** Context object for an insert operation, including both simple and multi-file inserts */
-public class InserterContext {
-
- final SimpleLowLevelClient client;
- final BucketFactory bf;
- /** If true, don't try to compress the data */
- final boolean dontCompress;
- final RandomSource random;
- final short splitfileAlgorithm;
- final int maxInsertRetries;
- final int maxSplitInsertThreads;
- final int consecutiveRNFsCountAsSuccess;
- final int splitfileSegmentDataBlocks;
- final int splitfileSegmentCheckBlocks;
- final ClientEventProducer eventProducer;
- final RequestStarterClient starterClient;
- /** Interesting tradeoff, see comments at top of Node.java. */
- final boolean cacheLocalRequests;
-
- public InserterContext(SimpleLowLevelClient client, BucketFactory bf, RandomSource random,
- int maxRetries, int rnfsToSuccess, int maxThreads, int splitfileSegmentDataBlocks, int splitfileSegmentCheckBlocks,
- ClientEventProducer eventProducer, RequestStarterClient sctx, boolean cacheLocalRequests) {
- this.client = client;
- this.bf = bf;
- this.random = random;
- dontCompress = false;
- splitfileAlgorithm = Metadata.SPLITFILE_ONION_STANDARD;
- this.consecutiveRNFsCountAsSuccess = rnfsToSuccess;
- this.maxInsertRetries = maxRetries;
- this.maxSplitInsertThreads = maxThreads;
- this.eventProducer = eventProducer;
- this.splitfileSegmentDataBlocks = splitfileSegmentDataBlocks;
- this.splitfileSegmentCheckBlocks = splitfileSegmentCheckBlocks;
- this.starterClient = sctx;
- this.cacheLocalRequests = cacheLocalRequests;
- }
-
-}
Copied: branches/async-client-layer/src/freenet/client/InserterContext.java (from rev 7874, trunk/freenet/src/freenet/client/InserterContext.java)
Deleted: branches/async-client-layer/src/freenet/client/InserterException.java
===================================================================
--- trunk/freenet/src/freenet/client/InserterException.java 2006-01-18 00:45:46 UTC (rev 7871)
+++ branches/async-client-layer/src/freenet/client/InserterException.java 2006-01-20 19:03:11 UTC (rev 7885)
@@ -1,103 +0,0 @@
-package freenet.client;
-
-import freenet.keys.FreenetURI;
-import freenet.support.Logger;
-
-public class InserterException extends Exception {
- private static final long serialVersionUID = -1106716067841151962L;
-
- private final int mode;
- /** For collection errors */
- public FailureCodeTracker errorCodes;
- /** If a non-serious error, the URI */
- public final FreenetURI uri;
-
- /** Get the failure mode. */
- public int getMode() {
- return mode;
- }
-
- public InserterException(int m, String msg, FreenetURI expectedURI) {
- super(getMessage(m)+": "+msg);
- mode = m;
- Logger.minor(this, "Creating InserterException: "+getMessage(mode)+": "+msg, this);
- errorCodes = null;
- this.uri = expectedURI;
- }
-
- public InserterException(int m, FreenetURI expectedURI) {
- super(getMessage(m));
- mode = m;
- Logger.minor(this, "Creating InserterException: "+getMessage(mode), this);
- errorCodes = null;
- this.uri = expectedURI;
- }
-
- public InserterException(int mode, Throwable e, FreenetURI expectedURI) {
- super(getMessage(mode)+": "+e.getMessage());
- Logger.minor(this, "Creating InserterException: "+getMessage(mode)+": "+e, e);
- this.mode = mode;
- errorCodes = null;
- initCause(e);
- this.uri = expectedURI;
- }
-
- public InserterException(int mode, FailureCodeTracker errorCodes, FreenetURI expectedURI) {
- super(getMessage(mode));
- this.mode = mode;
- Logger.minor(this, "Creating InserterException: "+getMessage(mode), this);
- this.errorCodes = errorCodes;
- this.uri = expectedURI;
- }
-
- public InserterException(int mode) {
- super(getMessage(mode));
- this.mode = mode;
- this.errorCodes = null;
- this.uri = null;
- }
-
- /** Caller supplied a URI we cannot use */
- public static final int INVALID_URI = 1;
- /** Failed to read from or write to a bucket; a kind of internal error */
- public static final int BUCKET_ERROR = 2;
- /** Internal error of some sort */
- public static final int INTERNAL_ERROR = 3;
- /** Downstream node was overloaded */
- public static final int REJECTED_OVERLOAD = 4;
- /** Couldn't find enough nodes to send the data to */
- public static final int ROUTE_NOT_FOUND = 5;
- /** There were fatal errors in a splitfile insert. */
- public static final int FATAL_ERRORS_IN_BLOCKS = 6;
- /** Could not insert a splitfile because a block failed too many times */
- public static final int TOO_MANY_RETRIES_IN_BLOCKS = 7;
- /** Not able to leave the node at all */
- public static final int ROUTE_REALLY_NOT_FOUND = 8;
- /** Collided with pre-existing content */
- public static final int COLLISION = 9;
-
- public static String getMessage(int mode) {
- switch(mode) {
- case INVALID_URI:
- return "Caller supplied a URI we cannot use";
- case BUCKET_ERROR:
- return "Internal bucket error: out of disk
space/permissions problem?";
- case INTERNAL_ERROR:
- return "Internal error";
- case REJECTED_OVERLOAD:
- return "A downstream node timed out or was severely
overloaded";
- case FATAL_ERRORS_IN_BLOCKS:
- return "Fatal errors in a splitfile insert";
- case TOO_MANY_RETRIES_IN_BLOCKS:
- return "Could not insert splitfile: ran out of retries
(nonfatal errors)";
- case ROUTE_NOT_FOUND:
- return "Could not propagate the insert to enough nodes
(normal on small networks, try fetching it anyway)";
- case ROUTE_REALLY_NOT_FOUND:
- return "Insert could not leave the node at all";
- case COLLISION:
- return "Insert collided with different, pre-existing
data at the same key";
- default:
- return "Unknown error "+mode;
- }
- }
-}
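A sketch of how a caller can use these failure modes (the surrounding code is hypothetical; hlsc and block are assumed to exist):

    try {
        FreenetURI uri = hlsc.insert(block, false);
    } catch (InserterException e) {
        // ROUTE_NOT_FOUND is often survivable on small networks: the data
        // may be fetchable anyway, and e.uri carries the calculated key.
        if(e.getMode() == InserterException.ROUTE_NOT_FOUND && e.uri != null)
            System.err.println("Weak propagation; try fetching "+e.uri);
        else
            System.err.println(InserterException.getMessage(e.getMode()));
    }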
Copied: branches/async-client-layer/src/freenet/client/InserterException.java (from rev 7874, trunk/freenet/src/freenet/client/InserterException.java)
Modified: branches/async-client-layer/src/freenet/client/Metadata.java
===================================================================
--- trunk/freenet/src/freenet/client/Metadata.java 2006-01-18 00:45:46 UTC (rev 7871)
+++ branches/async-client-layer/src/freenet/client/Metadata.java 2006-01-20 19:03:11 UTC (rev 7885)
@@ -19,13 +19,104 @@
/** Metadata parser/writer class. */
-public class Metadata {
+public class Metadata implements Cloneable {
static final long FREENET_METADATA_MAGIC = 0xf053b2842d91482bL;
static final int MAX_SPLITFILE_PARAMS_LENGTH = 32768;
/** Soft limit, to avoid memory DoS */
static final int MAX_SPLITFILE_BLOCKS = 100*1000;
+ // Actual parsed data
+
+ // document type
+ byte documentType;
+ static final byte SIMPLE_REDIRECT = 0;
+ static final byte MULTI_LEVEL_METADATA = 1;
+ static final byte SIMPLE_MANIFEST = 2;
+ static final byte ZIP_MANIFEST = 3;
+ static final byte ZIP_INTERNAL_REDIRECT = 4;
+
+ // 2 bytes of flags
+ /** Is a splitfile */
+ boolean splitfile;
+ /** Is a DBR */
+ boolean dbr;
+ /** No MIME type; on by default as not all doctypes have MIME */
+ boolean noMIME = true;
+ /** Compressed MIME type */
+ boolean compressedMIME;
+ /** Has extra client-metadata */
+ boolean extraMetadata;
+ /** Keys stored in full (otherwise assumed to be CHKs) */
+ boolean fullKeys;
+ /** Non-final splitfile chunks can be non-full */
+ boolean splitUseLengths;
+ static final short FLAGS_SPLITFILE = 1;
+ static final short FLAGS_DBR = 2;
+ static final short FLAGS_NO_MIME = 4;
+ static final short FLAGS_COMPRESSED_MIME = 8;
+ static final short FLAGS_EXTRA_METADATA = 16;
+ static final short FLAGS_FULL_KEYS = 32;
+ static final short FLAGS_SPLIT_USE_LENGTHS = 64;
+ static final short FLAGS_COMPRESSED = 128;
+
+ /** Container archive type */
+ short archiveType;
+ static final short ARCHIVE_ZIP = 0;
+ static final short ARCHIVE_TAR = 1; // FIXME for future use
+
+ /** Compressed splitfile codec */
+ short compressionCodec = -1;
+ static public final short COMPRESS_GZIP = 0;
+ static final short COMPRESS_BZIP2 = 1; // FIXME for future use
+
+ /** The length of the splitfile */
+ long dataLength;
+ /** The decompressed length of the compressed data */
+ long decompressedLength;
+
+ /** The MIME type, as a string */
+ String mimeType;
+
+ /** The compressed MIME type - lookup index for the MIME types table.
+ * Must be between 0 and 32767.
+ */
+ short compressedMIMEValue;
+ boolean hasCompressedMIMEParams;
+ short compressedMIMEParams;
+
+ /** The simple redirect key */
+ FreenetURI simpleRedirectKey;
+
+ short splitfileAlgorithm;
+ static public final short SPLITFILE_NONREDUNDANT = 0;
+ static public final short SPLITFILE_ONION_STANDARD = 1;
+
+ /** Splitfile parameters */
+ byte[] splitfileParams;
+ int splitfileBlocks;
+ int splitfileCheckBlocks;
+ FreenetURI[] splitfileDataKeys;
+ FreenetURI[] splitfileCheckKeys;
+
+ // Manifests
+ int manifestEntryCount;
+ /** Manifest entries by name */
+ HashMap manifestEntries;
+
+ /** ZIP internal redirect: name of file in ZIP */
+ String nameInArchive;
+
+ ClientMetadata clientMetadata;
+
+ public Object clone() {
+ try {
+ return super.clone();
+ } catch (CloneNotSupportedException e) {
+ // Impossible: we implement Cloneable
+ throw new Error(e);
+ }
+ }
+
/** Parse a block of bytes into a Metadata structure.
* Constructor method because of need to catch impossible exceptions.
* @throws MetadataParseException If the metadata is invalid.
@@ -507,89 +598,6 @@
 } else throw new IllegalArgumentException("Full keys must be enabled to write non-CHKs");
}
}
- // Actual parsed data
-
- // document type
- byte documentType;
- static final byte SIMPLE_REDIRECT = 0;
- static final byte MULTI_LEVEL_METADATA = 1;
- static final byte SIMPLE_MANIFEST = 2;
- static final byte ZIP_MANIFEST = 3;
- static final byte ZIP_INTERNAL_REDIRECT = 4;
-
- // 2 bytes of flags
- /** Is a splitfile */
- boolean splitfile;
- /** Is a DBR */
- boolean dbr;
- /** No MIME type; on by default as not all doctypes have MIME */
- boolean noMIME = true;
- /** Compressed MIME type */
- boolean compressedMIME;
- /** Has extra client-metadata */
- boolean extraMetadata;
- /** Keys stored in full (otherwise assumed to be CHKs) */
- boolean fullKeys;
- /** Non-final splitfile chunks can be non-full */
- boolean splitUseLengths;
- static final short FLAGS_SPLITFILE = 1;
- static final short FLAGS_DBR = 2;
- static final short FLAGS_NO_MIME = 4;
- static final short FLAGS_COMPRESSED_MIME = 8;
- static final short FLAGS_EXTRA_METADATA = 16;
- static final short FLAGS_FULL_KEYS = 32;
- static final short FLAGS_SPLIT_USE_LENGTHS = 64;
- static final short FLAGS_COMPRESSED = 128;
-
- /** Container archive type */
- short archiveType;
- static final short ARCHIVE_ZIP = 0;
- static final short ARCHIVE_TAR = 1; // FIXME for future use
-
- /** Compressed splitfile codec */
- short compressionCodec = -1;
- static public final short COMPRESS_GZIP = 0;
- static final short COMPRESS_BZIP2 = 1; // FIXME for future use
-
- /** The length of the splitfile */
- long dataLength;
- /** The decompressed length of the compressed data */
- long decompressedLength;
-
- /** The MIME type, as a string */
- String mimeType;
-
- /** The compressed MIME type - lookup index for the MIME types table.
- * Must be between 0 and 32767.
- */
- short compressedMIMEValue;
- boolean hasCompressedMIMEParams;
- short compressedMIMEParams;
-
- /** The simple redirect key */
- FreenetURI simpleRedirectKey;
-
- short splitfileAlgorithm;
- static final short SPLITFILE_NONREDUNDANT = 0;
- static final short SPLITFILE_ONION_STANDARD = 1;
-
- /** Splitfile parameters */
- byte[] splitfileParams;
- int splitfileBlocks;
- int splitfileCheckBlocks;
- FreenetURI[] splitfileDataKeys;
- FreenetURI[] splitfileCheckKeys;
-
- // Manifests
- int manifestEntryCount;
- /** Manifest entries by name */
- HashMap manifestEntries;
-
- /** ZIP internal redirect: name of file in ZIP */
- String nameInArchive;
-
- ClientMetadata clientMetadata;
-
/** Is a manifest? */
public boolean isSimpleManifest() {
return documentType == SIMPLE_MANIFEST;
@@ -799,4 +807,20 @@
public boolean isCompressed() {
return compressionCodec >= 0;
}
+
+ public boolean splitUseLengths() {
+ return splitUseLengths;
+ }
+
+ public short getCompressionCodec() {
+ return compressionCodec;
+ }
+
+ public long dataLength() {
+ return dataLength;
+ }
+
+ public byte[] splitfileParams() {
+ return splitfileParams;
+ }
}
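The FLAGS_ constants are bit positions within the two flag bytes; presumably the parser decodes them along these lines (an illustrative sketch only, the parse code itself is not in this diff):

    // Decoding the flag bits (hypothetical example values).
    short flags = (short) (FLAGS_SPLITFILE | FLAGS_NO_MIME);
    boolean splitfile = (flags & FLAGS_SPLITFILE) != 0;               // true
    boolean dbr = (flags & FLAGS_DBR) != 0;                           // false
    boolean splitUseLengths = (flags & FLAGS_SPLIT_USE_LENGTHS) != 0; // false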
Modified: branches/async-client-layer/src/freenet/client/RetryTracker.java
===================================================================
--- trunk/freenet/src/freenet/client/RetryTracker.java 2006-01-18 00:45:46 UTC (rev 7871)
+++ branches/async-client-layer/src/freenet/client/RetryTracker.java 2006-01-20 19:03:11 UTC (rev 7885)
@@ -26,16 +26,16 @@
* Return a random block.
* Call synchronized on RetryTracker.
*/
- SplitfileBlock getBlock() {
+ StartableSplitfileBlock getBlock() {
int len = blocks.size();
int x = random.nextInt(len);
- SplitfileBlock block = (SplitfileBlock) blocks.remove(x);
+ StartableSplitfileBlock block = (StartableSplitfileBlock) blocks.remove(x);
if(blocks.isEmpty())
removeLevel(level);
return block;
}
- void add(SplitfileBlock block) {
+ void add(StartableSplitfileBlock block) {
blocks.add(block);
}
@@ -44,7 +44,7 @@
* Remove self if run out of blocks.
* Call synchronized on RetryTracker.
*/
- void remove(SplitfileBlock block) {
+ void remove(StartableSplitfileBlock block) {
blocks.remove(block);
if(blocks.isEmpty())
removeLevel(level);
@@ -167,7 +167,7 @@
/**
* Add a block at retry level zero.
*/
- public synchronized void addBlock(SplitfileBlock block) {
+ public synchronized void addBlock(StartableSplitfileBlock block) {
if(killed) return;
Level l = makeLevel(0);
l.add(block);
@@ -179,7 +179,7 @@
 * Move it out of the running list and back into the relevant list, unless
* we have run out of retries.
*/
- public void nonfatalError(SplitfileBlock block, int reasonCode) {
+ public void nonfatalError(StartableSplitfileBlock block, int reasonCode) {
synchronized(this) {
nonfatalErrors.inc(reasonCode);
runningBlocks.remove(block);
@@ -204,7 +204,7 @@
* Move it into the fatal error list.
 * @param reasonCode A client-specific code indicating the type of failure.
*/
- public void fatalError(SplitfileBlock block, int reasonCode) {
+ public void fatalError(StartableSplitfileBlock block, int reasonCode) {
synchronized(this) {
fatalErrors.inc(reasonCode);
runningBlocks.remove(block);
@@ -238,7 +238,7 @@
|| (runningBlocks.isEmpty() && levels.isEmpty()
&& finishOnEmpty)) {
killed = true;
Logger.minor(this, "Finishing");
- SplitfileBlock[] running = runningBlocks();
+ StartableSplitfileBlock[] running = runningBlocks();
for(int i=0;i<running.length;i++) {
running[i].kill();
}
@@ -253,7 +253,7 @@
}
} else {
while(runningBlocks.size() < maxThreads) {
- SplitfileBlock block = getBlock();
+ StartableSplitfileBlock block = getBlock();
if(block == null) break;
Logger.minor(this, "Starting: "+block);
block.start();
@@ -265,7 +265,7 @@
 callback.finished(succeededBlocks(), failedBlocks(), fatalErrorBlocks());
}
- public void success(SplitfileBlock block) {
+ public void success(StartableSplitfileBlock block) {
synchronized(this) {
if(killed) return;
runningBlocks.remove(block);
@@ -284,7 +284,7 @@
* Get the next block to try. This is a randomly selected block from the
* lowest priority currently available. Move it into the running list.
*/
- public synchronized SplitfileBlock getBlock() {
+ public synchronized StartableSplitfileBlock getBlock() {
if(killed) return null;
Integer iMin = new Integer(curMinLevel);
Level l = (Level) levels.get(iMin);
@@ -309,34 +309,34 @@
/**
* Get all running blocks.
*/
- public synchronized SplitfileBlock[] runningBlocks() {
- return (SplitfileBlock[])
- runningBlocks.toArray(new SplitfileBlock[runningBlocks.size()]);
+ public synchronized StartableSplitfileBlock[] runningBlocks() {
+ return (StartableSplitfileBlock[])
+ runningBlocks.toArray(new StartableSplitfileBlock[runningBlocks.size()]);
}
/**
* Get all blocks with fatal errors.
- * SplitfileBlock's are assumed to remember their errors, so we don't.
+ * StartableSplitfileBlock's are assumed to remember their errors, so we don't.
*/
- public synchronized SplitfileBlock[] fatalErrorBlocks() {
- return (SplitfileBlock[])
- failedBlocksFatalErrors.toArray(new SplitfileBlock[failedBlocksFatalErrors.size()]);
+ public synchronized StartableSplitfileBlock[] fatalErrorBlocks() {
+ return (StartableSplitfileBlock[])
+ failedBlocksFatalErrors.toArray(new StartableSplitfileBlock[failedBlocksFatalErrors.size()]);
}
/**
* Get all blocks which didn't succeed in the maximum number of tries.
*/
- public synchronized SplitfileBlock[] failedBlocks() {
- return (SplitfileBlock[])
- failedBlocksTooManyRetries.toArray(new SplitfileBlock[failedBlocksTooManyRetries.size()]);
+ public synchronized StartableSplitfileBlock[] failedBlocks() {
+ return (StartableSplitfileBlock[])
+ failedBlocksTooManyRetries.toArray(new StartableSplitfileBlock[failedBlocksTooManyRetries.size()]);
}
/**
* Get all successfully downloaded blocks.
*/
- public synchronized SplitfileBlock[] succeededBlocks() {
- return (SplitfileBlock[])
- succeededBlocks.toArray(new SplitfileBlock[succeededBlocks.size()]);
+ public synchronized StartableSplitfileBlock[] succeededBlocks() {
+ return (StartableSplitfileBlock[])
+ succeededBlocks.toArray(new StartableSplitfileBlock[succeededBlocks.size()]);
}
public synchronized int succeededBlocksLength() {
@@ -384,7 +384,7 @@
killed = true;
levels.clear();
for(Iterator i=runningBlocks.iterator();i.hasNext();) {
- SplitfileBlock sb = (SplitfileBlock) i.next();
+ StartableSplitfileBlock sb = (StartableSplitfileBlock) i.next();
sb.kill();
}
runningBlocks.clear();
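Pieced together from the methods in this diff, the tracker's contract is: the owner queues blocks, the tracker runs up to maxThreads of them at once, and each block reports back exactly once. A sketch of the owner side (hypothetical tracker and blocks variables, not code from this commit):

    for(int i=0;i<blocks.length;i++)
        tracker.addBlock(blocks[i]); // all blocks enter retry level 0
    tracker.setFinishOnEmpty();      // finish once nothing is queued or running
    // Each running block later calls exactly one of:
    //   tracker.success(block);
    //   tracker.nonfatalError(block, code); // requeued at the next retry level
    //   tracker.fatalError(block, code);    // never retried
    // and the tracker finally fires callback.finished(succeeded, failed, fatalErrors).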
Modified: branches/async-client-layer/src/freenet/client/RetryTrackerCallback.java
===================================================================
--- trunk/freenet/src/freenet/client/RetryTrackerCallback.java 2006-01-18 00:45:46 UTC (rev 7871)
+++ branches/async-client-layer/src/freenet/client/RetryTrackerCallback.java 2006-01-20 19:03:11 UTC (rev 7885)
@@ -11,7 +11,7 @@
* @param failed The blocks which failed.
* @param fatalErrors The blocks which got fatal errors.
*/
- void finished(SplitfileBlock[] succeeded, SplitfileBlock[] failed, SplitfileBlock[] fatalErrors);
+ void finished(StartableSplitfileBlock[] succeeded, StartableSplitfileBlock[] failed, StartableSplitfileBlock[] fatalErrors);
/**
* When a block completes etc.
Deleted: branches/async-client-layer/src/freenet/client/Segment.java
===================================================================
--- trunk/freenet/src/freenet/client/Segment.java 2006-01-18 00:45:46 UTC (rev 7871)
+++ branches/async-client-layer/src/freenet/client/Segment.java 2006-01-20 19:03:11 UTC (rev 7885)
@@ -1,282 +0,0 @@
-package freenet.client;
-
-import java.io.IOException;
-import java.io.OutputStream;
-import java.util.LinkedList;
-import java.util.Vector;
-
-import freenet.keys.FreenetURI;
-import freenet.support.Bucket;
-import freenet.support.BucketTools;
-import freenet.support.Logger;
-
-/**
- * A segment, within a splitfile.
- * Self-starting Runnable.
- *
- * Does not require locking, because all locking goes through the parent Segment.
- */
-public class Segment implements RetryTrackerCallback {
-
- final short splitfileType;
- final FreenetURI[] dataBlocks;
- final FreenetURI[] checkBlocks;
- final BlockFetcher[] dataBlockStatus;
- final BlockFetcher[] checkBlockStatus;
- final int minFetched;
- private Vector blocksNotTried;
- final SplitFetcher parentFetcher;
- final ArchiveContext archiveContext;
- final FetcherContext fetcherContext;
- final long maxBlockLength;
- final boolean nonFullBlocksAllowed;
- /** Has the segment started to do something? Irreversible. */
- private boolean started;
- /** Has the segment finished processing? Irreversible. */
- private boolean finished;
- /** Bucket to store the data retrieved, after it has been decoded */
- private Bucket decodedData;
- /** Recently completed fetches */
- final LinkedList recentlyCompletedFetches;
- /** Running fetches */
- LinkedList runningFetches;
- /** Fetch context for block fetches */
- final FetcherContext blockFetchContext;
- /** Recursion level */
- final int recursionLevel;
- /** Retry tracker */
- private final RetryTracker tracker;
- private FetchException failureException;
-
- /**
- * Create a Segment.
- * @param splitfileType The type of the splitfile.
- * @param splitfileDataBlocks The data blocks to fetch.
- * @param splitfileCheckBlocks The check blocks to fetch.
- */
- public Segment(short splitfileType, FreenetURI[] splitfileDataBlocks, FreenetURI[] splitfileCheckBlocks,
- SplitFetcher fetcher, ArchiveContext actx, FetcherContext fctx, long maxTempLength, boolean useLengths, int recLevel) throws MetadataParseException {
- this.splitfileType = splitfileType;
- dataBlocks = splitfileDataBlocks;
- checkBlocks = splitfileCheckBlocks;
- if(splitfileType == Metadata.SPLITFILE_NONREDUNDANT) {
- minFetched = dataBlocks.length;
- } else if(splitfileType == Metadata.SPLITFILE_ONION_STANDARD) {
- minFetched = dataBlocks.length;
- } else throw new MetadataParseException("Unknown splitfile type "+splitfileType);
- tracker = new RetryTracker(fctx.maxSplitfileBlockRetries, splitfileDataBlocks.length, fctx.random, fctx.maxSplitfileThreads, false, this, false);
- // Don't add blocks to tracker yet, because don't want to start fetch yet.
- parentFetcher = fetcher;
- archiveContext = actx;
- fetcherContext = fctx;
- maxBlockLength = maxTempLength;
- nonFullBlocksAllowed = useLengths;
- started = false;
- finished = false;
- decodedData = null;
- dataBlockStatus = new BlockFetcher[dataBlocks.length];
- checkBlockStatus = new BlockFetcher[checkBlocks.length];
- blocksNotTried = new Vector();
- Vector firstSet = new Vector(dataBlocks.length+checkBlocks.length);
- blocksNotTried.add(0, firstSet);
- for(int i=0;i<dataBlocks.length;i++) {
- dataBlockStatus[i] = new BlockFetcher(this, tracker, dataBlocks[i], i, fctx.dontEnterImplicitArchives);
- firstSet.add(dataBlockStatus[i]);
- }
- for(int i=0;i<checkBlocks.length;i++) {
- checkBlockStatus[i] = new BlockFetcher(this, tracker, checkBlocks[i], dataBlockStatus.length + i, fctx.dontEnterImplicitArchives);
- firstSet.add(checkBlockStatus[i]);
- }
- recentlyCompletedFetches = new LinkedList();
- runningFetches = new LinkedList();
- // FIXME be a bit more flexible here depending on flags
- if(useLengths) {
- blockFetchContext = new FetcherContext(fetcherContext, FetcherContext.SPLITFILE_USE_LENGTHS_MASK);
- this.recursionLevel = recLevel;
- } else {
- blockFetchContext = new FetcherContext(fetcherContext, FetcherContext.SPLITFILE_DEFAULT_BLOCK_MASK);
- this.recursionLevel = 0;
- }
- Logger.minor(this, "Created segment: data blocks: "+dataBlocks.length+", check blocks: "+checkBlocks.length+" "+this);
- }
-
- /**
- * Is the segment finished (either errored, or fetched and decoded)?
- */
- public boolean isFinished() {
- return finished;
- }
-
- /**
- * If there was an error, throw it now.
- */
- public void throwError() throws FetchException {
- if(failureException != null)
- throw failureException;
- }
-
- /**
- * Return the length of the data, after decoding.
- */
- public long decodedLength() {
- return decodedData.size();
- }
-
- /**
- * Write the decoded data to the given output stream.
- * Do not write more than the specified number of bytes (unless it is negative,
- * in which case ignore it).
- * @return The number of bytes written.
- * @throws IOException If there was an error reading from the bucket the data is
- * stored in, or writing to the stream provided.
- */
- public long writeDecodedDataTo(OutputStream os, long truncateLength) throws IOException {
- long len = decodedData.size();
- if(truncateLength >= 0 && truncateLength < len)
- len = truncateLength;
- BucketTools.copyTo(decodedData, os, Math.min(truncateLength, decodedData.size()));
- return len;
- }
-
- /**
- * Return true if the Segment has been started, otherwise false.
- */
- public boolean isStarted() {
- return started;
- }
-
- /**
- * Start the Segment fetching the data. When it has finished fetching, it will
- * notify the SplitFetcher. Note that we must not start fetching until this
- * method is called, because of the requirement to not fetch all segments
- * simultaneously.
- * simultaneously.
- */
- public void start() {
- started = true;
- for(int i=0;i<dataBlockStatus.length;i++) {
- tracker.addBlock(dataBlockStatus[i]);
- }
- Logger.minor(this, "Added data blocks");
- for(int i=0;i<checkBlockStatus.length;i++) {
- tracker.addBlock(checkBlockStatus[i]);
- }
- tracker.callOnProgress();
- tracker.setFinishOnEmpty();
- }
-
- /**
- * How many fetches are running?
- */
- private int runningFetches() {
- synchronized(runningFetches) {
- return runningFetches.size();
- }
- }
-
- /**
- * Once we have enough data to decode, tell parent, and decode it.
- */
- public void finished(SplitfileBlock[] succeeded, SplitfileBlock[] failed, SplitfileBlock[] fatalErrors) {
-
- Logger.minor(this, "Finished("+succeeded.length+", "+failed.length+", "+fatalErrors.length+")");
- parentFetcher.gotBlocks(this);
- if(succeeded.length >= minFetched)
- // Not finished yet, need to decode
- try {
- successfulFetch();
- } catch (Throwable t) {
- Logger.error(this, "Caught "+t+" decoding "+this);
- finished = true;
- failureException = new FetchException(FetchException.INTERNAL_ERROR, t);
- parentFetcher.segmentFinished(this);
- }
- else {
- failureException = new SplitFetchException(failed.length, fatalErrors.length, succeeded.length, minFetched, tracker.getAccumulatedNonFatalErrorCodes().merge(tracker.getAccumulatedFatalErrorCodes()));
- finished = true;
- parentFetcher.segmentFinished(this);
- }
- }
-
- /**
- * Successful fetch, do the decode, tell the parent, etc.
- */
- private void successfulFetch() {
-
- // Now decode
- Logger.minor(this, "Decoding "+this);
-
- FECCodec codec = FECCodec.getCodec(splitfileType, dataBlocks.length, checkBlocks.length);
- try {
- if(splitfileType != Metadata.SPLITFILE_NONREDUNDANT) {
- // FIXME hardcoded block size below.
- codec.decode(dataBlockStatus, checkBlockStatus, 32768, fetcherContext.bucketFactory);
- // Now have all the data blocks (not necessarily all the check blocks)
- }
-
- decodedData = fetcherContext.bucketFactory.makeBucket(-1);
- Logger.minor(this, "Copying data from data blocks");
- OutputStream os = decodedData.getOutputStream();
- for(int i=0;i<dataBlockStatus.length;i++) {
- BlockFetcher status = dataBlockStatus[i];
- Bucket data = status.fetchedData;
- BucketTools.copyTo(data, os, Long.MAX_VALUE);
- }
- Logger.minor(this, "Copied data");
- os.close();
- // Must set finished BEFORE calling parentFetcher.
- // Otherwise a race is possible that might result in it not seeing our finishing.
- finished = true;
- parentFetcher.segmentFinished(this);
- } catch (IOException e) {
- Logger.minor(this, "Caught bucket error?: "+e, e);
- finished = true;
- failureException = new FetchException(FetchException.BUCKET_ERROR);
- parentFetcher.segmentFinished(this);
- return;
- }
-
- // Now heal
-
- // Encode any check blocks we don't have
- if(codec != null) {
- try {
- codec.encode(dataBlockStatus, checkBlockStatus, 32768, fetcherContext.bucketFactory);
- } catch (IOException e) {
- Logger.error(this, "Bucket error while healing: "+e, e);
- }
- }
-
- // Now insert *ALL* blocks on which we had at least one failure, and didn't eventually succeed
- for(int i=0;i<dataBlockStatus.length;i++) {
- BlockFetcher block = dataBlockStatus[i];
- if(block.actuallyFetched) continue;
- if(block.completedTries == 0) {
- // If we never tried it, skip the heal insert 1 time in 5 (80% chance of inserting)
- if(fetcherContext.random.nextInt(5) == 0) continue;
- }
- block.queueHeal();
- }
-
- // FIXME heal check blocks too
- }
-
- public void onProgress() {
- parentFetcher.onProgress();
- }
-
- public int fetchedBlocks() {
- return tracker.succeededBlocksLength();
- }
-
- public int failedBlocks() {
- return tracker.failedBlocks().length;
- }
-
- public int fatallyFailedBlocks() {
- return tracker.fatalErrorBlocks().length;
- }
-
- public int runningBlocks() {
- return tracker.runningBlocks().length;
- }
-}
Copied: branches/async-client-layer/src/freenet/client/Segment.java (from rev 7873, trunk/freenet/src/freenet/client/Segment.java)
===================================================================
--- trunk/freenet/src/freenet/client/Segment.java 2006-01-18 16:38:29 UTC (rev 7873)
+++ branches/async-client-layer/src/freenet/client/Segment.java 2006-01-20 19:03:11 UTC (rev 7885)
@@ -0,0 +1,270 @@
+package freenet.client;
+
+import java.io.IOException;
+import java.io.OutputStream;
+import java.util.LinkedList;
+import java.util.Vector;
+
+import freenet.keys.FreenetURI;
+import freenet.support.Bucket;
+import freenet.support.BucketTools;
+import freenet.support.Logger;
+
+/**
+ * A segment, within a splitfile.
+ * Self-starting Runnable.
+ *
+ * Does not require locking, because all locking goes through the parent Segment.
+ */
+public class Segment implements RetryTrackerCallback {
+
+ final short splitfileType;
+ final FreenetURI[] dataBlocks;
+ final FreenetURI[] checkBlocks;
+ final BlockFetcher[] dataBlockStatus;
+ final BlockFetcher[] checkBlockStatus;
+ final int minFetched;
+ final SplitFetcher parentFetcher;
+ final ArchiveContext archiveContext;
+ final FetcherContext fetcherContext;
+ final long maxBlockLength;
+ final boolean nonFullBlocksAllowed;
+ /** Has the segment started to do something? Irreversible. */
+ private boolean started;
+ /** Has the segment finished processing? Irreversible. */
+ private boolean finished;
+ /** Bucket to store the data retrieved, after it has been decoded */
+ private Bucket decodedData;
+ /** Fetch context for block fetches */
+ final FetcherContext blockFetchContext;
+ /** Recursion level */
+ final int recursionLevel;
+ /** Retry tracker */
+ private final RetryTracker tracker;
+ private FetchException failureException;
+
+ /**
+ * Create a Segment.
+ * @param splitfileType The type of the splitfile.
+ * @param splitfileDataBlocks The data blocks to fetch.
+ * @param splitfileCheckBlocks The check blocks to fetch.
+ */
+ public Segment(short splitfileType, FreenetURI[] splitfileDataBlocks, FreenetURI[] splitfileCheckBlocks,
+ SplitFetcher fetcher, ArchiveContext actx, FetcherContext fctx, long maxTempLength, boolean useLengths, int recLevel) throws MetadataParseException {
+ this.splitfileType = splitfileType;
+ dataBlocks = splitfileDataBlocks;
+ checkBlocks = splitfileCheckBlocks;
+ if(splitfileType == Metadata.SPLITFILE_NONREDUNDANT) {
+ minFetched = dataBlocks.length;
+ } else if(splitfileType == Metadata.SPLITFILE_ONION_STANDARD) {
+ minFetched = dataBlocks.length;
+ } else throw new MetadataParseException("Unknown splitfile type "+splitfileType);
+ tracker = new RetryTracker(fctx.maxSplitfileBlockRetries, splitfileDataBlocks.length, fctx.random, fctx.maxSplitfileThreads, false, this, false);
+ // Don't add blocks to tracker yet, because don't want to start fetch yet.
+ parentFetcher = fetcher;
+ archiveContext = actx;
+ fetcherContext = fctx;
+ maxBlockLength = maxTempLength;
+ nonFullBlocksAllowed = useLengths;
+ started = false;
+ finished = false;
+ decodedData = null;
+ dataBlockStatus = new BlockFetcher[dataBlocks.length];
+ checkBlockStatus = new BlockFetcher[checkBlocks.length];
+ Vector firstSet = new Vector(dataBlocks.length+checkBlocks.length);
+ for(int i=0;i<dataBlocks.length;i++) {
+ dataBlockStatus[i] = new BlockFetcher(this, tracker, dataBlocks[i], i, fctx.dontEnterImplicitArchives);
+ firstSet.add(dataBlockStatus[i]);
+ }
+ for(int i=0;i<checkBlocks.length;i++) {
+ checkBlockStatus[i] = new BlockFetcher(this, tracker, checkBlocks[i], dataBlockStatus.length + i, fctx.dontEnterImplicitArchives);
+ firstSet.add(checkBlockStatus[i]);
+ }
+ // FIXME be a bit more flexible here depending on flags
+ if(useLengths) {
+ blockFetchContext = new FetcherContext(fetcherContext, FetcherContext.SPLITFILE_USE_LENGTHS_MASK);
+ this.recursionLevel = recLevel;
+ } else {
+ blockFetchContext = new FetcherContext(fetcherContext, FetcherContext.SPLITFILE_DEFAULT_BLOCK_MASK);
+ this.recursionLevel = 0;
+ }
+ Logger.minor(this, "Created segment: data blocks: "+dataBlocks.length+", check blocks: "+checkBlocks.length+" "+this);
+ }
+
+ /**
+ * Is the segment finished (either errored, or fetched and decoded)?
+ */
+ public boolean isFinished() {
+ return finished;
+ }
+
+ /**
+ * If there was an error, throw it now.
+ */
+ public void throwError() throws FetchException {
+ if(failureException != null)
+ throw failureException;
+ }
+
+ /**
+ * Return the length of the data, after decoding.
+ */
+ public long decodedLength() {
+ return decodedData.size();
+ }
+
+ /**
+ * Write the decoded data to the given output stream.
+ * Do not write more than the specified number of bytes (unless it is negative,
+ * in which case ignore it).
+ * @return The number of bytes written.
+ * @throws IOException If there was an error reading from the bucket the data is
+ * stored in, or writing to the stream provided.
+ */
+ public long writeDecodedDataTo(OutputStream os, long truncateLength) throws IOException {
+ long len = decodedData.size();
+ if(truncateLength >= 0 && truncateLength < len)
+ len = truncateLength;
+ BucketTools.copyTo(decodedData, os, len);
+ return len;
+ }
+
+ /**
+ * Return true if the Segment has been started, otherwise false.
+ */
+ public boolean isStarted() {
+ return started;
+ }
+
+ /**
+ * Start the Segment fetching the data. When it has finished fetching, it will
+ * notify the SplitFetcher. Note that we must not start fetching until this
+ * method is called, because of the requirement to not fetch all segments
+ * simultaneously.
+ * simultaneously.
+ */
+ public void start() {
+ started = true;
+ for(int i=0;i<dataBlockStatus.length;i++) {
+ tracker.addBlock(dataBlockStatus[i]);
+ }
+ Logger.minor(this, "Added data blocks");
+ for(int i=0;i<checkBlockStatus.length;i++) {
+ tracker.addBlock(checkBlockStatus[i]);
+ }
+ tracker.callOnProgress();
+ tracker.setFinishOnEmpty();
+ }
+
+ /**
+ * Once we have enough data to decode, tell parent, and decode it.
+ */
+ public void finished(StartableSplitfileBlock[] succeeded, StartableSplitfileBlock[] failed, StartableSplitfileBlock[] fatalErrors) {
+
+ Logger.minor(this, "Finished("+succeeded.length+", "+failed.length+", "+fatalErrors.length+")");
+ parentFetcher.gotBlocks(this);
+ if(succeeded.length >= minFetched)
+ // Not finished yet, need to decode
+ try {
+ successfulFetch();
+ } catch (Throwable t) {
+ Logger.error(this, "Caught "+t+" decoding "+this);
+ finished = true;
+ failureException = new FetchException(FetchException.INTERNAL_ERROR, t);
+ parentFetcher.segmentFinished(this);
+ }
+ else {
+ failureException = new SplitFetchException(failed.length, fatalErrors.length, succeeded.length, minFetched, tracker.getAccumulatedNonFatalErrorCodes().merge(tracker.getAccumulatedFatalErrorCodes()));
+ finished = true;
+ parentFetcher.segmentFinished(this);
+ }
+ }
+
+ /**
+ * Successful fetch, do the decode, tell the parent, etc.
+ */
+ private void successfulFetch() {
+
+ // Now decode
+ Logger.minor(this, "Decoding "+this);
+
+ FECCodec codec = FECCodec.getCodec(splitfileType, dataBlocks.length, checkBlocks.length);
+ try {
+ if(splitfileType != Metadata.SPLITFILE_NONREDUNDANT) {
+ // FIXME hardcoded block size below.
+ codec.decode(dataBlockStatus, checkBlockStatus, 32768, fetcherContext.bucketFactory);
+ // Now have all the data blocks (not necessarily all the check blocks)
+ }
+
+ decodedData = fetcherContext.bucketFactory.makeBucket(-1);
+ Logger.minor(this, "Copying data from data blocks");
+ OutputStream os = decodedData.getOutputStream();
+ for(int i=0;i<dataBlockStatus.length;i++) {
+ BlockFetcher status = dataBlockStatus[i];
+ Bucket data = status.fetchedData;
+ BucketTools.copyTo(data, os, Long.MAX_VALUE);
+ }
+ Logger.minor(this, "Copied data");
+ os.close();
+ // Must set finished BEFORE calling parentFetcher.
+ // Otherwise a race is possible that might result in it not seeing our finishing.
+ finished = true;
+ parentFetcher.segmentFinished(this);
+ } catch (IOException e) {
+ Logger.minor(this, "Caught bucket error?: "+e, e);
+ finished = true;
+ failureException = new FetchException(FetchException.BUCKET_ERROR);
+ parentFetcher.segmentFinished(this);
+ return;
+ }
+
+ // Now heal
+
+ // Encode any check blocks we don't have
+ if(codec != null) {
+ try {
+ codec.encode(dataBlockStatus, checkBlockStatus, 32768, fetcherContext.bucketFactory);
+ } catch (IOException e) {
+ Logger.error(this, "Bucket error while healing: "+e, e);
+ }
+ }
+
+ // Now insert *ALL* blocks on which we had at least one failure, and didn't eventually succeed
+ for(int i=0;i<dataBlockStatus.length;i++) {
+ BlockFetcher block = dataBlockStatus[i];
+ if(block.actuallyFetched) continue;
+ if(block.completedTries == 0) {
+ // If we never tried it, skip the heal insert 1 time in 5 (80% chance of inserting)
+ if(fetcherContext.random.nextInt(5) == 0) continue;
+ }
+ block.queueHeal();
+ }
+
+ // FIXME heal check blocks too
+ }
+
+ public void onProgress() {
+ if(fetcherContext.isCancelled()) {
+ finished = true;
+ tracker.kill();
+ failureException = new FetchException(FetchException.CANCELLED);
+ parentFetcher.gotBlocks(this);
+ }
+ parentFetcher.onProgress();
+ }
+
+ public int fetchedBlocks() {
+ return tracker.succeededBlocksLength();
+ }
+
+ public int failedBlocks() {
+ return tracker.failedBlocks().length;
+ }
+
+ public int fatallyFailedBlocks() {
+ return tracker.fatalErrorBlocks().length;
+ }
+
+ public int runningBlocks() {
+ return tracker.runningBlocks().length;
+ }
+}
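The segmentCount arithmetic in SplitFetcher below is a plain ceiling division of the data blocks into segments; for example (illustrative numbers only):

    // 300 data blocks at 128 per segment -> 2 full segments + 1 of 44
    int segmentCount = (300 / 128) + (300 % 128 == 0 ? 0 : 1); // = 3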
Deleted: branches/async-client-layer/src/freenet/client/SplitFetcher.java
===================================================================
--- trunk/freenet/src/freenet/client/SplitFetcher.java 2006-01-18 00:45:46 UTC (rev 7871)
+++ branches/async-client-layer/src/freenet/client/SplitFetcher.java 2006-01-20 19:03:11 UTC (rev 7885)
@@ -1,253 +0,0 @@
-package freenet.client;
-
-import java.io.IOException;
-import java.io.OutputStream;
-import java.util.Vector;
-
-import com.onionnetworks.fec.FECCode;
-import com.onionnetworks.fec.FECCodeFactory;
-
-import freenet.client.events.SplitfileProgressEvent;
-import freenet.keys.FreenetURI;
-import freenet.keys.NodeCHK;
-import freenet.support.Bucket;
-import freenet.support.Fields;
-import freenet.support.Logger;
-
-/**
- * Class to fetch a splitfile.
- */
-public class SplitFetcher {
-
- // 128/192. Crazy, but it's possible we'd get big erasures.
- static final int ONION_STD_K = 128;
- static final int ONION_STD_N = 192;
-
- /** The standard onion codec */
- static FECCode onionStandardCode =
- FECCodeFactory.getDefault().createFECCode(ONION_STD_K,ONION_STD_N);
-
- /** The splitfile type. See the SPLITFILE_ constants on Metadata. */
- final short splitfileType;
- /** The segment length. -1 means not segmented and must get everything to decode. */
- final int blocksPerSegment;
- /** The segment length in check blocks. */
- final int checkBlocksPerSegment;
- /** Total number of segments */
- final int segmentCount;
- /** The detailed information on each segment */
- final Segment[] segments;
- /** The splitfile data blocks. */
- final FreenetURI[] splitfileDataBlocks;
- /** The splitfile check blocks. */
- final FreenetURI[] splitfileCheckBlocks;
- /** The archive context */
- final ArchiveContext actx;
- /** The fetch context */
- final FetcherContext fctx;
- /** Maximum temporary length */
- final long maxTempLength;
- /** Have all segments finished? Access synchronized. */
- private boolean allSegmentsFinished = false;
- /** Currently fetching segment */
- private Segment fetchingSegment;
- /** Array of unstarted segments. Modify synchronized. */
- private Vector unstartedSegments;
- /** Override length. If this is positive, truncate the splitfile to this length. */
- private long overrideLength;
- /** Accept non-full splitfile chunks? */
- private boolean splitUseLengths;
-
- public SplitFetcher(Metadata metadata, ArchiveContext archiveContext, FetcherContext ctx, int recursionLevel) throws MetadataParseException, FetchException {
- actx = archiveContext;
- fctx = ctx;
- overrideLength = metadata.dataLength;
- this.maxTempLength = ctx.maxTempLength;
- splitfileType = metadata.getSplitfileType();
- splitfileDataBlocks = metadata.getSplitfileDataKeys();
- splitfileCheckBlocks = metadata.getSplitfileCheckKeys();
- splitUseLengths = metadata.splitUseLengths;
- int blockLength = splitUseLengths ? -1 : NodeCHK.BLOCK_SIZE;
- if(splitfileType == Metadata.SPLITFILE_NONREDUNDANT) {
- // Don't need to do much - just fetch everything and piece it together.
- blocksPerSegment = -1;
- checkBlocksPerSegment = -1;
- segmentCount = 1;
- } else if(splitfileType == Metadata.SPLITFILE_ONION_STANDARD) {
- byte[] params = metadata.splitfileParams;
- if(params == null || params.length < 8)
- throw new MetadataParseException("No splitfile params");
- blocksPerSegment = Fields.bytesToInt(params, 0);
- checkBlocksPerSegment = Fields.bytesToInt(params, 4);
- if(blocksPerSegment > ctx.maxDataBlocksPerSegment
- || checkBlocksPerSegment > ctx.maxCheckBlocksPerSegment)
- throw new FetchException(FetchException.TOO_MANY_BLOCKS_PER_SEGMENT, "Too many blocks per segment: "+blocksPerSegment+" data, "+checkBlocksPerSegment+" check");
- segmentCount = (splitfileDataBlocks.length / blocksPerSegment) +
- (splitfileDataBlocks.length % blocksPerSegment == 0 ? 0 : 1);
- // Onion, 128/192.
- // Will be segmented.
- } else throw new MetadataParseException("Unknown splitfile format: "+splitfileType);
- Logger.minor(this, "Algorithm: "+splitfileType+", blocks per segment: "+blocksPerSegment+", check blocks per segment: "+checkBlocksPerSegment+", segments: "+segmentCount);
- segments = new Segment[segmentCount]; // initially null on all entries
- if(segmentCount == 1) {
- segments[0] = new Segment(splitfileType, splitfileDataBlocks, splitfileCheckBlocks, this, archiveContext, ctx, maxTempLength, splitUseLengths, recursionLevel+1);
- } else {
- int dataBlocksPtr = 0;
- int checkBlocksPtr = 0;
- for(int i=0;i<segments.length;i++) {
- // Create a segment. Give it its keys.
- int copyDataBlocks = Math.min(splitfileDataBlocks.length - dataBlocksPtr, blocksPerSegment);
- int copyCheckBlocks = Math.min(splitfileCheckBlocks.length - checkBlocksPtr, checkBlocksPerSegment);
- FreenetURI[] dataBlocks = new FreenetURI[copyDataBlocks];
- FreenetURI[] checkBlocks = new FreenetURI[copyCheckBlocks];
- if(copyDataBlocks > 0)
- System.arraycopy(splitfileDataBlocks, dataBlocksPtr, dataBlocks, 0, copyDataBlocks);
- if(copyCheckBlocks > 0)
- System.arraycopy(splitfileCheckBlocks, checkBlocksPtr, checkBlocks, 0, copyCheckBlocks);
- dataBlocksPtr += copyDataBlocks;
- checkBlocksPtr += copyCheckBlocks;
- segments[i] = new Segment(splitfileType, dataBlocks, checkBlocks, this, archiveContext, ctx, maxTempLength, splitUseLengths, blockLength);
- }
- }
- unstartedSegments = new Vector();
- for(int i=0;i<segments.length;i++)
- unstartedSegments.add(segments[i]);
- Logger.minor(this, "Segments: "+unstartedSegments.size()+",
data keys: "+splitfileDataBlocks.length+", check keys:
"+(splitfileCheckBlocks==null?0:splitfileCheckBlocks.length));
- }
-
- /**
- * Fetch the splitfile.
- * Fetch one segment, while decoding the previous one.
- * Fetch the segments in random order.
- * When everything has been fetched and decoded, return the full data.
- * @throws FetchException
- */
- public Bucket fetch() throws FetchException {
- /*
- * While(true) {
- * Pick a random segment, start it fetching.
- * Wait for a segment to finish fetching, a segment to finish decoding, or an error.
- * If a segment finishes fetching:
- * Continue to start another one if there are any left
- * If a segment finishes decoding:
- * If all segments are decoded, assemble all the segments and return the data.
- *
- * Segments are expected to automatically start decoding when they finish fetching,
- * but to tell us either way.
- */
- while(true) {
- synchronized(this) {
- if(fetchingSegment == null) {
- // Pick a random segment
- fetchingSegment = chooseUnstartedSegment();
- if(fetchingSegment == null) {
- // All segments have started
- } else {
- fetchingSegment.start();
- }
- }
- if(allSegmentsFinished) {
- return finalStatus();
- }
- try {
- wait(10*1000); // or wait()?
- } catch (InterruptedException e) {
- // Ignore
- }
- }
- }
- }
-
- private Segment chooseUnstartedSegment() {
- synchronized(unstartedSegments) {
- if(unstartedSegments.isEmpty()) return null;
- int x = fctx.random.nextInt(unstartedSegments.size());
- Logger.minor(this, "Starting segment "+x+" of
"+unstartedSegments.size());
- Segment s = (Segment) unstartedSegments.remove(x);
- return s;
- }
- }
-
- /** Return the final status of the fetch. Throws an exception, or returns a
- * Bucket containing the fetched data.
- * @throws FetchException If the fetch failed for some reason.
- */
- private Bucket finalStatus() throws FetchException {
- long finalLength = 0;
- for(int i=0;i<segments.length;i++) {
- Segment s = segments[i];
- if(!s.isFinished()) throw new IllegalStateException("Not all finished");
- s.throwError();
- // If still here, it succeeded
- finalLength += s.decodedLength();
- // Healing is done by Segment
- }
- if(finalLength > overrideLength)
- finalLength = overrideLength;
-
- long bytesWritten = 0;
- OutputStream os = null;
- Bucket output;
- try {
- output = fctx.bucketFactory.makeBucket(finalLength);
- os = output.getOutputStream();
- for(int i=0;i<segments.length;i++) {
- Segment s = segments[i];
- long max = (finalLength < 0 ? 0 : (finalLength - bytesWritten));
- bytesWritten += s.writeDecodedDataTo(os, max);
- }
- } catch (IOException e) {
- throw new FetchException(FetchException.BUCKET_ERROR, e);
- } finally {
- if(os != null) {
- try {
- os.close();
- } catch (IOException e) {
- // If it fails to close it may return corrupt data.
- throw new FetchException(FetchException.BUCKET_ERROR, e);
- }
- }
- }
- return output;
- }
-
- public void gotBlocks(Segment segment) {
- Logger.minor(this, "Got blocks for segment: "+segment);
- synchronized(this) {
- fetchingSegment = null;
- notifyAll();
- }
- }
-
- public void segmentFinished(Segment segment) {
- Logger.minor(this, "Finished segment: "+segment);
- synchronized(this) {
- boolean allDone = true;
- for(int i=0;i<segments.length;i++)
- if(!segments[i].isFinished()) {
- Logger.minor(this, "Segment
"+segments[i]+" is not finished");
- allDone = false;
- }
- if(allDone) allSegmentsFinished = true;
- notifyAll();
- }
- }
-
- public void onProgress() {
- int totalBlocks = splitfileDataBlocks.length;
- int fetchedBlocks = 0;
- int failedBlocks = 0;
- int fatallyFailedBlocks = 0;
- int runningBlocks = 0;
- for(int i=0;i<segments.length;i++) {
- Logger.minor(this, "Segment: "+segments[i]+":
fetched="+segments[i].fetchedBlocks()+", failedBlocks:
"+segments[i].failedBlocks()+
- ", fatally:
"+segments[i].fatallyFailedBlocks()+", running: "+segments[i].runningBlocks());
- fetchedBlocks += segments[i].fetchedBlocks();
- failedBlocks += segments[i].failedBlocks();
- fatallyFailedBlocks += segments[i].fatallyFailedBlocks();
- runningBlocks += segments[i].runningBlocks();
- }
- fctx.eventProducer.produceEvent(new SplitfileProgressEvent(totalBlocks, fetchedBlocks, failedBlocks, fatallyFailedBlocks, runningBlocks));
- }
-
-}
Copied: branches/async-client-layer/src/freenet/client/SplitFetcher.java (from rev 7873, trunk/freenet/src/freenet/client/SplitFetcher.java)
===================================================================
--- trunk/freenet/src/freenet/client/SplitFetcher.java 2006-01-18 16:38:29 UTC (rev 7873)
+++ branches/async-client-layer/src/freenet/client/SplitFetcher.java 2006-01-20 19:03:11 UTC (rev 7885)
@@ -0,0 +1,244 @@
+package freenet.client;
+
+import java.io.IOException;
+import java.io.OutputStream;
+import java.util.Vector;
+
+import freenet.client.events.SplitfileProgressEvent;
+import freenet.keys.FreenetURI;
+import freenet.keys.NodeCHK;
+import freenet.support.Bucket;
+import freenet.support.Fields;
+import freenet.support.Logger;
+
+/**
+ * Class to fetch a splitfile.
+ */
+public class SplitFetcher {
+
+ /** The splitfile type. See the SPLITFILE_ constants on Metadata. */
+ final short splitfileType;
+ /** The segment length. -1 means not segmented and must get everything to decode. */
+ final int blocksPerSegment;
+ /** The segment length in check blocks. */
+ final int checkBlocksPerSegment;
+ /** Total number of segments */
+ final int segmentCount;
+ /** The detailed information on each segment */
+ final Segment[] segments;
+ /** The splitfile data blocks. */
+ final FreenetURI[] splitfileDataBlocks;
+ /** The splitfile check blocks. */
+ final FreenetURI[] splitfileCheckBlocks;
+ /** The archive context */
+ final ArchiveContext actx;
+ /** The fetch context */
+ final FetcherContext fctx;
+ /** Maximum temporary length */
+ final long maxTempLength;
+ /** Have all segments finished? Access synchronized. */
+ private boolean allSegmentsFinished = false;
+ /** Currently fetching segment */
+ private Segment fetchingSegment;
+ /** Array of unstarted segments. Modify synchronized. */
+ private Vector unstartedSegments;
+ /** Override length. If this is positive, truncate the splitfile to this length. */
+ private long overrideLength;
+ /** Accept non-full splitfile chunks? */
+ private boolean splitUseLengths;
+
+ public SplitFetcher(Metadata metadata, ArchiveContext archiveContext, FetcherContext ctx, int recursionLevel) throws MetadataParseException, FetchException {
+ actx = archiveContext;
+ fctx = ctx;
+ if(fctx.isCancelled()) throw new FetchException(FetchException.CANCELLED);
+ overrideLength = metadata.dataLength;
+ this.maxTempLength = ctx.maxTempLength;
+ splitfileType = metadata.getSplitfileType();
+ splitfileDataBlocks = metadata.getSplitfileDataKeys();
+ splitfileCheckBlocks = metadata.getSplitfileCheckKeys();
+ splitUseLengths = metadata.splitUseLengths;
+ int blockLength = splitUseLengths ? -1 : NodeCHK.BLOCK_SIZE;
+ if(splitfileType == Metadata.SPLITFILE_NONREDUNDANT) {
+ // Don't need to do much - just fetch everything and piece it together.
+ blocksPerSegment = -1;
+ checkBlocksPerSegment = -1;
+ segmentCount = 1;
+ } else if(splitfileType == Metadata.SPLITFILE_ONION_STANDARD) {
+ byte[] params = metadata.splitfileParams;
+ if(params == null || params.length < 8)
+ throw new MetadataParseException("No splitfile params");
+ blocksPerSegment = Fields.bytesToInt(params, 0);
+ checkBlocksPerSegment = Fields.bytesToInt(params, 4);
+ if(blocksPerSegment > ctx.maxDataBlocksPerSegment
+ || checkBlocksPerSegment > ctx.maxCheckBlocksPerSegment)
+ throw new FetchException(FetchException.TOO_MANY_BLOCKS_PER_SEGMENT, "Too many blocks per segment: "+blocksPerSegment+" data, "+checkBlocksPerSegment+" check");
+ segmentCount = (splitfileDataBlocks.length / blocksPerSegment) +
+ (splitfileDataBlocks.length % blocksPerSegment == 0 ? 0 : 1);
+ // Onion, 128/192.
+ // Will be segmented.
+ } else throw new MetadataParseException("Unknown splitfile format: "+splitfileType);
+ Logger.minor(this, "Algorithm: "+splitfileType+", blocks per segment: "+blocksPerSegment+", check blocks per segment: "+checkBlocksPerSegment+", segments: "+segmentCount);
+ segments = new Segment[segmentCount]; // initially null on all entries
+ if(segmentCount == 1) {
+ segments[0] = new Segment(splitfileType, splitfileDataBlocks, splitfileCheckBlocks, this, archiveContext, ctx, maxTempLength, splitUseLengths, recursionLevel+1);
+ } else {
+ int dataBlocksPtr = 0;
+ int checkBlocksPtr = 0;
+ for(int i=0;i<segments.length;i++) {
+ // Create a segment. Give it its keys.
+ int copyDataBlocks = Math.min(splitfileDataBlocks.length - dataBlocksPtr, blocksPerSegment);
+ int copyCheckBlocks = Math.min(splitfileCheckBlocks.length - checkBlocksPtr, checkBlocksPerSegment);
+ FreenetURI[] dataBlocks = new FreenetURI[copyDataBlocks];
+ FreenetURI[] checkBlocks = new FreenetURI[copyCheckBlocks];
+ if(copyDataBlocks > 0)
+ System.arraycopy(splitfileDataBlocks, dataBlocksPtr, dataBlocks, 0, copyDataBlocks);
+ if(copyCheckBlocks > 0)
+ System.arraycopy(splitfileCheckBlocks, checkBlocksPtr, checkBlocks, 0, copyCheckBlocks);
+ dataBlocksPtr += copyDataBlocks;
+ checkBlocksPtr += copyCheckBlocks;
+ segments[i] = new Segment(splitfileType, dataBlocks, checkBlocks, this, archiveContext, ctx, maxTempLength, splitUseLengths, blockLength);
+ }
+ }
+ unstartedSegments = new Vector();
+ for(int i=0;i<segments.length;i++)
+ unstartedSegments.add(segments[i]);
+ Logger.minor(this, "Segments: "+unstartedSegments.size()+",
data keys: "+splitfileDataBlocks.length+", check keys:
"+(splitfileCheckBlocks==null?0:splitfileCheckBlocks.length));
+ }
+
+ /**
+ * Fetch the splitfile.
+ * Fetch one segment, while decoding the previous one.
+ * Fetch the segments in random order.
+ * When everything has been fetched and decoded, return the full data.
+ * @throws FetchException
+ */
+ public Bucket fetch() throws FetchException {
+ /*
+ * While(true) {
+ * Pick a random segment, start it fetching.
+ * Wait for a segment to finish fetching, a segment to finish decoding, or an error.
+ * If a segment finishes fetching:
+ * Continue to start another one if there are any left
+ * If a segment finishes decoding:
+ * If all segments are decoded, assemble all the segments and return the data.
+ *
+ * Segments are expected to automatically start decoding when they finish fetching,
+ * but to tell us either way.
+ */
+ while(true) {
+ synchronized(this) {
+ if(fctx.isCancelled()) throw new FetchException(FetchException.CANCELLED);
+ if(fetchingSegment == null) {
+ // Pick a random segment
+ fetchingSegment = chooseUnstartedSegment();
+ if(fetchingSegment == null) {
+ // All segments have started
+ } else {
+ fetchingSegment.start();
+ }
+ }
+ if(allSegmentsFinished) {
+ return finalStatus();
+ }
+ try {
+ wait(10*1000); // or wait()?
+ } catch (InterruptedException e) {
+ // Ignore
+ }
+ }
+ }
+ }
+
+ private Segment chooseUnstartedSegment() {
+ synchronized(unstartedSegments) {
+ if(unstartedSegments.isEmpty()) return null;
+ int x = fctx.random.nextInt(unstartedSegments.size());
+ Logger.minor(this, "Starting segment "+x+" of
"+unstartedSegments.size());
+ Segment s = (Segment) unstartedSegments.remove(x);
+ return s;
+ }
+ }
+
+ /** Return the final status of the fetch. Throws an exception, or returns a
+ * Bucket containing the fetched data.
+ * @throws FetchException If the fetch failed for some reason.
+ */
+ private Bucket finalStatus() throws FetchException {
+ long finalLength = 0;
+ for(int i=0;i<segments.length;i++) {
+ Segment s = segments[i];
+ if(!s.isFinished()) throw new IllegalStateException("Not all finished");
+ s.throwError();
+ // If still here, it succeeded
+ finalLength += s.decodedLength();
+ // Healing is done by Segment
+ }
+ if(finalLength > overrideLength)
+ finalLength = overrideLength;
+
+ long bytesWritten = 0;
+ OutputStream os = null;
+ Bucket output;
+ try {
+ output = fctx.bucketFactory.makeBucket(finalLength);
+ os = output.getOutputStream();
+ for(int i=0;i<segments.length;i++) {
+ Segment s = segments[i];
+ long max = (finalLength < 0 ? 0 : (finalLength - bytesWritten));
+ bytesWritten += s.writeDecodedDataTo(os, max);
+ }
+ } catch (IOException e) {
+ throw new FetchException(FetchException.BUCKET_ERROR, e);
+ } finally {
+ if(os != null) {
+ try {
+ os.close();
+ } catch (IOException e) {
+ // If it fails to close it may return corrupt data.
+ throw new FetchException(FetchException.BUCKET_ERROR, e);
+ }
+ }
+ }
+ return output;
+ }
+
+ public void gotBlocks(Segment segment) {
+ Logger.minor(this, "Got blocks for segment: "+segment);
+ synchronized(this) {
+ fetchingSegment = null;
+ notifyAll();
+ }
+ }
+
+ public void segmentFinished(Segment segment) {
+ Logger.minor(this, "Finished segment: "+segment);
+ synchronized(this) {
+ boolean allDone = true;
+ for(int i=0;i<segments.length;i++)
+ if(!segments[i].isFinished()) {
+ Logger.minor(this, "Segment
"+segments[i]+" is not finished");
+ allDone = false;
+ }
+ if(allDone) allSegmentsFinished = true;
+ notifyAll();
+ }
+ }
+
+ public void onProgress() {
+ int totalBlocks = splitfileDataBlocks.length;
+ int fetchedBlocks = 0;
+ int failedBlocks = 0;
+ int fatallyFailedBlocks = 0;
+ int runningBlocks = 0;
+ for(int i=0;i<segments.length;i++) {
+ Logger.minor(this, "Segment: "+segments[i]+":
fetched="+segments[i].fetchedBlocks()+", failedBlocks:
"+segments[i].failedBlocks()+
+ ", fatally:
"+segments[i].fatallyFailedBlocks()+", running: "+segments[i].runningBlocks());
+ fetchedBlocks += segments[i].fetchedBlocks();
+ failedBlocks += segments[i].failedBlocks();
+ fatallyFailedBlocks += segments[i].fatallyFailedBlocks();
+ runningBlocks += segments[i].runningBlocks();
+ }
+ fctx.eventProducer.produceEvent(new SplitfileProgressEvent(totalBlocks, fetchedBlocks, failedBlocks, fatallyFailedBlocks, runningBlocks));
+ }
+
+}
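For reference, the segment-count expression in the constructor above is a plain ceiling division: one segment per full blocksPerSegment worth of data keys, plus one for any remainder. A standalone sketch with a worked example (the class and the numbers are illustrative only, not part of this commit):

    // Hypothetical sketch, not committed code.
    public class SegmentMath {
        static int segmentCount(int dataBlocks, int blocksPerSegment) {
            // Same shape as the constructor above: ceil(dataBlocks / blocksPerSegment).
            return (dataBlocks / blocksPerSegment)
                    + (dataBlocks % blocksPerSegment == 0 ? 0 : 1);
        }

        public static void main(String[] args) {
            // e.g. 300 data keys at 128 per segment -> 3 segments (128 + 128 + 44)
            System.out.println(segmentCount(300, 128)); // prints 3
        }
    }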
Modified: branches/async-client-layer/src/freenet/client/SplitInserter.java
===================================================================
--- trunk/freenet/src/freenet/client/SplitInserter.java 2006-01-18 00:45:46 UTC (rev 7871)
+++ branches/async-client-layer/src/freenet/client/SplitInserter.java 2006-01-20 19:03:11 UTC (rev 7885)
@@ -30,7 +30,7 @@
final int blockSize;
final boolean isMetadata;
final Bucket returnMetadata;
- SplitfileBlock[] origDataBlocks;
+ StartableSplitfileBlock[] origDataBlocks;
InsertSegment encodingSegment;
InsertSegment[] segments;
private boolean finishedInserting = false;
@@ -39,7 +39,7 @@
private int failed;
private int fatalErrors;
private int countCheckBlocks;
- private SplitfileBlock[] fatalErrorBlocks;
+ private StartableSplitfileBlock[] fatalErrorBlocks;
private FileInserter inserter;
/**
@@ -232,7 +232,7 @@
*/
private void splitIntoBlocks() throws IOException {
Bucket[] dataBuckets = BucketTools.split(origData, NodeCHK.BLOCK_SIZE, ctx.bf);
- origDataBlocks = new SplitfileBlock[dataBuckets.length];
+ origDataBlocks = new StartableSplitfileBlock[dataBuckets.length];
for(int i=0;i<origDataBlocks.length;i++) {
origDataBlocks[i] = new BlockInserter(dataBuckets[i], i, tracker, ctx, getCHKOnly);
if(origDataBlocks[i].getData() == null)
@@ -259,7 +259,7 @@
int segNo = 0;
for(int i=segmentSize;;i+=segmentSize) {
if(i > dataBlocks) i = dataBlocks;
- SplitfileBlock[] seg = new SplitfileBlock[i-j];
+ StartableSplitfileBlock[] seg = new StartableSplitfileBlock[i-j];
System.arraycopy(origDataBlocks, j, seg, 0, i-j);
j = i;
for(int x=0;x<seg.length;x++)
@@ -275,7 +275,7 @@
segments = (InsertSegment[]) segs.toArray(new InsertSegment[segs.size()]);
}
- public void finished(SplitfileBlock[] succeeded, SplitfileBlock[] failed, SplitfileBlock[] fatalErrors) {
+ public void finished(StartableSplitfileBlock[] succeeded, StartableSplitfileBlock[] failed, StartableSplitfileBlock[] fatalErrors) {
synchronized(this) {
finishedInserting = true;
this.succeeded = succeeded.length;
Deleted: branches/async-client-layer/src/freenet/client/SplitfileBlock.java
===================================================================
--- trunk/freenet/src/freenet/client/SplitfileBlock.java 2006-01-18 00:45:46 UTC (rev 7871)
+++ branches/async-client-layer/src/freenet/client/SplitfileBlock.java 2006-01-20 19:03:11 UTC (rev 7885)
@@ -1,38 +0,0 @@
-package freenet.client;
-
-import freenet.client.RetryTracker.Level;
-import freenet.keys.FreenetURI;
-import freenet.support.Bucket;
-
-/** Simple interface for a splitfile block */
-public abstract class SplitfileBlock {
-
- /** Get block number. [0,k[ = data blocks, [k, n[ = check blocks */
- abstract int getNumber();
-
- /** Has data? */
- abstract boolean hasData();
-
- /** Get data */
- abstract Bucket getData();
-
- /** Set data */
- abstract void setData(Bucket data);
-
- /** Start the fetch (or insert). Implementation is required to call relevant
- * methods on RetryTracker when done. */
- abstract void start();
-
- /**
- * Shut down the fetch as soon as reasonably possible.
- */
- abstract public void kill();
-
- /**
- * Get the URI of the file. For an insert, this is derived during insert.
- * For a request, it is fixed in the constructor.
- */
- abstract public FreenetURI getURI();
-
- abstract public int getRetryCount();
-}
Copied: branches/async-client-layer/src/freenet/client/StartableSplitfileBlock.java (from rev 7871, trunk/freenet/src/freenet/client/SplitfileBlock.java)
===================================================================
--- trunk/freenet/src/freenet/client/SplitfileBlock.java 2006-01-18 00:45:46 UTC (rev 7871)
+++ branches/async-client-layer/src/freenet/client/StartableSplitfileBlock.java 2006-01-20 19:03:11 UTC (rev 7885)
@@ -0,0 +1,25 @@
+package freenet.client;
+
+import freenet.keys.FreenetURI;
+
+/** Simple interface for a splitfile block */
+public interface StartableSplitfileBlock extends SplitfileBlock {
+
+ /** Start the fetch (or insert). Implementation is required to call relevant
+ * methods on RetryTracker when done. */
+ abstract void start();
+
+ /**
+ * Shut down the fetch as soon as reasonably possible.
+ */
+ abstract public void kill();
+
+ abstract public int getRetryCount();
+
+ /**
+ * Get the URI of the file. For an insert, this is derived during insert.
+ * For a request, it is fixed in the constructor.
+ */
+ abstract public FreenetURI getURI();
+
+}
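The old abstract SplitfileBlock class is split here: the data accessors stay on a SplitfileBlock super-interface, while start/kill/getRetryCount/getURI move to StartableSplitfileBlock. A minimal implementer, for illustration only (the class is hypothetical, and the super-interface is assumed to keep the accessors from the old abstract class shown above):

    // Hypothetical sketch, not committed code.
    import freenet.keys.FreenetURI;
    import freenet.support.Bucket;

    class DummyBlock implements StartableSplitfileBlock {
        private Bucket data;
        private final int number;
        DummyBlock(int number) { this.number = number; }
        // Assumed SplitfileBlock side: the accessors from the old abstract class.
        public int getNumber() { return number; }
        public boolean hasData() { return data != null; }
        public Bucket getData() { return data; }
        public void setData(Bucket data) { this.data = data; }
        // StartableSplitfileBlock side.
        public void start() { /* would report to the RetryTracker when done */ }
        public void kill() { /* shut down as soon as reasonably possible */ }
        public int getRetryCount() { return 0; }
        public FreenetURI getURI() { return null; } // fixed in ctor for requests
    }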
Modified: branches/async-client-layer/src/freenet/client/StdSplitfileBlock.java
===================================================================
--- trunk/freenet/src/freenet/client/StdSplitfileBlock.java 2006-01-18 00:45:46 UTC (rev 7871)
+++ branches/async-client-layer/src/freenet/client/StdSplitfileBlock.java 2006-01-20 19:03:11 UTC (rev 7885)
@@ -3,7 +3,7 @@
import freenet.support.Bucket;
import freenet.support.Logger;
-public abstract class StdSplitfileBlock extends SplitfileBlock implements Runnable {
+public abstract class StdSplitfileBlock implements StartableSplitfileBlock, Runnable {
Bucket fetchedData;
protected final RetryTracker tracker;
Deleted: branches/async-client-layer/src/freenet/clients/http/FproxyToadlet.java
===================================================================
--- trunk/freenet/src/freenet/clients/http/FproxyToadlet.java 2006-01-18 00:45:46 UTC (rev 7871)
+++ branches/async-client-layer/src/freenet/clients/http/FproxyToadlet.java 2006-01-20 19:03:11 UTC (rev 7885)
@@ -1,62 +0,0 @@
-package freenet.clients.http;
-
-import java.io.IOException;
-import java.io.PrintWriter;
-import java.io.StringWriter;
-import java.net.MalformedURLException;
-import java.net.URI;
-
-import freenet.client.FetchException;
-import freenet.client.FetchResult;
-import freenet.client.HighLevelSimpleClient;
-import freenet.keys.FreenetURI;
-import freenet.support.Bucket;
-import freenet.support.HTMLEncoder;
-import freenet.support.Logger;
-
-public class FproxyToadlet extends Toadlet {
-
- public FproxyToadlet(HighLevelSimpleClient client) {
- super(client);
- }
-
- void handleGet(URI uri, ToadletContext ctx)
- throws ToadletContextClosedException, IOException {
- String ks = uri.toString();
- if(ks.startsWith("/"))
- ks = ks.substring(1);
- FreenetURI key;
- try {
- key = new FreenetURI(ks);
- } catch (MalformedURLException e) {
- this.writeReply(ctx, 400, "text/html", "Invalid key", "<html><head><title>Invalid key</title></head><body>Expected a freenet key, but got "+HTMLEncoder.encode(ks)+"</body></html>");
- return;
- }
- try {
- FetchResult result = fetch(key);
- writeReply(ctx, 200, result.getMimeType(), "OK", result.asBucket());
- } catch (FetchException e) {
- String msg = e.getMessage();
- this.writeReply(ctx, 500 /* close enough - FIXME probably should depend on status code */,
- "text/html", msg, "<html><head><title>"+msg+"</title></head><body>Error: "+HTMLEncoder.encode(msg)+"</body></html>");
- } catch (Throwable t) {
- Logger.error(this, "Caught "+t, t);
- String msg = "<html><head><title>Internal
Error</title></head><body><h1>Internal Error: please report</h1><pre>";
- StringWriter sw = new StringWriter();
- PrintWriter pw = new PrintWriter(sw);
- t.printStackTrace(pw);
- pw.flush();
- msg = msg + sw.toString() + "</pre></body></html>";
- this.writeReply(ctx, 500, "text/html", "Internal
Error", msg);
- }
- }
-
- void handlePut(URI uri, Bucket data, ToadletContext ctx)
- throws ToadletContextClosedException, IOException {
- String notSupported = "<html><head><title>Not supported</title></head><body>"+
- "Operation not supported</body>";
- // FIXME should be 405? Need to let toadlets indicate what is allowed maybe in a callback?
- this.writeReply(ctx, 200, "text/html", "OK", notSupported);
- }
-
-}
Copied: branches/async-client-layer/src/freenet/clients/http/FproxyToadlet.java (from rev 7874, trunk/freenet/src/freenet/clients/http/FproxyToadlet.java)
Deleted: branches/async-client-layer/src/freenet/node/Node.java
===================================================================
--- trunk/freenet/src/freenet/node/Node.java 2006-01-18 00:45:46 UTC (rev 7871)
+++ branches/async-client-layer/src/freenet/node/Node.java 2006-01-20 19:03:11 UTC (rev 7885)
@@ -1,1468 +0,0 @@
-/*
- * Freenet 0.7 node.
- *
- * Designed primarily for darknet operation, but should also be usable
- * in open mode eventually.
- */
-package freenet.node;
-
-import java.io.BufferedReader;
-import java.io.ByteArrayOutputStream;
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.FileNotFoundException;
-import java.io.FileOutputStream;
-import java.io.IOException;
-import java.io.InputStreamReader;
-import java.io.OutputStreamWriter;
-import java.net.InetAddress;
-import java.net.SocketException;
-import java.security.MessageDigest;
-import java.security.NoSuchAlgorithmException;
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-
-import freenet.client.ArchiveManager;
-import freenet.client.HighLevelSimpleClient;
-import freenet.client.HighLevelSimpleClientImpl;
-import freenet.clients.http.FproxyToadlet;
-import freenet.clients.http.SimpleToadletServer;
-import freenet.crypt.DSAPublicKey;
-import freenet.crypt.DiffieHellman;
-import freenet.crypt.RandomSource;
-import freenet.crypt.Yarrow;
-import freenet.io.comm.DMT;
-import freenet.io.comm.DisconnectedException;
-import freenet.io.comm.Message;
-import freenet.io.comm.MessageFilter;
-import freenet.io.comm.Peer;
-import freenet.io.comm.PeerParseException;
-import freenet.io.comm.UdpSocketManager;
-import freenet.io.xfer.AbortedException;
-import freenet.io.xfer.BlockTransmitter;
-import freenet.io.xfer.PartiallyReceivedBlock;
-import freenet.keys.CHKBlock;
-import freenet.keys.CHKVerifyException;
-import freenet.keys.ClientCHK;
-import freenet.keys.ClientCHKBlock;
-import freenet.keys.ClientKey;
-import freenet.keys.ClientKeyBlock;
-import freenet.keys.ClientSSK;
-import freenet.keys.ClientSSKBlock;
-import freenet.keys.Key;
-import freenet.keys.KeyBlock;
-import freenet.keys.NodeCHK;
-import freenet.keys.NodeSSK;
-import freenet.keys.SSKBlock;
-import freenet.keys.SSKVerifyException;
-import freenet.store.BerkeleyDBFreenetStore;
-import freenet.store.FreenetStore;
-import freenet.support.BucketFactory;
-import freenet.support.FileLoggerHook;
-import freenet.support.HexUtil;
-import freenet.support.ImmutableByteArrayWrapper;
-import freenet.support.LRUHashtable;
-import freenet.support.LRUQueue;
-import freenet.support.Logger;
-import freenet.support.PaddedEphemerallyEncryptedBucketFactory;
-import freenet.support.SimpleFieldSet;
-import freenet.support.io.FilenameGenerator;
-import freenet.support.io.TempBucketFactory;
-import freenet.transport.IPAddressDetector;
-
-/**
- * @author amphibian
- */
-public class Node implements QueueingSimpleLowLevelClient {
-
- static final long serialVersionUID = -1;
-
- /** If true, local requests and inserts aren't cached.
- * This opens up a glaring vulnerability; connected nodes
- * can then probe the store, and if the node doesn't have the
- * content, they know for sure that it was a local request.
- * HOWEVER, if we don't do this, then a non-full seized
- * datastore will contain everything requested by the user...
- * Also, remote probing is possible.
- *
- * So it may be useful on some darknets, and is useful for
- * debugging, but in general should be off on opennet and
- * most darknets.
- */
- public static final boolean DONT_CACHE_LOCAL_REQUESTS = true;
- public static final int PACKETS_IN_BLOCK = 32;
- public static final int PACKET_SIZE = 1024;
- public static final double DECREMENT_AT_MIN_PROB = 0.25;
- public static final double DECREMENT_AT_MAX_PROB = 0.5;
- // Send keepalives every 2.5-5.0 seconds
- public static final int KEEPALIVE_INTERVAL = 2500;
- // If no activity for 30 seconds, node is dead
- public static final int MAX_PEER_INACTIVITY = 60000;
- /** Time after which a handshake is assumed to have failed. */
- public static final int HANDSHAKE_TIMEOUT = 5000;
- // Inter-handshake time must be at least 2x handshake timeout
- public static final int MIN_TIME_BETWEEN_HANDSHAKE_SENDS = HANDSHAKE_TIMEOUT*2; // 10-15 secs
- public static final int RANDOMIZED_TIME_BETWEEN_HANDSHAKE_SENDS = HANDSHAKE_TIMEOUT;
- public static final int MIN_TIME_BETWEEN_VERSION_PROBES = HANDSHAKE_TIMEOUT*4;
- public static final int RANDOMIZED_TIME_BETWEEN_VERSION_PROBES = HANDSHAKE_TIMEOUT*2; // 20-30 secs
- // If we don't receive any packets at all in this period, from any node, tell the user
- public static final long ALARM_TIME = 60*1000;
- /** Sub-max ping time. If ping is greater than this, we reject some requests. */
- public static final long SUB_MAX_PING_TIME = 1000;
- /** Maximum overall average ping time. If ping is greater than this,
- * we reject all requests.
- */
- public static final long MAX_PING_TIME = 2000;
- /** Accept one request every 10 seconds regardless, to ensure we update the
- * block send time.
- */
- public static final int MAX_INTERREQUEST_TIME = 10*1000;
-
- // 900ms
- static final int MIN_INTERVAL_BETWEEN_INCOMING_SWAP_REQUESTS = 900;
- public static final int SYMMETRIC_KEY_LENGTH = 32; // 256 bits - note that this isn't used everywhere to determine it
-
- // FIXME: abstract out address stuff? Possibly to something like NodeReference?
- final int portNumber;
-
- /** These 3 are private because must be protected by synchronized(this) */
- /** The CHK datastore */
- private final FreenetStore chkDatastore;
- /** The SSK datastore */
- private final FreenetStore sskDatastore;
- /** The store of DSAPublicKeys (by hash) */
- private final FreenetStore pubKeyDatastore;
- /** RequestSender's currently running, by KeyHTLPair */
- private final HashMap requestSenders;
- /** RequestSender's currently transferring, by key */
- private final HashMap transferringRequestSenders;
- /** CHKInsertSender's currently running, by KeyHTLPair */
- private final HashMap insertSenders;
- /** IP address detector */
- private final IPAddressDetector ipDetector;
-
- private final HashSet runningUIDs;
-
- byte[] myIdentity; // FIXME: simple identity block; should be unique
- /** Hash of identity. Used as setup key. */
- byte[] identityHash;
- /** Hash of hash of identity i.e. hash of setup key. */
- byte[] identityHashHash;
- String myName;
- final LocationManager lm;
- final PeerManager peers; // my peers
- final RandomSource random; // strong RNG
- final UdpSocketManager usm;
- final FNPPacketMangler packetMangler;
- final PacketSender ps;
- final NodeDispatcher dispatcher;
- final NodePinger nodePinger;
- final String filenamesPrefix;
- final FilenameGenerator tempFilenameGenerator;
- final FileLoggerHook fileLoggerHook;
- static final int MAX_CACHED_KEYS = 1000;
- final LRUHashtable cachedPubKeys;
- final boolean testnetEnabled;
- final int testnetPort;
- static short MAX_HTL = 10;
- static final int EXIT_STORE_FILE_NOT_FOUND = 1;
- static final int EXIT_STORE_IOEXCEPTION = 2;
- static final int EXIT_STORE_OTHER = 3;
- static final int EXIT_USM_DIED = 4;
- public static final int EXIT_YARROW_INIT_FAILED = 5;
- static final int EXIT_TEMP_INIT_ERROR = 6;
- static final int EXIT_TESTNET_FAILED = 7;
- public static final int EXIT_MAIN_LOOP_LOST = 8;
-
- public final long bootID;
- public final long startupTime;
-
- // Client stuff
- final ArchiveManager archiveManager;
- final BucketFactory tempBucketFactory;
- final RequestThrottle requestThrottle;
- final RequestStarter requestStarter;
- final RequestThrottle insertThrottle;
- final RequestStarter insertStarter;
- final File downloadDir;
- final TestnetHandler testnetHandler;
- final TestnetStatusUploader statusUploader;
-
- // Client stuff that needs to be configged - FIXME
- static final int MAX_ARCHIVE_HANDLERS = 200; // don't take up much RAM... FIXME
- static final long MAX_CACHED_ARCHIVE_DATA = 32*1024*1024; // make a fixed fraction of the store by default? FIXME
- static final long MAX_ARCHIVE_SIZE = 1024*1024; // ??? FIXME
- static final long MAX_ARCHIVED_FILE_SIZE = 1024*1024; // arbitrary... FIXME
- static final int MAX_CACHED_ELEMENTS = 1024; // equally arbitrary! FIXME hopefully we can cache many of these though
-
- /**
- * Read all storable settings (identity etc) from the node file.
- * @param filename The name of the file to read from.
- */
- private void readNodeFile(String filename) throws IOException {
- // REDFLAG: Any way to share this code with NodePeer?
- FileInputStream fis = new FileInputStream(filename);
- InputStreamReader isr = new InputStreamReader(fis);
- BufferedReader br = new BufferedReader(isr);
- SimpleFieldSet fs = new SimpleFieldSet(br);
- br.close();
- // Read contents
- String physical = fs.get("physical.udp");
- Peer myOldPeer;
- try {
- myOldPeer = new Peer(physical);
- } catch (PeerParseException e) {
- IOException e1 = new IOException();
- e1.initCause(e);
- throw e1;
- }
- if(myOldPeer.getPort() != portNumber)
- throw new IllegalArgumentException("Wrong port number "+
- myOldPeer.getPort()+" should be "+portNumber);
- // FIXME: we ignore the IP for now, and hardcode it to localhost
- String identity = fs.get("identity");
- if(identity == null)
- throw new IOException();
- myIdentity = HexUtil.hexToBytes(identity);
- MessageDigest md;
- try {
- md = MessageDigest.getInstance("SHA-256");
- } catch (NoSuchAlgorithmException e) {
- throw new Error(e);
- }
- identityHash = md.digest(myIdentity);
- identityHashHash = md.digest(identityHash);
- String loc = fs.get("location");
- Location l;
- try {
- l = new Location(loc);
- } catch (FSParseException e) {
- IOException e1 = new IOException();
- e1.initCause(e);
- throw e1;
- }
- lm.setLocation(l);
- myName = fs.get("myName");
- if(myName == null) {
- myName = newName();
- }
- }
-
- private String newName() {
- return "Node created around "+System.currentTimeMillis();
- }
-
- public void writeNodeFile() {
- try {
- writeNodeFile(filenamesPrefix+"node-"+portNumber, filenamesPrefix+"node-"+portNumber+".bak");
- } catch (IOException e) {
- Logger.error(this, "Cannot write node file!: "+e+" : "+"node-"+portNumber);
- }
- }
-
- private void writeNodeFile(String filename, String backupFilename) throws IOException {
- SimpleFieldSet fs = exportFieldSet();
- File orig = new File(filename);
- File backup = new File(backupFilename);
- orig.renameTo(backup);
- FileOutputStream fos = new FileOutputStream(filename);
- OutputStreamWriter osr = new OutputStreamWriter(fos);
- fs.writeTo(osr);
- osr.close();
- }
-
- private void initNodeFileSettings(RandomSource r) {
- Logger.normal(this, "Creating new node file from scratch");
- // Don't need to set portNumber
- // FIXME use a real IP!
- myIdentity = new byte[32];
- r.nextBytes(myIdentity);
- MessageDigest md;
- try {
- md = MessageDigest.getInstance("SHA-256");
- } catch (NoSuchAlgorithmException e) {
- throw new Error(e);
- }
- identityHash = md.digest(myIdentity);
- identityHashHash = md.digest(identityHash);
- myName = newName();
- }
-
- /**
- * Read the port number from the arguments.
- * Then create a node.
- */
- public static void main(String[] args) throws IOException {
- int length = args.length;
- if (length < 1 || length > 3) {
- System.out.println("Usage: $ java freenet.node.Node
<portNumber> [ipOverride] [max data packets / second]");
- return;
- }
-
- int port = Integer.parseInt(args[0]);
- System.out.println("Port number: "+port);
- File logDir = new File("logs-"+port);
- logDir.mkdir();
- FileLoggerHook logger = new FileLoggerHook(true, new File(logDir, "freenet-"+port).getAbsolutePath(),
- "d (c, t, p): m", "MMM dd, yyyy HH:mm:ss:SSS", Logger.MINOR, false, true,
- 1024*1024*1024 /* 1GB of old compressed logfiles */);
- logger.setInterval("5MINUTES");
- Logger.setupChain();
- Logger.globalSetThreshold(Logger.MINOR);
- Logger.globalAddHook(logger);
- logger.start();
- Logger.normal(Node.class, "Creating node...");
- Yarrow yarrow = new Yarrow();
- InetAddress overrideIP = null;
- int packetsPerSecond = 15;
- if(args.length > 1) {
- overrideIP = InetAddress.getByName(args[1]);
- System.err.println("Overriding IP detection:
"+overrideIP.getHostAddress());
- if(args.length > 2) {
- packetsPerSecond = Integer.parseInt(args[2]);
- }
- }
- DiffieHellman.init(yarrow);
- Node n = new Node(port, yarrow, overrideIP, "", 1000 / packetsPerSecond, true, logger, 16384);
- n.start(new StaticSwapRequestInterval(2000));
- new TextModeClientInterface(n);
- Thread t = new Thread(new MemoryChecker(), "Memory checker");
- t.setPriority(Thread.MAX_PRIORITY);
- t.start();
- SimpleToadletServer server = new SimpleToadletServer(port+2001);
- FproxyToadlet fproxy = new FproxyToadlet(n.makeClient(RequestStarter.INTERACTIVE_PRIORITY_CLASS, (short)0));
- server.register(fproxy, "/", false);
- System.out.println("Starting fproxy on port "+(port+2001));
- //server.register(fproxy, "/SSK@", false);
- //server.register(fproxy, "/KSK@", false);
- }
-
- // FIXME - the whole overrideIP thing is a hack to avoid config
- // Implement the config!
- Node(int port, RandomSource rand, InetAddress overrideIP, String prefix, int throttleInterval, boolean enableTestnet, FileLoggerHook logger, int maxStoreKeys) {
- this.fileLoggerHook = logger;
- cachedPubKeys = new LRUHashtable();
- if(enableTestnet) {
- Logger.error(this, "WARNING: ENABLING TESTNET CODE! This may
seriously jeopardize your anonymity!");
- testnetEnabled = true;
- testnetPort = 1024 + (port-1024+1000) % (65536 - 1024);
- testnetHandler = new TestnetHandler(this, testnetPort);
- statusUploader = new TestnetStatusUploader(this, 180000);
- } else {
- testnetEnabled = false;
- testnetPort = -1;
- testnetHandler = null;
- statusUploader = null;
- }
- portNumber = port;
- startupTime = System.currentTimeMillis();
- recentlyCompletedIDs = new LRUQueue();
- ipDetector = new IPAddressDetector(10*1000, this);
- if(prefix == null) prefix = "";
- filenamesPrefix = prefix;
- this.overrideIPAddress = overrideIP;
- downloadDir = new File("downloads");
- downloadDir.mkdir();
- try {
- chkDatastore = new BerkeleyDBFreenetStore(prefix+"store-"+portNumber, maxStoreKeys, 32768, CHKBlock.TOTAL_HEADERS_LENGTH);
- sskDatastore = new BerkeleyDBFreenetStore(prefix+"sskstore-"+portNumber, maxStoreKeys, 1024, SSKBlock.TOTAL_HEADERS_LENGTH);
- pubKeyDatastore = new BerkeleyDBFreenetStore(prefix+"pubkeystore-"+portNumber, maxStoreKeys, DSAPublicKey.PADDED_SIZE, 0);
- } catch (FileNotFoundException e1) {
- Logger.error(this, "Could not open datastore: "+e1, e1);
- System.err.println("Could not open datastore: "+e1);
- System.exit(EXIT_STORE_FILE_NOT_FOUND);
- throw new Error();
- } catch (IOException e1) {
- Logger.error(this, "Could not open datastore: "+e1, e1);
- System.err.println("Could not open datastore: "+e1);
- System.exit(EXIT_STORE_IOEXCEPTION);
- throw new Error();
- } catch (Exception e1) {
- Logger.error(this, "Could not open datastore: "+e1, e1);
- System.err.println("Could not open datastore: "+e1);
- System.exit(EXIT_STORE_OTHER);
- throw new Error();
- }
- random = rand;
- requestSenders = new HashMap();
- transferringRequestSenders = new HashMap();
- insertSenders = new HashMap();
- runningUIDs = new HashSet();
-
- BlockTransmitter.setMinPacketInterval(throttleInterval);
-
- /*
- * FIXME: test the soft limit.
- *
- * The soft limit is implemented, except for:
- * - We need to write the current status to disk every 1 minute or so.
- * - When we start up, we need to read this in, assume that the node sent
- * as many packets as it was allowed to in the following minute, and
- * then shut down before writing again (worst case scenario).
- * - We need to test the soft limit!
- */
- BlockTransmitter.setSoftLimitPeriod(14*24*60*60*1000);
- BlockTransmitter.setSoftMinPacketInterval(0);
-
- lm = new LocationManager(random);
-
- try {
- readNodeFile(prefix+"node-"+portNumber);
- } catch (IOException e) {
- try {
- readNodeFile(prefix+"node-"+portNumber+".bak");
- } catch (IOException e1) {
- initNodeFileSettings(random);
- }
- }
- writeNodeFile();
-
- ps = new PacketSender(this);
- peers = new PeerManager(this, prefix+"peers-"+portNumber);
-
- try {
- usm = new UdpSocketManager(portNumber);
- usm.setDispatcher(dispatcher=new NodeDispatcher(this));
- usm.setLowLevelFilter(packetMangler = new FNPPacketMangler(this));
- } catch (SocketException e2) {
- Logger.error(this, "Could not listen for traffic: "+e2, e2);
- System.exit(EXIT_USM_DIED);
- throw new Error();
- }
- decrementAtMax = random.nextDouble() <= DECREMENT_AT_MAX_PROB;
- decrementAtMin = random.nextDouble() <= DECREMENT_AT_MIN_PROB;
- bootID = random.nextLong();
- peers.writePeers();
- try {
- String dirName = "temp-"+portNumber;
- tempFilenameGenerator = new FilenameGenerator(random, true, new File(dirName), "temp-");
- } catch (IOException e) {
- Logger.error(this, "Could not create temp bucket
factory: "+e, e);
- System.exit(EXIT_TEMP_INIT_ERROR);
- throw new Error();
- }
- nodePinger = new NodePinger(this);
- tempBucketFactory = new PaddedEphemerallyEncryptedBucketFactory(new TempBucketFactory(tempFilenameGenerator), random, 1024);
- archiveManager = new ArchiveManager(MAX_ARCHIVE_HANDLERS, MAX_CACHED_ARCHIVE_DATA, MAX_ARCHIVE_SIZE, MAX_ARCHIVED_FILE_SIZE, MAX_CACHED_ELEMENTS, random, tempFilenameGenerator);
- requestThrottle = new RequestThrottle(5000, 2.0F);
- requestStarter = new RequestStarter(requestThrottle, "Request starter ("+portNumber+")");
- //insertThrottle = new ChainedRequestThrottle(10000, 2.0F, requestThrottle);
- // FIXME reenable the above
- insertThrottle = new RequestThrottle(10000, 2.0F);
- insertStarter = new RequestStarter(insertThrottle, "Insert starter ("+portNumber+")");
- if(testnetHandler != null)
- testnetHandler.start();
- if(statusUploader != null)
- statusUploader.start();
- System.err.println("Created Node on port "+port);
- }
-
- void start(SwapRequestInterval interval) {
- if(interval != null)
- lm.startSender(this, interval);
- ps.start();
- usm.start();
- }
-
- public ClientKeyBlock getKey(ClientKey key, boolean localOnly, RequestStarterClient client, boolean cache) throws LowLevelGetException {
- if(key instanceof ClientSSK) {
- ClientSSK k = (ClientSSK) key;
- if(k.getPubKey() != null)
- cacheKey(k.pubKeyHash, k.getPubKey());
- }
- if(localOnly)
- return realGetKey(key, localOnly, cache);
- else
- return client.getKey(key, localOnly, cache);
- }
-
- public ClientKeyBlock realGetKey(ClientKey key, boolean localOnly, boolean cache) throws LowLevelGetException {
- if(key instanceof ClientCHK)
- return realGetCHK((ClientCHK)key, localOnly, cache);
- else if(key instanceof ClientSSK)
- return realGetSSK((ClientSSK)key, localOnly, cache);
- else
- throw new IllegalArgumentException("Not a CHK or SSK: "+key);
- }
-
- /**
- * Really trivially simple client interface.
- * Either it succeeds or it doesn't.
- */
- ClientCHKBlock realGetCHK(ClientCHK key, boolean localOnly, boolean cache) throws LowLevelGetException {
- long startTime = System.currentTimeMillis();
- long uid = random.nextLong();
- if(!lockUID(uid)) {
- Logger.error(this, "Could not lock UID just randomly generated:
"+uid+" - probably indicates broken PRNG");
- throw new
LowLevelGetException(LowLevelGetException.INTERNAL_ERROR);
- }
- Object o = makeRequestSender(key.getNodeCHK(), MAX_HTL, uid, null,
lm.loc.getValue(), localOnly, cache);
- if(o instanceof CHKBlock) {
- try {
- return new ClientCHKBlock((CHKBlock)o, key);
- } catch (CHKVerifyException e) {
- Logger.error(this, "Does not verify: "+e, e);
- throw new LowLevelGetException(LowLevelGetException.DECODE_FAILED);
- }
- }
- if(o == null) {
- throw new LowLevelGetException(LowLevelGetException.DATA_NOT_FOUND_IN_STORE);
- }
- RequestSender rs = (RequestSender)o;
- boolean rejectedOverload = false;
- while(true) {
- if(rs.waitUntilStatusChange() && (!rejectedOverload)) {
- requestThrottle.requestRejectedOverload();
- rejectedOverload = true;
- }
-
- int status = rs.getStatus();
-
- if(status == RequestSender.NOT_FINISHED)
- continue;
-
- if(status == RequestSender.TIMED_OUT ||
- status == RequestSender.GENERATED_REJECTED_OVERLOAD) {
- if(!rejectedOverload) {
- requestThrottle.requestRejectedOverload();
- rejectedOverload = true;
- }
- } else {
- if(status == RequestSender.DATA_NOT_FOUND ||
- status == RequestSender.SUCCESS ||
- status == RequestSender.ROUTE_NOT_FOUND ||
- status == RequestSender.VERIFY_FAILURE) {
- long rtt = System.currentTimeMillis() - startTime;
- requestThrottle.requestCompleted(rtt);
- }
- }
-
- if(rs.getStatus() == RequestSender.SUCCESS) {
- try {
- return new ClientCHKBlock(rs.getPRB().getBlock(), rs.getHeaders(), key, true);
- } catch (CHKVerifyException e) {
- Logger.error(this, "Does not verify: "+e, e);
- throw new LowLevelGetException(LowLevelGetException.DECODE_FAILED);
- } catch (AbortedException e) {
- Logger.error(this, "Impossible: "+e, e);
- throw new LowLevelGetException(LowLevelGetException.INTERNAL_ERROR);
- }
- } else {
- switch(rs.getStatus()) {
- case RequestSender.NOT_FINISHED:
- Logger.error(this, "RS still running in
getCHK!: "+rs);
- throw new
LowLevelGetException(LowLevelGetException.INTERNAL_ERROR);
- case RequestSender.DATA_NOT_FOUND:
- throw new
LowLevelGetException(LowLevelGetException.DATA_NOT_FOUND);
- case RequestSender.ROUTE_NOT_FOUND:
- throw new
LowLevelGetException(LowLevelGetException.ROUTE_NOT_FOUND);
- case RequestSender.TRANSFER_FAILED:
- throw new
LowLevelGetException(LowLevelGetException.TRANSFER_FAILED);
- case RequestSender.VERIFY_FAILURE:
- throw new
LowLevelGetException(LowLevelGetException.VERIFY_FAILED);
- case RequestSender.GENERATED_REJECTED_OVERLOAD:
- case RequestSender.TIMED_OUT:
- throw new
LowLevelGetException(LowLevelGetException.REJECTED_OVERLOAD);
- case RequestSender.INTERNAL_ERROR:
- throw new
LowLevelGetException(LowLevelGetException.INTERNAL_ERROR);
- default:
- Logger.error(this, "Unknown RequestSender code
in getCHK: "+rs.getStatus()+" on "+rs);
- throw new
LowLevelGetException(LowLevelGetException.INTERNAL_ERROR);
- }
- }
- }
- }
-
- /**
- * Really trivially simple client interface.
- * Either it succeeds or it doesn't.
- */
- ClientSSKBlock realGetSSK(ClientSSK key, boolean localOnly, boolean cache) throws LowLevelGetException {
- long startTime = System.currentTimeMillis();
- long uid = random.nextLong();
- if(!lockUID(uid)) {
- Logger.error(this, "Could not lock UID just randomly generated:
"+uid+" - probably indicates broken PRNG");
- throw new
LowLevelGetException(LowLevelGetException.INTERNAL_ERROR);
- }
- Object o = makeRequestSender(key.getNodeKey(), MAX_HTL, uid, null,
lm.loc.getValue(), localOnly, cache);
- if(o instanceof SSKBlock) {
- try {
- SSKBlock block = (SSKBlock)o;
- key.setPublicKey(block.getPubKey());
- return new ClientSSKBlock(block, key);
- } catch (SSKVerifyException e) {
- Logger.error(this, "Does not verify: "+e, e);
- throw new LowLevelGetException(LowLevelGetException.DECODE_FAILED);
- }
- }
- if(o == null) {
- throw new LowLevelGetException(LowLevelGetException.DATA_NOT_FOUND_IN_STORE);
- }
- RequestSender rs = (RequestSender)o;
- boolean rejectedOverload = false;
- while(true) {
- if(rs.waitUntilStatusChange() && (!rejectedOverload)) {
- requestThrottle.requestRejectedOverload();
- rejectedOverload = true;
- }
-
- int status = rs.getStatus();
-
- if(status == RequestSender.NOT_FINISHED)
- continue;
-
- if(status == RequestSender.TIMED_OUT ||
- status == RequestSender.GENERATED_REJECTED_OVERLOAD) {
- if(!rejectedOverload) {
- requestThrottle.requestRejectedOverload();
- rejectedOverload = true;
- }
- } else {
- if(status == RequestSender.DATA_NOT_FOUND ||
- status == RequestSender.SUCCESS ||
- status == RequestSender.ROUTE_NOT_FOUND ||
- status == RequestSender.VERIFY_FAILURE) {
- long rtt = System.currentTimeMillis() - startTime;
- requestThrottle.requestCompleted(rtt);
- }
- }
-
- if(rs.getStatus() == RequestSender.SUCCESS) {
- try {
- SSKBlock block = rs.getSSKBlock();
- key.setPublicKey(block.getPubKey());
- return new ClientSSKBlock(block, key);
- } catch (SSKVerifyException e) {
- Logger.error(this, "Does not verify: "+e, e);
- throw new LowLevelGetException(LowLevelGetException.DECODE_FAILED);
- }
- } else {
- switch(rs.getStatus()) {
- case RequestSender.NOT_FINISHED:
- Logger.error(this, "RS still running in
getCHK!: "+rs);
- throw new
LowLevelGetException(LowLevelGetException.INTERNAL_ERROR);
- case RequestSender.DATA_NOT_FOUND:
- throw new
LowLevelGetException(LowLevelGetException.DATA_NOT_FOUND);
- case RequestSender.ROUTE_NOT_FOUND:
- throw new
LowLevelGetException(LowLevelGetException.ROUTE_NOT_FOUND);
- case RequestSender.TRANSFER_FAILED:
- Logger.error(this, "WTF? Transfer failed on an
SSK? on "+uid);
- throw new
LowLevelGetException(LowLevelGetException.TRANSFER_FAILED);
- case RequestSender.VERIFY_FAILURE:
- throw new
LowLevelGetException(LowLevelGetException.VERIFY_FAILED);
- case RequestSender.GENERATED_REJECTED_OVERLOAD:
- case RequestSender.TIMED_OUT:
- throw new
LowLevelGetException(LowLevelGetException.REJECTED_OVERLOAD);
- case RequestSender.INTERNAL_ERROR:
- default:
- Logger.error(this, "Unknown RequestSender code
in getCHK: "+rs.getStatus()+" on "+rs);
- throw new
LowLevelGetException(LowLevelGetException.INTERNAL_ERROR);
- }
- }
- }
- }
-
- public void putKey(ClientKeyBlock block, RequestStarterClient client, boolean cache) throws LowLevelPutException {
- client.putKey(block, cache);
- }
-
- public void realPut(ClientKeyBlock block, boolean cache) throws LowLevelPutException {
- if(block instanceof ClientCHKBlock)
- realPutCHK((ClientCHKBlock)block, cache);
- else if(block instanceof ClientSSKBlock)
- realPutSSK((ClientSSKBlock)block, cache);
- else
- throw new IllegalArgumentException("Unknown put type
"+block.getClass());
- }
-
- public void realPutCHK(ClientCHKBlock block, boolean cache) throws LowLevelPutException {
- byte[] data = block.getData();
- byte[] headers = block.getHeaders();
- PartiallyReceivedBlock prb = new PartiallyReceivedBlock(PACKETS_IN_BLOCK, PACKET_SIZE, data);
- CHKInsertSender is;
- long uid = random.nextLong();
- if(!lockUID(uid)) {
- Logger.error(this, "Could not lock UID just randomly generated:
"+uid+" - probably indicates broken PRNG");
- throw new
LowLevelPutException(LowLevelPutException.INTERNAL_ERROR);
- }
- long startTime = System.currentTimeMillis();
- synchronized(this) {
- if(cache) {
- try {
- chkDatastore.put(block);
- } catch (IOException e) {
- Logger.error(this, "Datastore failure: "+e, e);
- }
- }
- is = makeInsertSender((NodeCHK)block.getClientKey().getNodeKey(),
- MAX_HTL, uid, null, headers, prb, false, lm.getLocation().getValue(), cache);
- }
- boolean hasForwardedRejectedOverload = false;
- // Wait for status
- while(true) {
- synchronized(is) {
- if(is.getStatus() == CHKInsertSender.NOT_FINISHED) {
- try {
- is.wait(5*1000);
- } catch (InterruptedException e) {
- // Ignore
- }
- }
- if(is.getStatus() != CHKInsertSender.NOT_FINISHED) break;
- }
- if((!hasForwardedRejectedOverload) && is.receivedRejectedOverload()) {
- hasForwardedRejectedOverload = true;
- insertThrottle.requestRejectedOverload();
- }
- }
-
- // Wait for completion
- while(true) {
- synchronized(is) {
- if(is.completed()) break;
- try {
- is.wait(10*1000);
- } catch (InterruptedException e) {
- // Go around again
- }
- }
- if(is.anyTransfersFailed() && (!hasForwardedRejectedOverload)) {
- hasForwardedRejectedOverload = true; // not strictly true but same effect
- insertThrottle.requestRejectedOverload();
- }
- }
-
- Logger.minor(this, "Completed "+uid+"
overload="+hasForwardedRejectedOverload+" "+is.getStatusString());
-
- // Finished?
- if(!hasForwardedRejectedOverload) {
- // Is it ours? Did we send a request?
- if(is.sentRequest() && is.uid == uid && (is.getStatus() == CHKInsertSender.ROUTE_NOT_FOUND
- || is.getStatus() == CHKInsertSender.SUCCESS)) {
- // It worked!
- long endTime = System.currentTimeMillis();
- long len = endTime - startTime;
- insertThrottle.requestCompleted(len);
- }
- }
-
- if(is.getStatus() == CHKInsertSender.SUCCESS) {
- Logger.normal(this, "Succeeded inserting "+block);
- return;
- } else {
- int status = is.getStatus();
- String msg = "Failed inserting "+block+" :
"+is.getStatusString();
- if(status == CHKInsertSender.ROUTE_NOT_FOUND)
- msg += " - this is normal on small networks; the data
will still be propagated, but it can't find the 20+ nodes needed for full
success";
- if(is.getStatus() != CHKInsertSender.ROUTE_NOT_FOUND)
- Logger.error(this, msg);
- else
- Logger.normal(this, msg);
- switch(is.getStatus()) {
- case CHKInsertSender.NOT_FINISHED:
- Logger.error(this, "IS still running in putCHK!: "+is);
- throw new LowLevelPutException(LowLevelPutException.INTERNAL_ERROR);
- case CHKInsertSender.GENERATED_REJECTED_OVERLOAD:
- case CHKInsertSender.TIMED_OUT:
- throw new LowLevelPutException(LowLevelPutException.REJECTED_OVERLOAD);
- case CHKInsertSender.ROUTE_NOT_FOUND:
- throw new LowLevelPutException(LowLevelPutException.ROUTE_NOT_FOUND);
- case CHKInsertSender.ROUTE_REALLY_NOT_FOUND:
- throw new LowLevelPutException(LowLevelPutException.ROUTE_REALLY_NOT_FOUND);
- case CHKInsertSender.INTERNAL_ERROR:
- throw new LowLevelPutException(LowLevelPutException.INTERNAL_ERROR);
- default:
- Logger.error(this, "Unknown CHKInsertSender code in putCHK: "+is.getStatus()+" on "+is);
- throw new LowLevelPutException(LowLevelPutException.INTERNAL_ERROR);
- }
- }
- }
-
- public void realPutSSK(ClientSSKBlock block, boolean cache) throws LowLevelPutException {
- byte[] data = block.getRawData();
- byte[] headers = block.getRawHeaders();
- SSKInsertSender is;
- long uid = random.nextLong();
- if(!lockUID(uid)) {
- Logger.error(this, "Could not lock UID just randomly generated:
"+uid+" - probably indicates broken PRNG");
- throw new
LowLevelPutException(LowLevelPutException.INTERNAL_ERROR);
- }
- long startTime = System.currentTimeMillis();
- synchronized(this) {
- if(cache) {
- try {
- sskDatastore.put(block);
- } catch (IOException e) {
- Logger.error(this, "Datastore failure: "+e, e);
- }
- }
- is = makeInsertSender(block,
- MAX_HTL, uid, null, false, lm.getLocation().getValue(), cache);
- }
- boolean hasForwardedRejectedOverload = false;
- // Wait for status
- while(true) {
- synchronized(is) {
- if(is.getStatus() == SSKInsertSender.NOT_FINISHED) {
- try {
- is.wait(5*1000);
- } catch (InterruptedException e) {
- // Ignore
- }
- }
- if(is.getStatus() != SSKInsertSender.NOT_FINISHED) break;
- }
- if((!hasForwardedRejectedOverload) &&
is.receivedRejectedOverload()) {
- hasForwardedRejectedOverload = true;
- insertThrottle.requestRejectedOverload();
- }
- }
-
- // Wait for completion
- while(true) {
- synchronized(is) {
- if(is.getStatus() != SSKInsertSender.NOT_FINISHED) break;
- try {
- is.wait(10*1000);
- } catch (InterruptedException e) {
- // Go around again
- }
- }
- }
-
- Logger.minor(this, "Completed "+uid+"
overload="+hasForwardedRejectedOverload+" "+is.getStatusString());
-
- // Finished?
- if(!hasForwardedRejectedOverload) {
- // Is it ours? Did we send a request?
- if(is.sentRequest() && is.uid == uid && (is.getStatus() == SSKInsertSender.ROUTE_NOT_FOUND
- || is.getStatus() == SSKInsertSender.SUCCESS)) {
- // It worked!
- long endTime = System.currentTimeMillis();
- long len = endTime - startTime;
- insertThrottle.requestCompleted(len);
- }
- }
-
- if(is.hasCollided()) {
- // Store it locally so it can be fetched immediately; it overwrites anything inserted locally.
- store(is.getBlock());
- throw new LowLevelPutException(LowLevelPutException.COLLISION);
- }
-
- if(is.getStatus() == SSKInsertSender.SUCCESS) {
- Logger.normal(this, "Succeeded inserting "+block);
- return;
- } else {
- int status = is.getStatus();
- String msg = "Failed inserting "+block+" :
"+is.getStatusString();
- if(status == CHKInsertSender.ROUTE_NOT_FOUND)
- msg += " - this is normal on small networks; the data
will still be propagated, but it can't find the 20+ nodes needed for full
success";
- if(is.getStatus() != SSKInsertSender.ROUTE_NOT_FOUND)
- Logger.error(this, msg);
- else
- Logger.normal(this, msg);
- switch(is.getStatus()) {
- case SSKInsertSender.NOT_FINISHED:
- Logger.error(this, "IS still running in putSSK!: "+is);
- throw new LowLevelPutException(LowLevelPutException.INTERNAL_ERROR);
- case SSKInsertSender.GENERATED_REJECTED_OVERLOAD:
- case SSKInsertSender.TIMED_OUT:
- throw new LowLevelPutException(LowLevelPutException.REJECTED_OVERLOAD);
- case SSKInsertSender.ROUTE_NOT_FOUND:
- throw new LowLevelPutException(LowLevelPutException.ROUTE_NOT_FOUND);
- case SSKInsertSender.ROUTE_REALLY_NOT_FOUND:
- throw new LowLevelPutException(LowLevelPutException.ROUTE_REALLY_NOT_FOUND);
- case SSKInsertSender.INTERNAL_ERROR:
- throw new LowLevelPutException(LowLevelPutException.INTERNAL_ERROR);
- default:
- Logger.error(this, "Unknown SSKInsertSender code in putSSK: "+is.getStatus()+" on "+is);
- throw new LowLevelPutException(LowLevelPutException.INTERNAL_ERROR);
- }
- }
- }
-
- long lastAcceptedRequest = -1;
-
- public synchronized boolean shouldRejectRequest() {
- long now = System.currentTimeMillis();
- double pingTime = nodePinger.averagePingTime();
- if(pingTime > MAX_PING_TIME) {
- if(now - lastAcceptedRequest > MAX_INTERREQUEST_TIME) {
- lastAcceptedRequest = now;
- return false;
- }
- return true;
- }
- if(pingTime > SUB_MAX_PING_TIME) {
- double x = (pingTime - SUB_MAX_PING_TIME) / (MAX_PING_TIME - SUB_MAX_PING_TIME);
- if(random.nextDouble() < x)
- return true;
- }
- lastAcceptedRequest = now;
- return false;
- }
-
- /**
- * Export my reference so that another node can connect to me.
- * @return This node's reference as a SimpleFieldSet.
- */
- public SimpleFieldSet exportFieldSet() {
- SimpleFieldSet fs = new SimpleFieldSet();
- fs.put("physical.udp",
getPrimaryIPAddress().getHostAddress()+":"+portNumber);
- fs.put("identity", HexUtil.bytesToHex(myIdentity));
- fs.put("location", Double.toString(lm.getLocation().getValue()));
- fs.put("version", Version.getVersionString());
- fs.put("testnet", Boolean.toString(testnetEnabled));
- fs.put("lastGoodVersion", Version.getLastGoodVersionString());
- if(testnetEnabled)
- fs.put("testnetPort", Integer.toString(testnetPort));
- fs.put("myName", myName);
- Logger.minor(this, "My reference: "+fs);
- return fs;
- }
-
- InetAddress overrideIPAddress;
-
- /**
- * @return Our current main IP address.
- * FIXME - we should support more than 1, and we should do the
- * detection properly with NetworkInterface, and we should use
- * third parties if available and UP&P if available.
- */
- private InetAddress getPrimaryIPAddress() {
- if(overrideIPAddress != null) {
- Logger.minor(this, "Returning overridden address:
"+overrideIPAddress);
- return overrideIPAddress;
- }
- Logger.minor(this, "IP address not overridden");
- return ipDetector.getAddress();
- }
-
- /**
- * Do a routed ping of another node on the network by its location.
- * @param loc2 The location of the other node to ping. It must match
- * exactly.
- * @return The number of hops it took to find the node, if it was found.
- * Otherwise -1.
- */
- public int routedPing(double loc2) {
- long uid = random.nextLong();
- int initialX = random.nextInt();
- Message m = DMT.createFNPRoutedPing(uid, loc2, MAX_HTL, initialX);
- Logger.normal(this, "Message: "+m);
-
- dispatcher.handleRouted(m);
- // FIXME: might be rejected
- MessageFilter mf1 = MessageFilter.create().setField(DMT.UID, uid).setType(DMT.FNPRoutedPong).setTimeout(5000);
- try {
- //MessageFilter mf2 = MessageFilter.create().setField(DMT.UID, uid).setType(DMT.FNPRoutedRejected).setTimeout(5000);
- // Ignore Rejected - let it be retried on other peers
- m = usm.waitFor(mf1/*.or(mf2)*/);
- } catch (DisconnectedException e) {
- Logger.normal(this, "Disconnected in waiting for pong");
- return -1;
- }
- if(m == null) return -1;
- if(m.getSpec() == DMT.FNPRoutedRejected) return -1;
- return m.getInt(DMT.COUNTER) - initialX;
- }
-
- /**
- * Check the datastore, then if the key is not in the store,
- * check whether another node is requesting the same key at
- * the same HTL, and if all else fails, create a new
- * RequestSender for the key/htl.
- * @param closestLocation The closest location to the key so far.
- * @param localOnly If true, only check the datastore.
- * @return A KeyBlock if the data is in the store, otherwise
- * a RequestSender, unless the HTL is 0, in which case null.
- */
- public synchronized Object makeRequestSender(Key key, short htl, long uid, PeerNode source, double closestLocation, boolean localOnly, boolean cache) {
- Logger.minor(this, "makeRequestSender("+key+","+htl+","+uid+","+source+") on "+portNumber);
- // In store?
- KeyBlock chk = null;
- try {
- if(key instanceof NodeCHK)
- chk = chkDatastore.fetch((NodeCHK)key, !cache);
- else if(key instanceof NodeSSK) {
- NodeSSK k = (NodeSSK)key;
- DSAPublicKey pubKey = k.getPubKey();
- if(pubKey == null) {
- pubKey = getKey(k.getPubKeyHash());
- Logger.minor(this, "Got pubkey: "+pubKey+"
"+(pubKey == null ? "" : pubKey.writeAsField()));
- try {
- k.setPubKey(pubKey);
- } catch (SSKVerifyException e) {
- Logger.error(this, "Error
setting pubkey: "+e, e);
- }
- }
- if(pubKey != null) {
- Logger.minor(this, "Got pubkey: "+pubKey+"
"+pubKey.writeAsField());
- chk = sskDatastore.fetch((NodeSSK)key, !cache);
- } else {
- Logger.minor(this, "Not found because no
pubkey: "+uid);
- }
- } else
- throw new IllegalStateException("Unknown key type:
"+key.getClass());
- } catch (IOException e) {
- Logger.error(this, "Error accessing store: "+e, e);
- }
- if(chk != null) return chk;
- if(localOnly) return null;
- Logger.minor(this, "Not in store locally");
-
- // Transfer coalescing - match key only as HTL irrelevant
- RequestSender sender = (RequestSender) transferringRequestSenders.get(key);
- if(sender != null) {
- Logger.minor(this, "Data already being transferred: "+sender);
- return sender;
- }
-
- // HTL == 0 => Don't search further
- if(htl == 0) {
- Logger.minor(this, "No HTL");
- return null;
- }
-
- // Request coalescing
- KeyHTLPair kh = new KeyHTLPair(key, htl);
- sender = (RequestSender) requestSenders.get(kh);
- if(sender != null) {
- Logger.minor(this, "Found sender: "+sender+" for "+uid);
- return sender;
- }
-
- sender = new RequestSender(key, null, htl, uid, this, closestLocation, source);
- requestSenders.put(kh, sender);
- Logger.minor(this, "Created new sender: "+sender);
- return sender;
- }
-
- static class KeyHTLPair {
- final Key key;
- final short htl;
- KeyHTLPair(Key key, short htl) {
- this.key = key;
- this.htl = htl;
- }
-
- public boolean equals(Object o) {
- if(o instanceof KeyHTLPair) {
- KeyHTLPair p = (KeyHTLPair) o;
- return (p.key.equals(key) && p.htl == htl);
- } else return false;
- }
-
- public int hashCode() {
- return key.hashCode() ^ htl;
- }
-
- public String toString() {
- return key.toString()+":"+htl;
- }
- }
-
- /**
- * Add a RequestSender to our map.
- */
- public synchronized void addSender(Key key, short htl, RequestSender sender) {
- KeyHTLPair kh = new KeyHTLPair(key, htl);
- requestSenders.put(kh, sender);
- }
-
- /**
- * Add a transferring RequestSender.
- */
- public synchronized void addTransferringSender(NodeCHK key, RequestSender sender) {
- transferringRequestSenders.put(key, sender);
- }
-
- public synchronized SSKBlock fetch(NodeSSK key) {
- try {
- return sskDatastore.fetch(key, false);
- } catch (IOException e) {
- Logger.error(this, "Cannot fetch data: "+e, e);
- return null;
- }
- }
-
- public synchronized CHKBlock fetch(NodeCHK key) {
- try {
- return chkDatastore.fetch(key, false);
- } catch (IOException e) {
- Logger.error(this, "Cannot fetch data: "+e, e);
- return null;
- }
- }
-
- /**
- * Store a datum.
- */
- public synchronized void store(CHKBlock block) {
- try {
- chkDatastore.put(block);
- } catch (IOException e) {
- Logger.error(this, "Cannot store data: "+e, e);
- }
- }
-
- public synchronized void store(SSKBlock block) {
- try {
- sskDatastore.put(block);
- cacheKey(((NodeSSK)block.getKey()).getPubKeyHash(), ((NodeSSK)block.getKey()).getPubKey());
- } catch (IOException e) {
- Logger.error(this, "Cannot store data: "+e, e);
- }
- }
-
- /**
- * Remove a sender from the set of currently transferring senders.
- */
- public synchronized void removeTransferringSender(NodeCHK key, RequestSender sender) {
- RequestSender rs = (RequestSender) transferringRequestSenders.remove(key);
- if(rs != sender) {
- Logger.error(this, "Removed "+rs+" should be "+sender+" for "+key+" in removeTransferringSender");
- }
- }
-
- /**
- * Remove a RequestSender from the map.
- */
- public synchronized void removeSender(Key key, short htl, RequestSender sender) {
- KeyHTLPair kh = new KeyHTLPair(key, htl);
- RequestSender rs = (RequestSender) requestSenders.remove(kh);
- if(rs != sender) {
- Logger.error(this, "Removed "+rs+" should be "+sender+" for "+key+","+htl+" in removeSender");
- }
- }
-
- /**
- * Remove an AnyInsertSender from the map.
- */
- public void removeInsertSender(Key key, short htl, AnyInsertSender sender) {
- KeyHTLPair kh = new KeyHTLPair(key, htl);
- AnyInsertSender is = (AnyInsertSender) insertSenders.remove(kh);
- if(is != sender) {
- Logger.error(this, "Removed "+is+" should be "+sender+" for "+key+","+htl+" in removeInsertSender");
- }
- }
-
- final boolean decrementAtMax;
- final boolean decrementAtMin;
-
- /**
- * Decrement the HTL according to the policy of the given
- * PeerNode if it is non-null, or according to this node's
- * own decrementAtMax/decrementAtMin policy if it is null.
- */
- public short decrementHTL(PeerNode source, short htl) {
- if(source != null)
- return source.decrementHTL(htl);
- // Otherwise...
- if(htl >= MAX_HTL) htl = MAX_HTL;
- if(htl <= 0) htl = 1;
- if(htl == MAX_HTL) {
- if(decrementAtMax) htl--;
- return htl;
- }
- if(htl == 1) {
- if(decrementAtMin) htl--;
- return htl;
- }
- return --htl;
- }
-
- /**
- * Fetch or create a CHKInsertSender for a given key/htl.
- * @param key The key to be inserted.
- * @param htl The current HTL. We can't coalesce inserts across
- * HTLs.
- * @param uid The UID of the caller's request chain, or a new
- * one. This is obviously not used if there is already a
- * CHKInsertSender running.
- * @param source The node that sent the InsertRequest, or null
- * if it originated locally.
- */
- public synchronized CHKInsertSender makeInsertSender(NodeCHK key, short htl, long uid, PeerNode source,
- byte[] headers, PartiallyReceivedBlock prb, boolean fromStore, double closestLoc, boolean cache) {
- Logger.minor(this, "makeInsertSender("+key+","+htl+","+uid+","+source+",...,"+fromStore);
- KeyHTLPair kh = new KeyHTLPair(key, htl);
- CHKInsertSender is = (CHKInsertSender) insertSenders.get(kh);
- if(is != null) {
- Logger.minor(this, "Found "+is+" for "+kh);
- return is;
- }
- if(fromStore && !cache)
- throw new IllegalArgumentException("From store = true but cache
= false !!!");
- is = new CHKInsertSender(key, uid, headers, htl, source, this, prb,
fromStore, closestLoc);
- Logger.minor(this, is.toString()+" for "+kh.toString());
- insertSenders.put(kh, is);
- return is;
- }
-
- /**
- * Fetch or create an SSKInsertSender for a given key/htl.
- * @param key The key to be inserted.
- * @param htl The current HTL. We can't coalesce inserts across
- * HTLs.
- * @param uid The UID of the caller's request chain, or a new
- * one. This is obviously not used if there is already an
- * SSKInsertSender running.
- * @param source The node that sent the InsertRequest, or null
- * if it originated locally.
- */
- public synchronized SSKInsertSender makeInsertSender(SSKBlock block, short htl, long uid, PeerNode source,
- boolean fromStore, double closestLoc, boolean cache) {
- NodeSSK key = (NodeSSK) block.getKey();
- if(key.getPubKey() == null) {
- throw new IllegalArgumentException("No pub key when inserting");
- }
- cacheKey(key.getPubKeyHash(), key.getPubKey());
- Logger.minor(this, "makeInsertSender("+key+","+htl+","+uid+","+source+",...,"+fromStore);
- KeyHTLPair kh = new KeyHTLPair(key, htl);
- SSKInsertSender is = (SSKInsertSender) insertSenders.get(kh);
- if(is != null) {
- Logger.minor(this, "Found "+is+" for "+kh);
- return is;
- }
- if(fromStore && !cache)
- throw new IllegalArgumentException("From store = true but cache
= false !!!");
- is = new SSKInsertSender(block, uid, htl, source, this, fromStore,
closestLoc);
- Logger.minor(this, is.toString()+" for "+kh.toString());
- insertSenders.put(kh, is);
- return is;
- }
-
- public boolean lockUID(long uid) {
- Logger.minor(this, "Locking "+uid);
- Long l = new Long(uid);
- synchronized(runningUIDs) {
- if(runningUIDs.contains(l)) return false;
- runningUIDs.add(l);
- return true;
- }
- }
-
- public void unlockUID(long uid) {
- Logger.minor(this, "Unlocking "+uid);
- Long l = new Long(uid);
- completed(uid);
- synchronized(runningUIDs) {
- if(!runningUIDs.remove(l))
- throw new IllegalStateException("Could not unlock "+uid+"!");
- }
- }
-
- /**
- * @return Some status information.
- */
- public String getStatus() {
- StringBuffer sb = new StringBuffer();
- if (peers != null)
- sb.append(peers.getStatus());
- sb.append("\nInserts: ");
- int x = insertSenders.size();
- sb.append(x);
- if(x < 5 && x > 0) {
- sb.append('\n');
- // Dump
- Iterator i = insertSenders.values().iterator();
- while(i.hasNext()) {
- CHKInsertSender s = (CHKInsertSender) i.next();
- sb.append(s.uid);
- sb.append(": ");
- sb.append(s.getStatusString());
- sb.append('\n');
- }
- }
- sb.append("\nRequests: ");
- sb.append(requestSenders.size());
- sb.append("\nTransferring requests: ");
- sb.append(this.transferringRequestSenders.size());
- return sb.toString();
- }
-
- /**
- * @return Data String for freeviz.
- */
- public String getFreevizOutput() {
- StringBuffer sb = new StringBuffer();
- sb.append("\nrequests=");
- sb.append(requestSenders.size());
-
- sb.append("\ntransferring_requests=");
- sb.append(this.transferringRequestSenders.size());
-
- sb.append("\ninserts=");
- sb.append(this.insertSenders.size());
- sb.append("\n");
-
-
- if (peers != null)
- sb.append(peers.getFreevizOutput());
-
- return sb.toString();
- }
-
- /**
- * @return Our reference, compressed
- */
- public byte[] myRefCompressed() {
- SimpleFieldSet fs = exportFieldSet();
- ByteArrayOutputStream baos = new ByteArrayOutputStream();
- OutputStreamWriter osw = new OutputStreamWriter(baos);
- try {
- fs.writeTo(osw);
- } catch (IOException e) {
- throw new Error(e);
- }
- try {
- osw.flush();
- } catch (IOException e1) {
- throw new Error(e1);
- }
- byte[] buf = baos.toByteArray();
- byte[] obuf = new byte[buf.length + 1];
- obuf[0] = 0;
- System.arraycopy(buf, 0, obuf, 1, buf.length);
- return obuf;
- // FIXME support compression when noderefs get big enough for it to be useful
- }
-
- final LRUQueue recentlyCompletedIDs;
-
- static final int MAX_RECENTLY_COMPLETED_IDS = 10*1000;
-
- /**
- * Has a request completed with this ID recently?
- */
- public synchronized boolean recentlyCompleted(long id) {
- return recentlyCompletedIDs.contains(new Long(id));
- }
-
- /**
- * A request completed (regardless of success).
- */
- public synchronized void completed(long id) {
- recentlyCompletedIDs.push(new Long(id));
- while(recentlyCompletedIDs.size() > MAX_RECENTLY_COMPLETED_IDS)
- recentlyCompletedIDs.pop();
- }
-
- public synchronized void setName(String key) {
- myName = key;
- writeNodeFile();
- }
-
- public HighLevelSimpleClient makeClient(short prioClass, short prio) {
- return new HighLevelSimpleClientImpl(this, archiveManager, tempBucketFactory, random,
- makeStarterClient(prioClass, prio, false), makeStarterClient(prioClass, prio, true), !DONT_CACHE_LOCAL_REQUESTS);
- }
-
- private static class MemoryChecker implements Runnable {
-
- public void run() {
- Runtime r = Runtime.getRuntime();
- while(true) {
- try {
- Thread.sleep(1000);
- } catch (InterruptedException e) {
- // Ignore
- }
- Logger.minor(this, "Memory in use:
"+(r.totalMemory()-r.freeMemory()));
- }
- }
- }
-
- public RequestThrottle getRequestThrottle() {
- return requestThrottle;
- }
-
- public RequestThrottle getInsertThrottle() {
- return insertThrottle;
- }
-
- public RequestStarterClient makeStarterClient(short prioClass, short prio, boolean inserts) {
- return new RequestStarterClient(prioClass, prio, random, this, inserts ? insertStarter : requestStarter);
- }
-
- InetAddress lastIP;
-
- public void redetectAddress() {
- InetAddress newIP = ipDetector.getAddress();
- if(newIP.equals(lastIP)) return;
- lastIP = newIP; // remember what we last wrote, so we only rewrite on change
- writeNodeFile();
- }
-
- /**
- * Look up a cached public key by its hash.
- */
- public DSAPublicKey getKey(byte[] hash) {
- ImmutableByteArrayWrapper w = new ImmutableByteArrayWrapper(hash);
- synchronized(cachedPubKeys) {
- DSAPublicKey key = (DSAPublicKey) cachedPubKeys.get(w);
- if(key != null) {
- cachedPubKeys.push(w, key);
- return key;
- }
- }
- try {
- DSAPublicKey key = pubKeyDatastore.fetchPubKey(hash, false);
- if(key != null) {
- cacheKey(hash, key);
- }
- return key;
- } catch (IOException e) {
- // FIXME deal with disk full, access perms etc; tell user about it.
- Logger.error(this, "Error accessing pubkey store: "+e, e);
- return null;
- }
- }
-
- /**
- * Cache a public key
- */
- public void cacheKey(byte[] hash, DSAPublicKey key) {
- ImmutableByteArrayWrapper w = new ImmutableByteArrayWrapper(hash);
- synchronized(cachedPubKeys) {
- DSAPublicKey key2 = (DSAPublicKey) cachedPubKeys.get(w);
- if(key2 != null && !key2.equals(key)) {
- MessageDigest md256;
- // Check the hash.
- try {
- md256 = MessageDigest.getInstance("SHA-256");
- } catch (NoSuchAlgorithmException e) {
- throw new Error(e);
- }
- byte[] hashCheck = md256.digest(key.asBytes());
- if(Arrays.equals(hashCheck, hash)) {
- Logger.error(this, "Hash is correct!!!");
- // Verify the old key
- byte[] oldHash = md256.digest(key2.asBytes());
- if(Arrays.equals(oldHash, hash)) {
- Logger.error(this, "Old hash is correct too!! - Bug in DSAPublicKey.equals() or SHA-256 collision!");
- } else {
- Logger.error(this, "Old hash is wrong!");
- cachedPubKeys.removeKey(w);
- cacheKey(hash, key);
- }
- } else {
- Logger.error(this, "New hash is wrong");
- }
- throw new IllegalArgumentException("Wrong hash?? Already have different key with same hash!");
- }
- cachedPubKeys.push(w, key);
- while(cachedPubKeys.size() > MAX_CACHED_KEYS)
- cachedPubKeys.popKey();
- }
- try {
- pubKeyDatastore.put(hash, key);
- } catch (IOException e) {
- // FIXME deal with disk full, access perms etc; tell user about it.
- Logger.error(this, "Error accessing pubkey store: "+e, e);
- }
- }
-}
Copied: branches/async-client-layer/src/freenet/node/Node.java (from rev 7873, trunk/freenet/src/freenet/node/Node.java)
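
For context: shouldRejectRequest() above is the node's entire admission controller. Below SUB_MAX_PING_TIME everything is accepted; above MAX_PING_TIME everything is rejected except one request per MAX_INTERREQUEST_TIME; in between, requests are rejected with linearly increasing probability. A minimal standalone sketch of that ramp (the millisecond constants here are hypothetical, not the node's real configuration):

    import java.util.Random;

    /** Sketch of the linear rejection ramp in Node.shouldRejectRequest(). */
    class LoadLimiterSketch {
        static final double SUB_MAX_PING_TIME = 700;  // ms, hypothetical
        static final double MAX_PING_TIME = 1400;     // ms, hypothetical
        private final Random random = new Random();

        boolean shouldReject(double pingTime) {
            if(pingTime > MAX_PING_TIME)
                return true; // the real code still admits one request per MAX_INTERREQUEST_TIME
            if(pingTime > SUB_MAX_PING_TIME) {
                // Rejection probability rises linearly from 0 at SUB_MAX_PING_TIME
                // to 1 at MAX_PING_TIME.
                double x = (pingTime - SUB_MAX_PING_TIME) / (MAX_PING_TIME - SUB_MAX_PING_TIME);
                return random.nextDouble() < x;
            }
            return false;
        }
    }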
Deleted: branches/async-client-layer/src/freenet/node/QueuedDataRequest.java
===================================================================
--- trunk/freenet/src/freenet/node/QueuedDataRequest.java	2006-01-18 00:45:46 UTC (rev 7871)
+++ branches/async-client-layer/src/freenet/node/QueuedDataRequest.java	2006-01-20 19:03:11 UTC (rev 7885)
@@ -1,26 +0,0 @@
-package freenet.node;
-
-import freenet.keys.ClientKey;
-import freenet.keys.ClientKeyBlock;
-import freenet.keys.KeyBlock;
-
-public class QueuedDataRequest extends QueuedRequest {
-
- private final ClientKey key;
- private final boolean localOnly;
- private final boolean cache;
- private QueueingSimpleLowLevelClient client;
-
- public QueuedDataRequest(ClientKey key, boolean localOnly, boolean cache, QueueingSimpleLowLevelClient client) {
- this.key = key;
- this.localOnly = localOnly;
- this.client = client;
- this.cache = cache;
- }
-
- public ClientKeyBlock waitAndFetch() throws LowLevelGetException {
- waitForSendClearance();
- return client.realGetKey(key, localOnly, cache);
- }
-
-}
Copied: branches/async-client-layer/src/freenet/node/QueuedDataRequest.java (from rev 7873, trunk/freenet/src/freenet/node/QueuedDataRequest.java)
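
QueuedDataRequest.waitAndFetch() blocks on QueuedRequest.waitForSendClearance(), which is not part of this revision. A minimal sketch of how such a clearance gate is typically built, assuming the real QueuedRequest works along these lines (names and details hypothetical):

    /** Hypothetical sketch of the gate QueuedDataRequest waits on. */
    abstract class QueuedRequestSketch {
        private boolean clearToSend = false;

        /** Called by the request starter when this request may proceed. */
        synchronized void clearToSend() {
            clearToSend = true;
            notifyAll();
        }

        /** Called by the requesting thread; blocks until clearance is granted. */
        synchronized void waitForSendClearance() {
            while(!clearToSend) {
                try {
                    wait();
                } catch (InterruptedException e) {
                    // Loop and re-check the condition.
                }
            }
        }
    }

This matches the wake-up side visible later in this commit: RequestStarterClient.send() picks a queued request and calls qr.clearToSend().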
Deleted: branches/async-client-layer/src/freenet/node/QueueingSimpleLowLevelClient.java
===================================================================
--- trunk/freenet/src/freenet/node/QueueingSimpleLowLevelClient.java	2006-01-18 00:45:46 UTC (rev 7871)
+++ branches/async-client-layer/src/freenet/node/QueueingSimpleLowLevelClient.java	2006-01-20 19:03:11 UTC (rev 7885)
@@ -1,17 +0,0 @@
-package freenet.node;
-
-import freenet.client.InsertBlock;
-import freenet.keys.ClientCHKBlock;
-import freenet.keys.ClientKey;
-import freenet.keys.ClientKeyBlock;
-import freenet.keys.KeyBlock;
-
-interface QueueingSimpleLowLevelClient extends SimpleLowLevelClient {
-
- /** Unqueued version. Only call from QueuedDataRequest! */
- ClientKeyBlock realGetKey(ClientKey key, boolean localOnly, boolean cache) throws LowLevelGetException;
-
- /** Ditto */
- void realPut(ClientKeyBlock block, boolean cache) throws LowLevelPutException;
-
-}
Copied: branches/async-client-layer/src/freenet/node/QueueingSimpleLowLevelClient.java (from rev 7873, trunk/freenet/src/freenet/node/QueueingSimpleLowLevelClient.java)
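
The realGetKey()/realPut() split exists so that all scheduling happens in one place: callers go through RequestStarterClient, which queues the request and only then calls back into the unqueued method. A sketch of the intended call path, assuming the classes in this revision are on the classpath:

    // Hypothetical caller. RequestStarterClient.getKey() enqueues a QueuedDataRequest,
    // blocks in waitForSendClearance(), then invokes QueueingSimpleLowLevelClient.realGetKey().
    ClientKeyBlock fetchViaStarter(RequestStarterClient starter, ClientKey key)
            throws LowLevelGetException {
        return starter.getKey(key, false, true); // localOnly=false, cache=true
    }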
Deleted: branches/async-client-layer/src/freenet/node/RealNodeRequestInsertTest.java
===================================================================
--- trunk/freenet/src/freenet/node/RealNodeRequestInsertTest.java	2006-01-18 00:45:46 UTC (rev 7871)
+++ branches/async-client-layer/src/freenet/node/RealNodeRequestInsertTest.java	2006-01-20 19:03:11 UTC (rev 7885)
@@ -1,212 +0,0 @@
-package freenet.node;
-
-import java.io.File;
-import java.util.Arrays;
-
-import freenet.crypt.DiffieHellman;
-import freenet.crypt.DummyRandomSource;
-import freenet.io.comm.PeerParseException;
-import freenet.keys.CHKEncodeException;
-import freenet.keys.ClientCHK;
-import freenet.keys.ClientCHKBlock;
-import freenet.keys.ClientKey;
-import freenet.node.PeerNode;
-import freenet.support.Fields;
-import freenet.support.FileLoggerHook;
-import freenet.support.HexUtil;
-import freenet.support.Logger;
-import freenet.support.SimpleFieldSet;
-import freenet.support.math.BootstrappingDecayingRunningAverage;
-import freenet.support.math.RunningAverage;
-import freenet.support.math.SimpleRunningAverage;
-
-/**
- * @author amphibian
- */
-public class RealNodeRequestInsertTest {
-
- static final int NUMBER_OF_NODES = 10;
-
- public static void main(String[] args) throws FSParseException, PeerParseException, CHKEncodeException {
- PeerNode.disableProbabilisticHTLs = true;
- String wd = "realNodeRequestInsertTest";
- new File(wd).mkdir();
- // Don't clobber nearby nodes!
- Node.MAX_HTL = 5;
- FileLoggerHook fh = Logger.setupStdoutLogging(Logger.DEBUG, "freenet.store:minor,freenet.node.Location:normal" /*"freenet.node.LocationManager:debug,freenet.node.FNPPacketManager:normal,freenet.io.comm.UdpSocketManager:debug"*/);
- Logger.globalSetThreshold(Logger.DEBUG);
- System.out.println("Insert/retrieve test");
- System.out.println();
- DummyRandomSource random = new DummyRandomSource();
- DiffieHellman.init(random);
- Node[] nodes = new Node[NUMBER_OF_NODES];
- Logger.normal(RealNodeRoutingTest.class, "Creating nodes...");
- for(int i=0;i<NUMBER_OF_NODES;i++) {
- nodes[i] = new Node(5000+i, random, null, wd+File.separator, 0, false, fh, 100);
- nodes[i].usm.setDropProbability(20); // 5%
- Logger.normal(RealNodeRoutingTest.class, "Created node "+i);
- }
- SimpleFieldSet refs[] = new SimpleFieldSet[NUMBER_OF_NODES];
- for(int i=0;i<NUMBER_OF_NODES;i++)
- refs[i] = nodes[i].exportFieldSet();
- Logger.normal(RealNodeRoutingTest.class, "Created "+NUMBER_OF_NODES+"
nodes");
- // Now link them up
- // Connect the set
- for(int i=0;i<NUMBER_OF_NODES;i++) {
- int next = (i+1) % NUMBER_OF_NODES;
- int prev = (i+NUMBER_OF_NODES-1)%NUMBER_OF_NODES;
- nodes[i].peers.connect(refs[next]);
- nodes[i].peers.connect(refs[prev]);
- }
- Logger.normal(RealNodeRoutingTest.class, "Connected nodes");
- // Now add some random links
- for(int i=0;i<NUMBER_OF_NODES*5;i++) {
- if(i % NUMBER_OF_NODES == 0)
- Logger.normal(RealNodeRoutingTest.class, ""+i);
- int length = (int)Math.pow(NUMBER_OF_NODES, random.nextDouble());
- int nodeA = random.nextInt(NUMBER_OF_NODES);
- int nodeB = (nodeA+length)%NUMBER_OF_NODES;
- //System.out.println(""+nodeA+" -> "+nodeB);
- Node a = nodes[nodeA];
- Node b = nodes[nodeB];
- a.peers.connect(b.exportFieldSet());
- b.peers.connect(a.exportFieldSet());
- }
-
- RequestStarterClient[] starters = new RequestStarterClient[NUMBER_OF_NODES];
- for(int i=0;i<starters.length;i++)
- starters[i] = nodes[i].makeStarterClient(RequestStarter.INTERACTIVE_PRIORITY_CLASS, (short)0, false); // pretend they are all requests
-
- Logger.normal(RealNodeRoutingTest.class, "Added random links");
-
- SwapRequestInterval sri =
- new CPUAdjustingSwapRequestInterval(((500*1000*NUMBER_OF_NODES)/200), 50);
-
- for(int i=0;i<NUMBER_OF_NODES;i++)
- nodes[i].start(sri);
-
- // Now sit back and watch the fireworks!
- int cycleNumber = 0;
- int lastSwaps = 0;
- int lastNoSwaps = 0;
- int failures = 0;
- int successes = 0;
- RunningAverage avg = new SimpleRunningAverage(100, 0.0);
- RunningAverage avg2 = new BootstrappingDecayingRunningAverage(0.0, 0.0, 1.0, 100);
- int pings = 0;
- while(true) {
- cycleNumber++;
- try {
- Thread.sleep(5000);
- } catch (InterruptedException e) {
- // Ignore
- }
- for(int i=0;i<NUMBER_OF_NODES;i++) {
- Logger.normal(RealNodeRoutingTest.class, "Cycle
"+cycleNumber+" node "+i+": "+nodes[i].lm.getLocation().getValue());
- }
- int newSwaps = LocationManager.swaps;
- int totalStarted = LocationManager.startedSwaps;
- int noSwaps = LocationManager.noSwaps;
- Logger.normal(RealNodeRoutingTest.class, "Swaps:
"+(newSwaps-lastSwaps));
- Logger.normal(RealNodeRoutingTest.class, "\nTotal swaps:
Started*2: "+totalStarted*2+", succeeded: "+newSwaps+", last minute failures:
"+noSwaps+
- ", ratio "+(double)noSwaps/(double)newSwaps+", early
failures: "+((totalStarted*2)-(noSwaps+newSwaps)));
- Logger.normal(RealNodeRoutingTest.class, "This cycle ratio:
"+((double)(noSwaps-lastNoSwaps)) / ((double)(newSwaps - lastSwaps)));
- lastNoSwaps = noSwaps;
- Logger.normal(RealNodeRoutingTest.class, "Swaps rejected (already
locked): "+LocationManager.swapsRejectedAlreadyLocked);
- Logger.normal(RealNodeRoutingTest.class, "Swaps rejected (nowhere
to go): "+LocationManager.swapsRejectedNowhereToGo);
- Logger.normal(RealNodeRoutingTest.class, "Swaps rejected (rate
limit): "+LocationManager.swapsRejectedRateLimit);
- Logger.normal(RealNodeRoutingTest.class, "Swaps rejected (loop):
"+LocationManager.swapsRejectedLoop);
- Logger.normal(RealNodeRoutingTest.class, "Swaps rejected
(recognized ID):" +LocationManager.swapsRejectedRecognizedID);
- lastSwaps = newSwaps;
- // Do some (routed) test-pings
- for(int i=0;i<10;i++) {
- try {
- Thread.sleep(2000);
- } catch (InterruptedException e1) {
- }
- try {
- Node randomNode = nodes[random.nextInt(NUMBER_OF_NODES)];
- Node randomNode2 = randomNode;
- while(randomNode2 == randomNode)
- randomNode2 = nodes[random.nextInt(NUMBER_OF_NODES)];
- Logger.normal(RealNodeRoutingTest.class, "Pinging
"+randomNode2.portNumber+" from "+randomNode.portNumber);
- double loc2 = randomNode2.lm.getLocation().getValue();
- int hopsTaken = randomNode.routedPing(loc2);
- pings++;
- if(hopsTaken < 0) {
- failures++;
- avg.report(0.0);
- avg2.report(0.0);
- double ratio = (double)successes / ((double)(failures+successes));
- Logger.normal(RealNodeRoutingTest.class, "Routed ping "+pings+" FAILED from "+randomNode.portNumber+" to "+randomNode2.portNumber+" (long:"+ratio+", short:"+avg.currentValue()+", vague:"+avg2.currentValue()+")");
- } else {
- successes++;
- avg.report(1.0);
- avg2.report(1.0);
- double ratio = (double)successes / ((double)(failures+successes));
- Logger.normal(RealNodeRoutingTest.class, "Routed ping "+pings+" success: "+hopsTaken+" "+randomNode.portNumber+" to "+randomNode2.portNumber+" (long:"+ratio+", short:"+avg.currentValue()+", vague:"+avg2.currentValue()+")");
- }
- } catch (Throwable t) {
- Logger.error(RealNodeRoutingTest.class, "Caught "+t, t);
- }
- }
- if(pings > 10 && avg.currentValue() > 0.98 && ((double)successes /
((double)(failures+successes)) > 0.98)) {
- break;
- }
- }
- System.out.println();
- System.out.println("Ping average > 98%, lets do some
inserts/requests");
- System.out.println();
- int requestNumber = 0;
- RunningAverage requestsAvg = new SimpleRunningAverage(100, 0.0);
- String baseString = "" + System.currentTimeMillis() + " ";
- while(true) {
- try {
- requestNumber++;
- try {
- Thread.sleep(5000);
- } catch (InterruptedException e1) {
- }
- String dataString = baseString + requestNumber;
- // Pick random node to insert to
- int node1 = random.nextInt(NUMBER_OF_NODES);
- Node randomNode = nodes[node1];
- Logger.error(RealNodeRequestInsertTest.class,"Inserting:
\""+dataString+"\" to "+node1);
- byte[] data = dataString.getBytes();
- ClientCHKBlock block;
- block = ClientCHKBlock.encode(data, false, false, (short)-1,
0);
- ClientCHK chk = (ClientCHK) block.getClientKey();
- byte[] encData = block.getData();
- byte[] encHeaders = block.getHeaders();
- ClientCHKBlock newBlock = new ClientCHKBlock(encData,
encHeaders, chk, true);
- Logger.error(RealNodeRequestInsertTest.class, "Decoded: "+new
String(newBlock.memoryDecode()));
- Logger.error(RealNodeRequestInsertTest.class,"CHK:
"+chk.getURI());
- Logger.error(RealNodeRequestInsertTest.class,"Headers:
"+HexUtil.bytesToHex(block.getHeaders()));
- randomNode.putKey(block, starters[node1], true);
- Logger.error(RealNodeRequestInsertTest.class, "Inserted to
"+node1);
- Logger.error(RealNodeRequestInsertTest.class, "Data:
"+Fields.hashCode(encData)+", Headers: "+Fields.hashCode(encHeaders));
- // Pick random node to request from
- int node2;
- do {
- node2 = random.nextInt(NUMBER_OF_NODES);
- } while(node2 == node1);
- Node fetchNode = nodes[node2];
- block = (ClientCHKBlock) fetchNode.getKey((ClientKey) chk, false, starters[node2], true);
- if(block == null) {
- Logger.error(RealNodeRequestInsertTest.class, "Fetch FAILED from "+node2);
- requestsAvg.report(0.0);
- } else {
- byte[] results = block.memoryDecode();
- requestsAvg.report(1.0);
- if(Arrays.equals(results, data)) {
- Logger.error(RealNodeRequestInsertTest.class, "Fetch
succeeded: "+new String(results));
- } else {
- Logger.error(RealNodeRequestInsertTest.class,
"Returned invalid data!: "+new String(results));
- }
- }
- } catch (Throwable t) {
- Logger.error(RealNodeRequestInsertTest.class, "Caught "+t, t);
- }
- }
- }
-}
Copied: branches/async-client-layer/src/freenet/node/RealNodeRequestInsertTest.java (from rev 7873, trunk/freenet/src/freenet/node/RealNodeRequestInsertTest.java)
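
The test above builds its topology in two passes: a ring connecting each node to its neighbours, then NUMBER_OF_NODES*5 shortcut links whose length is drawn as N^u for uniform u, which biases heavily towards short links and approximates a small-world graph. A standalone sketch of just the topology step (connect() is a stand-in for the peers.connect() calls in the test):

    import java.util.Random;

    class RingPlusShortcutsSketch {
        static void buildTopology(int n, Random random) {
            // Pass 1: ring - node i is connected to i+1 (and therefore i-1) mod n.
            for(int i = 0; i < n; i++)
                connect(i, (i + 1) % n);
            // Pass 2: n*5 shortcuts; (int)Math.pow(n, u) for uniform u in [0,1)
            // yields mostly short links with an occasional long-range one.
            for(int i = 0; i < n * 5; i++) {
                int length = (int) Math.pow(n, random.nextDouble());
                int a = random.nextInt(n);
                connect(a, (a + length) % n);
            }
        }

        static void connect(int a, int b) {
            System.out.println(a + " <-> " + b); // stand-in for nodes[a].peers.connect(refs[b])
        }
    }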
Deleted: branches/async-client-layer/src/freenet/node/RequestHandler.java
===================================================================
--- trunk/freenet/src/freenet/node/RequestHandler.java	2006-01-18 00:45:46 UTC (rev 7871)
+++ branches/async-client-layer/src/freenet/node/RequestHandler.java	2006-01-20 19:03:11 UTC (rev 7885)
@@ -1,185 +0,0 @@
-package freenet.node;
-
-import freenet.crypt.DSAPublicKey;
-import freenet.io.comm.DMT;
-import freenet.io.comm.Message;
-import freenet.io.xfer.BlockTransmitter;
-import freenet.io.xfer.PartiallyReceivedBlock;
-import freenet.keys.CHKBlock;
-import freenet.keys.Key;
-import freenet.keys.KeyBlock;
-import freenet.keys.NodeCHK;
-import freenet.keys.NodeSSK;
-import freenet.keys.SSKBlock;
-import freenet.support.Logger;
-
-/**
- * Handle an incoming request. Does not do the actual fetching; that
- * is separated off into RequestSender so we get transfer coalescing
- * and both ends for free.
- */
-public class RequestHandler implements Runnable {
-
- final Message req;
- final Node node;
- final long uid;
- private short htl;
- final PeerNode source;
- private double closestLoc;
- private boolean needsPubKey;
- final Key key;
-
- public String toString() {
- return super.toString()+" for "+uid;
- }
-
- public RequestHandler(Message m, long id, Node n) {
- req = m;
- node = n;
- uid = id;
- htl = req.getShort(DMT.HTL);
- source = (PeerNode) req.getSource();
- closestLoc = req.getDouble(DMT.NEAREST_LOCATION);
- double myLoc = n.lm.getLocation().getValue();
- // FIXME should be more generic when we implement SSKs
- key = (Key) req.getObject(DMT.FREENET_ROUTING_KEY);
- double keyLoc = key.toNormalizedDouble();
- if(Math.abs(keyLoc - myLoc) < Math.abs(keyLoc - closestLoc))
- closestLoc = myLoc;
- if(key instanceof NodeSSK)
- needsPubKey = m.getBoolean(DMT.NEED_PUB_KEY);
- }
-
- public void run() {
- try {
- Logger.minor(this, "Handling a request: "+uid);
- htl = source.decrementHTL(htl);
-
- Message accepted = DMT.createFNPAccepted(uid);
- source.send(accepted);
-
- Object o = node.makeRequestSender(key, htl, uid, source, closestLoc, false, true);
- if(o instanceof KeyBlock) {
- KeyBlock block = (KeyBlock) o;
- Message df = createDataFound(block);
- source.send(df);
- if(key instanceof NodeSSK) {
- if(needsPubKey) {
- DSAPublicKey key = ((NodeSSK)block.getKey()).getPubKey();
- Message pk = DMT.createFNPSSKPubKey(uid, key.asBytes());
- Logger.minor(this, "Sending PK: "+key+" "+key.writeAsField());
- source.send(pk);
- }
- }
- if(block instanceof CHKBlock) {
- PartiallyReceivedBlock prb =
- new PartiallyReceivedBlock(Node.PACKETS_IN_BLOCK, Node.PACKET_SIZE, block.getRawData());
- BlockTransmitter bt =
- new BlockTransmitter(node.usm, source, uid, prb);
- bt.send();
- }
- return;
- }
- RequestSender rs = (RequestSender) o;
-
- if(rs == null) { // ran out of htl?
- Message dnf = DMT.createFNPDataNotFound(uid);
- source.send(dnf);
- return;
- }
-
- boolean shouldHaveStartedTransfer = false;
-
- while(true) {
-
- if(rs.waitUntilStatusChange()) {
- // Forward RejectedOverload
- Message msg = DMT.createFNPRejectedOverload(uid, false);
- source.sendAsync(msg, null);
- }
-
- if(rs.transferStarted()) {
- // Is a CHK.
- Message df = DMT.createFNPCHKDataFound(uid, rs.getHeaders());
- source.send(df);
- PartiallyReceivedBlock prb = rs.getPRB();
- BlockTransmitter bt =
- new BlockTransmitter(node.usm, source, uid, prb);
- bt.send(); // either fails or succeeds; other side will see, we don't care
- return;
- }
-
- int status = rs.getStatus();
-
- switch(status) {
- case RequestSender.NOT_FINISHED:
- continue;
- case RequestSender.DATA_NOT_FOUND:
- Message dnf = DMT.createFNPDataNotFound(uid);
- source.sendAsync(dnf, null);
- return;
- case RequestSender.GENERATED_REJECTED_OVERLOAD:
- case RequestSender.TIMED_OUT:
- case RequestSender.INTERNAL_ERROR:
- // Locally generated.
- // Propagate back to source who needs to reduce send rate
- Message reject = DMT.createFNPRejectedOverload(uid, true);
- source.sendAsync(reject, null);
- return;
- case RequestSender.ROUTE_NOT_FOUND:
- // Tell source
- Message rnf = DMT.createFNPRouteNotFound(uid, rs.getHTL());
- source.sendAsync(rnf, null);
- return;
- case RequestSender.SUCCESS:
- if(key instanceof NodeSSK) {
- Message df = DMT.createFNPSSKDataFound(uid, rs.getHeaders(), rs.getSSKData());
- source.send(df);
- if(needsPubKey) {
- Message pk = DMT.createFNPSSKPubKey(uid, ((NodeSSK)rs.getSSKBlock().getKey()).getPubKey().asBytes());
- source.send(pk); // send the pubkey message, not the data-found again
- }
- } else if(!rs.transferStarted()) {
- Logger.error(this, "Status is SUCCESS but we never started a transfer on "+uid);
- }
- return;
- case RequestSender.VERIFY_FAILURE:
- if(key instanceof NodeCHK) {
- if(shouldHaveStartedTransfer)
- throw new IllegalStateException("Got status code "+status+" but transfer not started");
- shouldHaveStartedTransfer = true;
- continue; // should have started transfer
- }
- reject = DMT.createFNPRejectedOverload(uid, true);
- source.sendAsync(reject, null);
- return;
- case RequestSender.TRANSFER_FAILED:
- if(key instanceof NodeCHK) {
- if(shouldHaveStartedTransfer)
- throw new IllegalStateException("Got status code "+status+" but transfer not started");
- shouldHaveStartedTransfer = true;
- continue; // should have started transfer
- }
- // Other side knows, right?
- return;
- default:
- throw new IllegalStateException("Unknown status code "+status);
- }
- }
- } catch (Throwable t) {
- Logger.error(this, "Caught "+t, t);
- } finally {
- node.unlockUID(uid);
- }
- }
-
- private Message createDataFound(KeyBlock block) {
- if(block instanceof CHKBlock)
- return DMT.createFNPCHKDataFound(uid, block.getRawHeaders());
- else if(block instanceof SSKBlock)
- return DMT.createFNPSSKDataFound(uid, block.getRawHeaders(), block.getRawData());
- else
- throw new IllegalStateException("Unknown key block type: "+block.getClass());
- }
-
-}
Copied: branches/async-client-layer/src/freenet/node/RequestHandler.java (from rev 7873, trunk/freenet/src/freenet/node/RequestHandler.java)
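
RequestHandler is essentially a relay: it loops on the sender's status, forwards non-local RejectedOverload notices asynchronously, and translates each terminal RequestSender status into exactly one FNP reply. A condensed, self-contained sketch of that mapping (message construction elided; the status names follow the deleted switch above):

    /** Condensed sketch of RequestHandler's terminal-status-to-reply mapping. */
    class ReplySketch {
        enum Status { DATA_NOT_FOUND, GENERATED_REJECTED_OVERLOAD, TIMED_OUT,
                      INTERNAL_ERROR, ROUTE_NOT_FOUND, SUCCESS }

        static String replyFor(Status status) {
            switch(status) {
            case DATA_NOT_FOUND:
                return "FNPDataNotFound";
            case GENERATED_REJECTED_OVERLOAD:
            case TIMED_OUT:
            case INTERNAL_ERROR:
                // Locally generated failures all look like overload to the source,
                // which should respond by reducing its send rate.
                return "FNPRejectedOverload(isLocal=true)";
            case ROUTE_NOT_FOUND:
                return "FNPRouteNotFound";
            case SUCCESS:
                return "DataFound (+ block transfer for CHKs, + pubkey for SSKs if requested)";
            default:
                throw new IllegalStateException("Unknown status " + status);
            }
        }
    }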
Deleted: branches/async-client-layer/src/freenet/node/RequestStarterClient.java
===================================================================
--- trunk/freenet/src/freenet/node/RequestStarterClient.java	2006-01-18 00:45:46 UTC (rev 7871)
+++ branches/async-client-layer/src/freenet/node/RequestStarterClient.java	2006-01-20 19:03:11 UTC (rev 7885)
@@ -1,118 +0,0 @@
-package freenet.node;
-
-import java.util.Vector;
-
-import freenet.crypt.RandomSource;
-import freenet.keys.ClientCHKBlock;
-import freenet.keys.ClientKey;
-import freenet.keys.ClientKeyBlock;
-import freenet.keys.ClientSSKBlock;
-import freenet.keys.KeyBlock;
-import freenet.support.DoublyLinkedList;
-import freenet.support.UpdatableSortedLinkedListItemImpl;
-
-/**
- * Interface to clients for starting a request.
- * Also represents a single client for fairness purposes.
- */
-public class RequestStarterClient extends UpdatableSortedLinkedListItemImpl {
-
- final int priority;
- private int random;
- private long cycleLastSent;
- private final Vector requests;
- private final RandomSource rs;
- private final QueueingSimpleLowLevelClient client;
- private final RequestStarter starter;
-
- public RequestStarterClient(short prioClass, short prio, RandomSource r, QueueingSimpleLowLevelClient c, RequestStarter starter) {
- this((prioClass << 16) + prio, r, c, starter);
- }
-
- private RequestStarterClient(int prio, RandomSource r, QueueingSimpleLowLevelClient c, RequestStarter starter) {
- priority = prio;
- this.random = r.nextInt();
- this.starter = starter;
- this.cycleLastSent = -1;
- this.requests = new Vector();
- this.rs = r;
- this.client = c;
- starter.registerClient(this);
- }
-
- /**
- * Blocking fetch of a key.
- * @throws LowLevelGetException If the fetch failed for some reason.
- */
- public ClientKeyBlock getKey(ClientKey key, boolean localOnly, boolean cache) throws LowLevelGetException {
- QueuedDataRequest qdr = new QueuedDataRequest(key, localOnly, cache, client);
- addRequest(qdr);
- return qdr.waitAndFetch();
- }
-
- /**
- * Blocking insert of a key.
- * @throws LowLevelPutException If the insert failed for some reason.
- */
- public void putKey(ClientKeyBlock block, boolean cache) throws LowLevelPutException {
- QueuedInsertRequest qir = new QueuedInsertRequest(block, client, cache);
- addRequest(qir);
- qir.waitAndPut();
- }
-
- void addRequest(QueuedRequest qr) {
- synchronized(this) {
- requests.add(qr);
- }
- if(starter != null)
- starter.notifyReady(this);
- }
-
- public long getCycleLastSent() {
- return cycleLastSent;
- }
-
- private DoublyLinkedList parentList;
-
- public DoublyLinkedList getParent() {
- return parentList;
- }
-
- public DoublyLinkedList setParent(DoublyLinkedList l) {
- DoublyLinkedList oldList = parentList;
- parentList = l;
- return oldList;
- }
-
- public int compareTo(Object o) {
- if(this == o) return 0;
- RequestStarterClient c = (RequestStarterClient) o;
- if(priority > c.priority) return 1;
- if(priority < c.priority) return -1;
- if(random > c.random) return 1;
- if(random < c.random) return -1;
- return 0;
- }
-
- public synchronized boolean isReady() {
- return !requests.isEmpty();
- }
-
- public boolean send(long cycleNumber) {
- QueuedRequest qr;
- synchronized(this) {
- if(!requests.isEmpty()) {
- int x = rs.nextInt(requests.size());
- qr = (QueuedRequest) requests.remove(x);
- } else qr = null;
- }
- if(qr == null) return false;
- qr.clearToSend();
- return true;
- }
-
- public void setCycleLastSet(long cycleNumber) {
- this.cycleLastSent = cycleNumber;
- }
-
-}
Copied: branches/async-client-layer/src/freenet/node/RequestStarterClient.java (from rev 7873, trunk/freenet/src/freenet/node/RequestStarterClient.java)
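
RequestStarterClient packs the priority class and the priority into a single int, (prioClass << 16) + prio, so compareTo() orders clients by class first, then priority, then a random tiebreaker. A tiny standalone demonstration of why the class always dominates (how the starter consumes this ordering is decided by RequestStarter, which is not part of this diff):

    /** Demonstrates the composite priority used by RequestStarterClient.compareTo(). */
    class PrioritySketch {
        static int composite(short prioClass, short prio) {
            return (prioClass << 16) + prio; // class occupies the high bits
        }

        public static void main(String[] args) {
            // Across classes, prio is irrelevant; within a class, prio breaks the tie.
            System.out.println(composite((short)1, (short)5) < composite((short)2, (short)0)); // true
            System.out.println(composite((short)1, (short)0) < composite((short)1, (short)5)); // true
        }
    }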
Deleted: branches/async-client-layer/src/freenet/node/SimpleLowLevelClient.java
===================================================================
--- trunk/freenet/src/freenet/node/SimpleLowLevelClient.java	2006-01-18 00:45:46 UTC (rev 7871)
+++ branches/async-client-layer/src/freenet/node/SimpleLowLevelClient.java	2006-01-20 19:03:11 UTC (rev 7885)
@@ -1,31 +0,0 @@
-package freenet.node;
-
-import freenet.keys.ClientCHKBlock;
-import freenet.keys.ClientKey;
-import freenet.keys.ClientKeyBlock;
-import freenet.keys.KeyBlock;
-
-/**
- * @author amphibian
- *
- * Simple client interface... fetch and push single CHKs. No
- * splitfile decoding, no DBRs, no SSKs, for now.
- *
- * We can build higher layers on top of this.
- */
-public interface SimpleLowLevelClient {
-
- /**
- * Fetch a key. Throws if it cannot fetch it.
- * @param cache If false, don't cache the data. See the comments at the top
- * of Node.java.
- */
- public ClientKeyBlock getKey(ClientKey key, boolean localOnly, RequestStarterClient client, boolean cache) throws LowLevelGetException;
-
- /**
- * Insert a key.
- * @param cache If false, don't cache the data. See the comments at the top
- * of Node.java.
- */
- public void putKey(ClientKeyBlock key, RequestStarterClient sctx, boolean cache) throws LowLevelPutException;
-}
Copied: branches/async-client-layer/src/freenet/node/SimpleLowLevelClient.java (from rev 7873, trunk/freenet/src/freenet/node/SimpleLowLevelClient.java)
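
A minimal sketch of how a caller uses this blocking interface, assuming a Node implementing SimpleLowLevelClient and a RequestStarterClient from Node.makeStarterClient() (error handling is illustrative):

    // Hypothetical caller of the blocking fetch API.
    ClientKeyBlock fetchOrNull(SimpleLowLevelClient node, RequestStarterClient starter, ClientKey key) {
        try {
            // Blocks until the data arrives or the request fails terminally.
            return node.getKey(key, false, starter, true);
        } catch (LowLevelGetException e) {
            // Terminal failure: data not found, route not found, rejected overload, ...
            return null;
        }
    }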
Deleted: branches/async-client-layer/src/freenet/node/TextModeClientInterface.java
===================================================================
--- trunk/freenet/src/freenet/node/TextModeClientInterface.java	2006-01-18 00:45:46 UTC (rev 7871)
+++ branches/async-client-layer/src/freenet/node/TextModeClientInterface.java	2006-01-20 19:03:11 UTC (rev 7885)
@@ -1,741 +0,0 @@
-package freenet.node;
-
-import java.io.BufferedReader;
-import java.io.DataInputStream;
-import java.io.EOFException;
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.FileNotFoundException;
-import java.io.FileOutputStream;
-import java.io.IOException;
-import java.io.InputStreamReader;
-import java.net.MalformedURLException;
-import java.util.HashMap;
-import java.util.Hashtable;
-import java.util.Iterator;
-
-import freenet.client.ClientMetadata;
-import freenet.client.DefaultMIMETypes;
-import freenet.client.FetchException;
-import freenet.client.FetchResult;
-import freenet.client.HighLevelSimpleClient;
-import freenet.client.InsertBlock;
-import freenet.client.InserterException;
-import freenet.client.Metadata;
-import freenet.client.events.EventDumper;
-import freenet.crypt.RandomSource;
-import freenet.io.comm.PeerParseException;
-import freenet.keys.FreenetURI;
-import freenet.keys.InsertableClientSSK;
-import freenet.support.ArrayBucket;
-import freenet.support.Bucket;
-import freenet.support.BucketTools;
-import freenet.support.HexUtil;
-import freenet.support.Logger;
-import freenet.support.SimpleFieldSet;
-import freenet.support.io.FileBucket;
-
-/**
- * @author amphibian
- *
- * Read commands to fetch or put from stdin.
- *
- * Execute them.
- */
-public class TextModeClientInterface implements Runnable {
-
- final RandomSource r;
- final Node n;
- final HighLevelSimpleClient client;
- final Hashtable streams;
- final RequestStarterClient requestStarterClient;
- final RequestStarterClient insertStarterClient;
- final File downloadsDir;
-
- TextModeClientInterface(Node n) {
- this.n = n;
- client = n.makeClient(RequestStarter.INTERACTIVE_PRIORITY_CLASS, (short)0);
- client.addGlobalHook(new EventDumper());
- this.r = n.random;
- streams = new Hashtable();
- new Thread(this, "Text mode client interface").start();
- this.requestStarterClient = n.makeStarterClient(RequestStarter.INTERACTIVE_PRIORITY_CLASS, (short)0, false);
- this.insertStarterClient = n.makeStarterClient(RequestStarter.INTERACTIVE_PRIORITY_CLASS, (short)0, true);
- this.downloadsDir = n.downloadDir;
- }
-
- /**
- * Read commands, run them
- */
- public void run() {
- printHeader();
- // Read command, and data
- BufferedReader reader = new BufferedReader(new InputStreamReader(System.in));
- while(true) {
- try {
- processLine(reader);
- } catch (Throwable t) {
- Logger.error(this, "Caught "+t, t);
- System.out.println("Caught: "+t);
- t.printStackTrace();
- }
- }
- }
-
- private void printHeader() {
- System.out.println("Freenet 0.7 Trivial Node Test Interface");
- System.out.println("---------------------------------------");
- System.out.println();
- System.out.println("Build "+Version.buildNumber);
- System.out.println("Enter one of the following commands:");
- System.out.println("GET:<Freenet key> - Fetch a key");
- System.out.println("PUT:\n<text, until a . on a line by itself> -
Insert the document and return the key.");
- System.out.println("PUT:<text> - Put a single line of text to a CHK
and return the key.");
- System.out.println("GETCHK:\n<text, until a . on a line by itself> -
Get the key that would be returned if the document was inserted.");
- System.out.println("GETCHK:<text> - Get the key that would be returned
if the line was inserted.");
- System.out.println("PUTFILE:<filename> - Put a file from disk.");
- System.out.println("GETFILE:<filename> - Fetch a key and put it in a
file. If the key includes a filename we will use it but we will not overwrite
local files.");
- System.out.println("GETCHKFILE:<filename> - Get the key that would be
returned if we inserted the file.");
- System.out.println("PUTDIR:<path>[#<defaultfile>] - Put the entire
directory from disk.");
- System.out.println("GETCHKDIR:<path>[#<defaultfile>] - Get the key
that would be returned if we'd put the entire directory from disk.");
- System.out.println("MAKESSK - Create an SSK keypair.");
- System.out.println("PUTSSK:<insert uri>;<url to redirect to> - Insert
an SSK redirect to a file already inserted.");
- System.out.println("PUTSSKDIR:<insert uri>#<path>[#<defaultfile>] -
Insert an entire directory to an SSK.");
-// System.out.println("PUBLISH:<name> - create a publish/subscribe
stream called <name>");
-// System.out.println("PUSH:<name>:<text> - publish a single line of
text to the stream named");
-// System.out.println("SUBSCRIBE:<key> - subscribe to a
publish/subscribe stream by key");
- System.out.println("CONNECT:<filename> - connect to a node from its
ref in a file.");
- System.out.println("CONNECT:\n<noderef including an End on a line by
itself> - enter a noderef directly.");
- System.out.println("DISCONNECT:<ip:port> - disconnect from a node by
providing it's ip+port");
- System.out.println("NAME:<new node name> - change the node's name.");
-// System.out.println("SUBFILE:<filename> - append all data received
from subscriptions to a file, rather than sending it to stdout.");
-// System.out.println("SAY:<text> - send text to the last
created/pushed stream");
- System.out.println("STATUS - display some status information on the
node including its reference and connections.");
- System.out.println("QUIT - exit the program");
- if(n.testnetEnabled) {
- System.out.println("WARNING: TESTNET MODE ENABLED. YOU HAVE NO
ANONYMITY.");
- }
- }
-
- /**
- * Process a single command.
- * @throws IOException If we could not write the data to stdout.
- */
- private void processLine(BufferedReader reader) throws IOException {
- String line;
- try {
- line = reader.readLine();
- } catch (IOException e) {
- System.err.println("Bye... ("+e+")");
- return;
- }
- boolean getCHKOnly = false;
- if(line == null) line = "QUIT";
- String uline = line.toUpperCase();
- Logger.minor(this, "Command: "+line);
- if(uline.startsWith("GET:")) {
- // Should have a key next
- String key = line.substring("GET:".length());
- while(key.length() > 0 && key.charAt(0) == ' ')
- key = key.substring(1);
- while(key.length() > 0 && key.charAt(key.length()-1) == ' ')
- key = key.substring(0, key.length()-1); // strip a single trailing space
- Logger.normal(this, "Key: "+key);
- FreenetURI uri;
- try {
- uri = new FreenetURI(key);
- Logger.normal(this, "Key: "+uri);
- } catch (MalformedURLException e2) {
- System.out.println("Malformed URI: "+key+" : "+e2);
- return;
- }
- try {
- FetchResult result = client.fetch(uri);
- ClientMetadata cm = result.getMetadata();
- System.out.println("Content MIME type:
"+cm.getMIMEType());
- System.out.println("Data:\n");
- Bucket data = result.asBucket();
- BucketTools.copyTo(data, System.out,
Long.MAX_VALUE);
- System.out.println();
- } catch (FetchException e) {
- System.out.println("Error: "+e.getMessage());
- }
- } else if(uline.startsWith("GETFILE:")) {
- // Should have a key next
- String key = line.substring("GETFILE:".length());
- while(key.length() > 0 && key.charAt(0) == ' ')
- key = key.substring(1);
- while(key.length() > 0 && key.charAt(key.length()-1) == ' ')
- key = key.substring(0, key.length()-1); // strip a single trailing space
- Logger.normal(this, "Key: "+key);
- FreenetURI uri;
- try {
- uri = new FreenetURI(key);
- } catch (MalformedURLException e2) {
- System.out.println("Malformed URI: "+key+" : "+e2);
- return;
- }
- try {
- long startTime = System.currentTimeMillis();
- FetchResult result = client.fetch(uri);
- ClientMetadata cm = result.getMetadata();
- System.out.println("Content MIME type:
"+cm.getMIMEType());
- Bucket data = result.asBucket();
- // Now calculate filename
- String fnam = uri.getDocName();
- fnam = sanitize(fnam);
- if(fnam.length() == 0) {
- fnam = "freenet-download-"+HexUtil.bytesToHex(BucketTools.hash(data), 0, 10);
- String ext = DefaultMIMETypes.getExtension(cm.getMIMEType());
- if(ext != null && !ext.equals(""))
- fnam += "." + ext;
- }
- File f = new File(downloadsDir, fnam);
- if(f.exists()) {
- System.out.println("File exists already: "+fnam);
- fnam = "freenet-"+System.currentTimeMillis()+"-"+fnam;
- }
- FileOutputStream fos = null;
- try {
- fos = new FileOutputStream(f);
- BucketTools.copyTo(data, fos, Long.MAX_VALUE);
- fos.close();
- System.out.println("Written to "+fnam);
- } catch (IOException e) {
- System.out.println("Could not write file: caught "+e);
- e.printStackTrace();
- } finally {
- if(fos != null) try {
- fos.close();
- } catch (IOException e1) {
- // Ignore
- }
- }
- long endTime = System.currentTimeMillis();
- long sz = data.size();
- double rate = 1000.0 * sz / (endTime-startTime);
- System.out.println("Download rate: "+rate+" bytes / second");
- } catch (FetchException e) {
- System.out.println("Error: "+e.getMessage());
- if(e.getMode() == e.SPLITFILE_ERROR && e.errorCodes != null) {
- System.out.println(e.errorCodes.toVerboseString());
- }
- }
- } else if(uline.startsWith("QUIT")) {
- System.out.println("Goodbye.");
- System.exit(0);
- } else if(uline.startsWith("PUT:") || (getCHKOnly =
uline.startsWith("GETCHK:"))) {
- // Just insert to local store
- line = line.substring("PUT:".length());
- while(line.length() > 0 && line.charAt(0) == ' ')
- line = line.substring(1);
- while(line.length() > 0 && line.charAt(line.length()-1) == ' ')
- line = line.substring(0, line.length()-1); // strip a single trailing space
- String content;
- if(line.length() > 0) {
- // Single line insert
- content = line;
- } else {
- // Multiple line insert
- content = readLines(reader, false);
- }
- // Insert
- byte[] data = content.getBytes();
-
- InsertBlock block = new InsertBlock(new ArrayBucket(data), null, FreenetURI.EMPTY_CHK_URI);
-
- FreenetURI uri;
- try {
- uri = client.insert(block, getCHKOnly);
- } catch (InserterException e) {
- System.out.println("Error: "+e.getMessage());
- if(e.uri != null)
- System.out.println("URI would have been: "+e.uri);
- int mode = e.getMode();
- if(mode == InserterException.FATAL_ERRORS_IN_BLOCKS || mode == InserterException.TOO_MANY_RETRIES_IN_BLOCKS) {
- System.out.println("Splitfile-specific error:\n"+e.errorCodes.toVerboseString());
- }
- return;
- }
-
- System.out.println("URI: "+uri);
- ////////////////////////////////////////////////////////////////////////////////
- } else if(uline.startsWith("PUTDIR:") ||
(uline.startsWith("PUTSSKDIR")) || (getCHKOnly =
uline.startsWith("GETCHKDIR:"))) {
- // TODO: Check for errors?
- boolean ssk = false;
- if(uline.startsWith("PUTDIR:"))
- line = line.substring("PUTDIR:".length());
- else if(uline.startsWith("PUTSSKDIR:")) {
- line = line.substring("PUTSSKDIR:".length());
- ssk = true;
- } else if(uline.startsWith("GETCHKDIR:"))
- line = line.substring(("GETCHKDIR:").length());
- else
- System.err.println("Impossible");
-
- line = line.trim();
-
- if(line.length() < 1) {
- printHeader();
- return;
- }
-
- String defaultFile = null;
-
- FreenetURI insertURI = FreenetURI.EMPTY_CHK_URI;
-
- // set default file?
- if (line.matches("^.*#.*$")) {
- String[] split = line.split("#");
- if(ssk) {
- insertURI = new FreenetURI(split[0]);
- line = split[1];
- if(split.length > 2)
- defaultFile = split[2];
- } else {
- defaultFile = split[1];
- line = split[0];
- }
- }
-
- HashMap bucketsByName =
- makeBucketsByName(line);
-
- if(defaultFile == null) {
- String[] defaultFiles =
- new String[] { "index.html", "index.htm",
"default.html", "default.htm" };
- for(int i=0;i<defaultFiles.length;i++) {
- if(bucketsByName.containsKey(defaultFiles[i])) {
- defaultFile = defaultFiles[i];
- break;
- }
- }
- }
-
- FreenetURI uri;
- try {
- uri = client.insertManifest(insertURI, bucketsByName, defaultFile);
- uri = uri.addMetaStrings(new String[] { "" });
- System.out.println("=======================================================");
- System.out.println("URI: "+uri);
- System.out.println("=======================================================");
- } catch (InserterException e) {
- System.out.println("Finished insert but: "+e.getMessage());
- if(e.uri != null) {
- uri = e.uri;
- uri = uri.addMetaStrings(new String[] { "" });
- System.out.println("URI would have been: "+uri);
- }
- if(e.errorCodes != null) {
- System.out.println("Splitfile errors breakdown:");
- System.out.println(e.errorCodes.toVerboseString());
- }
- Logger.error(this, "Caught "+e, e);
- }
-
- } else if(uline.startsWith("PUTFILE:") || (getCHKOnly =
uline.startsWith("GETCHKFILE:"))) {
- // Just insert to local store
- if(getCHKOnly) {
- line = line.substring(("GETCHKFILE:").length());
- } else {
- line = line.substring("PUTFILE:".length());
- }
- while(line.length() > 0 && line.charAt(0) == ' ')
- line = line.substring(1);
- while(line.length() > 0 && line.charAt(line.length()-1) == ' ')
- line = line.substring(0, line.length()-1); // strip a single trailing space
- File f = new File(line);
- System.out.println("Attempting to read file "+line);
- long startTime = System.currentTimeMillis();
- try {
- if(!(f.exists() && f.canRead())) {
- throw new FileNotFoundException();
- }
-
- // Guess MIME type
- String mimeType = DefaultMIMETypes.guessMIMEType(line);
- System.out.println("Using MIME type: "+mimeType);
-
- FileBucket fb = new FileBucket(f, true, false, false);
- InsertBlock block = new InsertBlock(fb, new ClientMetadata(mimeType), FreenetURI.EMPTY_CHK_URI);
-
- startTime = System.currentTimeMillis();
- FreenetURI uri = client.insert(block, getCHKOnly);
-
- // FIXME depends on CHK's still being renamable
- //uri = uri.setDocName(f.getName());
-
- System.out.println("URI: "+uri);
- long endTime = System.currentTimeMillis();
- long sz = f.length();
- double rate = 1000.0 * sz / (endTime-startTime);
- System.out.println("Upload rate: "+rate+" bytes / second");
- } catch (FileNotFoundException e1) {
- System.out.println("File not found");
- } catch (InserterException e) {
- System.out.println("Finished insert but: "+e.getMessage());
- if(e.uri != null) {
- System.out.println("URI would have been: "+e.uri);
- long endTime = System.currentTimeMillis();
- long sz = f.length();
- double rate = 1000.0 * sz / (endTime-startTime);
- System.out.println("Upload rate: "+rate+" bytes / second");
- }
- if(e.errorCodes != null) {
- System.out.println("Splitfile errors breakdown:");
- System.out.println(e.errorCodes.toVerboseString());
- }
- } catch (Throwable t) {
- System.out.println("Insert threw: "+t);
- t.printStackTrace();
- }
- } else if(uline.startsWith("MAKESSK")) {
- InsertableClientSSK key = InsertableClientSSK.createRandom(r);
- System.out.println("Insert URI:
"+key.getInsertURI().toString(false));
- System.out.println("Request URI:
"+key.getURI().toString(false));
- FreenetURI insertURI = key.getInsertURI().setDocName("testsite");
- String fixedInsertURI = insertURI.toString(false);
- System.out.println("Note that you MUST add a filename to the
end of the above URLs e.g.:\n"+fixedInsertURI);
- System.out.println("Normally you will then do PUTSSKDIR:<insert
URI>#<directory to upload>, for
example:\nPUTSSKDIR:"+fixedInsertURI+"#directoryToUpload/");
- System.out.println("This will then produce a manifest site
containing all the files, the default document can be accessed
at\n"+insertURI.addMetaStrings(new String[] { "" }).toString(false));
- } else if(uline.startsWith("PUTSSK:")) {
- String cmd = line.substring("PUTSSK:".length());
- cmd = cmd.trim();
- if(cmd.indexOf(';') <= 0) {
- System.out.println("No target URI provided.");
- System.out.println("PUTSSK:<insert uri>;<url to
redirect to>");
- return;
- }
- String[] split = cmd.split(";");
- String insertURI = split[0];
- String targetURI = split[1];
- System.out.println("Insert URI: "+insertURI);
- System.out.println("Target URI: "+targetURI);
- FreenetURI insert = new FreenetURI(insertURI);
- FreenetURI target = new FreenetURI(targetURI);
- InsertableClientSSK key = InsertableClientSSK.create(insert);
- System.out.println("Fetch URI: "+key.getURI());
- try {
- FreenetURI result = client.insertRedirect(insert, target);
- System.out.println("Successfully inserted to fetch URI: "+key.getURI());
- } catch (InserterException e) {
- System.out.println("Finished insert but: "+e.getMessage());
- Logger.normal(this, "Error: "+e, e);
- if(e.uri != null) {
- System.out.println("URI would have been: "+e.uri);
- }
- }
-
- } else if(uline.startsWith("STATUS")) {
- SimpleFieldSet fs = n.exportFieldSet();
- System.out.println(fs.toString());
- System.out.println();
- System.out.println(n.getStatus());
- if(Version.buildNumber<Version.highestSeenBuild){
- System.out.println("The latest version is :
"+Version.highestSeenBuild);
- }
- } else if(uline.startsWith("CONNECT:")) {
- String key = line.substring("CONNECT:".length());
- while(key.length() > 0 && key.charAt(0) == ' ')
- key = key.substring(1);
- while(key.length() > 0 && key.charAt(key.length()-1) == ' ')
- key = key.substring(0, key.length()-1);
- if(key.length() > 0) {
- // Filename
- System.out.println("Trying to connect to noderef in "+key);
- File f = new File(key);
- System.out.println("Attempting to read file "+key);
- try {
- FileInputStream fis = new FileInputStream(key);
- DataInputStream dis = new DataInputStream(fis);
- int length = (int)f.length();
- byte[] data = new byte[length];
- dis.readFully(data);
- dis.close();
- connect(new String(data));
- } catch (IOException e) {
- System.err.println("Could not read file: "+e);
- e.printStackTrace(System.err);
- }
- } else {
- String content = readLines(reader, true);
- if(content == null) return;
- if(content.equals("")) return;
- connect(content);
- }
- } else if(uline.startsWith("NAME:")) {
- System.out.println("Node name currently: "+n.myName);
- String key = line.substring("NAME:".length());
- while(key.length() > 0 && key.charAt(0) == ' ')
- key = key.substring(1);
- while(key.length() > 0 && key.charAt(key.length()-1) == ' ')
- key = key.substring(0, key.length()-1);
- System.out.println("New name: "+key);
- n.setName(key);
- } else if(uline.startsWith("DISCONNECT:")) {
- String ipAndPort = line.substring("DISCONNECT:".length());
- disconnect(ipAndPort.trim());
- } else {
- if(uline.length() > 0)
- printHeader();
- }
- }
-
- /**
- * Create a map of String -> Bucket for every file in a directory
- * and its subdirs.
- */
- private HashMap makeBucketsByName(String directory) {
-
- if (!directory.endsWith("/"))
- directory = directory + "/";
- File thisdir = new File(directory);
-
- System.out.println("Listing dir: "+thisdir);
-
- HashMap ret = new HashMap();
-
- File filelist[] = thisdir.listFiles();
- for(int i = 0 ; i < filelist.length ; i++) {
- if (filelist[i].isFile() && filelist[i].canRead()) {
- File f = filelist[i];
-
- FileBucket bucket = new FileBucket(f, true, false, false);
-
- ret.put(f.getName(), bucket);
- } else if(filelist[i].isDirectory()) {
- HashMap subdir = makeBucketsByName(directory + filelist[i].getName());
- Iterator it = subdir.keySet().iterator();
- while(it.hasNext()) {
- String key = (String) it.next();
- Bucket bucket = (Bucket) subdir.get(key);
- ret.put(filelist[i].getName() + "/" + key,
bucket);
- }
- }
- }
- return ret;
- }
-
- private String dirPutToList(HashMap dir, String basedir) {
- String ret = "";
- for(Iterator i = dir.keySet().iterator();i.hasNext();) {
- String key = (String) i.next();
- Object o = dir.get(key);
- Metadata target;
- if(o instanceof String) {
- // File
- ret += basedir + key + "\n";
- } else if(o instanceof HashMap) {
- ret += dirPutToList((HashMap)o, basedir + key + "//");
- } else throw new IllegalArgumentException("Not String nor HashMap: "+o);
- }
- return ret;
- }
-
- private HashMap dirPut(String directory, boolean getCHKOnly) {
- if (!directory.endsWith("/"))
- directory = directory + "/";
- File thisdir = new File(directory);
-
- System.out.println("Listing dir: "+thisdir);
-
- HashMap ret = new HashMap();
-
- File filelist[] = thisdir.listFiles();
- for(int i = 0 ; i < filelist.length ; i++)
- if (filelist[i].isFile()) {
- FreenetURI uri = null;
- File f = filelist[i];
- String line = f.getAbsolutePath();
- // To ease cleanup, the following code is taken from above,
- // except for the uri declaration, which is moved out of the try block.
- // Some lines are also commented out.
- //////////////////////////////////////////////////////////////////////////////////////
- System.out.println("Attempting to read file "+line);
- long startTime = System.currentTimeMillis();
- try {
- if(!(f.exists() && f.canRead())) {
- throw new FileNotFoundException();
- }
-
- // Guess MIME type
- String mimeType = DefaultMIMETypes.guessMIMEType(line);
- System.out.println("Using MIME type: "+mimeType);
-
- FileBucket fb = new FileBucket(f, true, false, false);
- InsertBlock block = new InsertBlock(fb, new ClientMetadata(mimeType), FreenetURI.EMPTY_CHK_URI);
-
- startTime = System.currentTimeMillis();
- // Note: uri is declared outside the try block (see above).
- uri = client.insert(block, getCHKOnly);
-
- // FIXME depends on CHK's still being renamable
- //uri = uri.setDocName(f.getName());
-
- System.out.println("URI: "+uri);
- long endTime = System.currentTimeMillis();
- long sz = f.length();
- double rate = 1000.0 * sz / (endTime-startTime);
- System.out.println("Upload rate: "+rate+" bytes / second");
- } catch (FileNotFoundException e1) {
- System.out.println("File not found");
- } catch (InserterException e) {
- System.out.println("Finished insert but:
"+e.getMessage());
- if(e.uri != null) {
- System.out.println("URI would have been:
"+e.uri);
- long endTime = System.currentTimeMillis();
- long sz = f.length();
- double rate = 1000.0 * sz / (endTime-startTime);
- System.out.println("Upload rate: "+rate+" bytes /
second");
- }
- if(e.errorCodes != null) {
- System.out.println("Splitfile errors
breakdown:");
-
System.out.println(e.errorCodes.toVerboseString());
- }
- } catch (Throwable t) {
- System.out.println("Insert threw: "+t);
- t.printStackTrace();
- }
- //////////////////////////////////////////////////////////////////////////////////////
-
- if (uri != null)
- ret.put(filelist[i].getName(), uri.toString(false));
- else
- System.err.println("Could not insert file.");
- //ret.put(filelist[i].getName(), null);
- } else {
- HashMap subdir = dirPut(filelist[i].getAbsolutePath(), getCHKOnly);
- ret.put(filelist[i].getName(), subdir);
- }
-
- return ret;
- }
-
-
- /**
- * @return A block of text, input from stdin, ending with a
- * . on a line by itself. Does some mangling for a fieldset if
- * isFieldSet.
- */
- private String readLines(BufferedReader reader, boolean isFieldSet) {
- StringBuffer sb = new StringBuffer(1000);
- boolean breakflag = false;
- while(true) {
- String line;
- try {
- line = reader.readLine();
- if(line == null) throw new EOFException();
- } catch (IOException e1) {
- System.err.println("Bye... ("+e1+")");
- return null;
- }
- if((!isFieldSet) && line.equals(".")) break;
- if(isFieldSet) {
- // Mangling
- // First trim
- line = line.trim();
- if(line.equals("End")) {
- breakflag = true;
- } else {
- if(line.endsWith("End") &&
-
Character.isWhitespace(line.charAt(line.length()-("End".length()+1)))) {
- line = "End";
- breakflag = true;
- } else {
- int idx = line.indexOf('=');
- if(idx < 0) {
- System.err.println("No = and no End in line:
"+line);
- return "";
- } else {
- if(idx > 0) {
- String after;
- if(idx == line.length()-1)
- after = "";
- else
- after = line.substring(idx+1);
- String before = line.substring(0, idx);
- before = before.trim();
- int x = 0;
- for(int j=before.length()-1;j>=0;j--) {
- char c = before.charAt(j);
- if(c == '.' || Character.isLetterOrDigit(c)) {
- // Valid character for field
- } else {
- x=j+1;
- break;
- }
- }
- before = before.substring(x);
- line = before + '=' + after;
- //System.out.println(line);
- } else {
- System.err.println("Invalid empty field name");
- breakflag = true;
- }
- }
- }
- }
- }
- sb.append(line).append('\n');
- if(breakflag) break;
- }
- return sb.toString();
- }
-
- /**
- * Connect to a node, given its reference.
- */
- private void connect(String content) {
- SimpleFieldSet fs;
- System.out.println("Connecting to:\n"+content);
- try {
- fs = new SimpleFieldSet(content);
- } catch (IOException e) {
- System.err.println("Did not parse: "+e);
- e.printStackTrace();
- return;
- }
- PeerNode pn;
- try {
- pn = new PeerNode(fs, n);
- } catch (FSParseException e1) {
- System.err.println("Did not parse: "+e1.getMessage());
- return;
- } catch (PeerParseException e1) {
- System.err.println("Did not parse: "+e1.getMessage());
- return;
- }
- if(n.peers.addPeer(pn))
- System.out.println("Added peer: "+pn);
- n.peers.writePeers();
- }
-
- /**
- * Disconnect from a node, given its ip and port as a String
- */
- private void disconnect(String ipAndPort) {
- System.out.println("Disconnecting from node at: "+ipAndPort);
- PeerNode[] pn = n.peers.myPeers;
- for(int i=0;i<pn.length;i++)
- {
- String nodeIpAndPort = pn[i].getPeer().getAddress().getHostAddress()+":"+pn[i].getPeer().getPort();
- if(nodeIpAndPort.equals(ipAndPort))
- {
- n.peers.disconnect(pn[i]);
- return;
- }
- }
- System.out.println("No node in peers list at: "+ipAndPort);
- }
-
- private String sanitize(String fnam) {
- if(fnam == null) return "";
- StringBuffer sb = new StringBuffer(fnam.length());
- for(int i=0;i<fnam.length();i++) {
- char c = fnam.charAt(i);
- if(Character.isLetterOrDigit(c) || c == '-' || c == '.')
- sb.append(c);
- }
- return sb.toString();
- }
-}
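For reference, the whitespace-stripping loops in the console handlers above boil down to a small JDK-only helper along the following lines (class and method names here are illustrative, not part of the tree):

    public class TrimDemo {
        /** Strip leading and trailing spaces, one character at a time. */
        static String stripSpaces(String s) {
            while (s.length() > 0 && s.charAt(0) == ' ')
                s = s.substring(1);
            while (s.length() > 0 && s.charAt(s.length() - 1) == ' ')
                s = s.substring(0, s.length() - 1);
            return s;
        }

        public static void main(String[] args) {
            System.out.println("[" + stripSpaces("  file.txt ") + "]"); // prints [file.txt]
        }
    }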
Copied:
branches/async-client-layer/src/freenet/node/TextModeClientInterface.java (from
rev 7873, trunk/freenet/src/freenet/node/TextModeClientInterface.java)
Deleted: branches/async-client-layer/src/freenet/node/Version.java
===================================================================
--- trunk/freenet/src/freenet/node/Version.java 2006-01-18 00:45:46 UTC (rev
7871)
+++ branches/async-client-layer/src/freenet/node/Version.java 2006-01-20
19:03:11 UTC (rev 7885)
@@ -1,251 +0,0 @@
-package freenet.node;
-
-import freenet.support.Fields;
-import freenet.support.Logger;
-
-import java.util.StringTokenizer;
-
-/**
- * Central spot for stuff related to the versioning of the codebase.
- */
-public abstract class Version {
-
- /** FReenet Reference Daemon */
- public static final String nodeName = "Fred";
-
- /** The current tree version */
- public static final String nodeVersion = "0.7";
-
- /** The protocol version supported */
- public static final String protocolVersion = "1.0";
-
- /** The build number of the current revision */
- public static final int buildNumber = 359;
-
- /** Oldest build of Fred we will talk to */
- public static final int lastGoodBuild = 359;
-
- /** The highest reported build of fred */
- public static int highestSeenBuild = buildNumber;
-
- /** The current stable tree version */
- public static final String stableNodeVersion = "0.7";
-
- /** The stable protocol version supported */
- public static final String stableProtocolVersion = "STABLE-0.7";
-
- /** Oldest stable build of Fred we will talk to */
- public static final int lastGoodStableBuild = 1;
-
- /** Revision number of Version.java as read from CVS */
- public static final String cvsRevision;
-
- private static boolean logDEBUG = Logger.shouldLog(Logger.DEBUG, Version.class);
- static {
- StringTokenizer cvsId =
- new StringTokenizer("$Id: Version.java,v 1.134
2005/09/20 18:51:32 amphibian Exp $");
- cvsId.nextToken();
- cvsId.nextToken();
- cvsRevision = cvsId.nextToken();
- }
-
- /**
- * @return the node's version designators as an array
- */
- public static final String[] getVersion() {
- String[] ret =
- { nodeName, nodeVersion, protocolVersion, "" +
buildNumber };
- return ret;
- }
-
- public static final String[] getLastGoodVersion() {
- String[] ret =
- { nodeName, nodeVersion, protocolVersion, "" +
lastGoodBuild };
- return ret;
- }
-
- /**
- * @return the version string that should be presented in the NodeReference
- */
- public static final String getVersionString() {
- return Fields.commaList(getVersion());
- }
-
- /**
- * @return is needed for the freeviz
- */
- public static final String getLastGoodVersionString() {
- return Fields.commaList(getLastGoodVersion());
- }
-
- /**
- * @return true if requests should be accepted from nodes brandishing this
- * protocol version string
- */
- private static boolean goodProtocol(String prot) {
- if (prot.equals(protocolVersion)
-// uncomment next line to accept stable, see also explainBadVersion() below
-// || prot.equals(stableProtocolVersion)
- )
- return true;
- return false;
- }
-
- /**
- * @return true if requests should be accepted from nodes brandishing this
- * version string
- */
- public static final boolean checkGoodVersion(
- String version) {
- if(version == null) {
- Logger.error(Version.class, "version == null!",
- new Exception("error"));
- return false;
- }
- String[] v = Fields.commaList(version);
-
- if (v.length < 3 || !goodProtocol(v[2])) {
- return false;
- }
- if (sameVersion(v)) {
- try {
- int build = Integer.parseInt(v[3]);
- if (build < lastGoodBuild) {
- if(logDEBUG) Logger.debug(
- Version.class,
- "Not accepting unstable from
version: "
- + version
- + "(lastGoodBuild="
- + lastGoodBuild
- + ")");
- return false;
- }
- } catch (NumberFormatException e) {
- Logger.minor(
- Version.class,
- "Not accepting (" + e + ") from " +
version);
- return false;
- }
- }
- if (stableVersion(v)) {
- try {
- int build = Integer.parseInt(v[3]);
- if(build < lastGoodStableBuild) {
- if(logDEBUG) Logger.debug(
- Version.class,
- "Not accepting stable from
version"
- + version
- +
"(lastGoodStableBuild="
- + lastGoodStableBuild
- + ")");
- return false;
- }
- } catch (NumberFormatException e) {
- Logger.minor(
- Version.class,
- "Not accepting (" + e + ") from " +
version);
- return false;
- }
- }
- if(logDEBUG)
- Logger.minor(Version.class, "Accepting: " + version);
- return true;
- }
-
- /**
- * @return string explaining why a version string is rejected
- */
- public static final String explainBadVersion(String version) {
- String[] v = Fields.commaList(version);
-
- if (v.length < 3 || !goodProtocol(v[2])) {
- return "Required protocol version is "
- + protocolVersion
-// uncomment next line if accepting stable, see also goodProtocol() above
-// + " or " + stableProtocolVersion
- ;
- }
- if (sameVersion(v)) {
- try {
- int build = Integer.parseInt(v[3]);
- if (build < lastGoodBuild)
- return "Build older than last good
build " + lastGoodBuild;
- } catch (NumberFormatException e) {
- return "Build number not numeric.";
- }
- }
- if (stableVersion(v)) {
- try {
- int build = Integer.parseInt(v[3]);
- if (build < lastGoodStableBuild)
- return "Build older than last good
stable build " + lastGoodStableBuild;
- } catch (NumberFormatException e) {
- return "Build number not numeric.";
- }
- }
- return null;
- }
-
- /**
- * Update static variable highestSeenBuild anytime we encounter
- * a new node with a higher version than we've seen before
- */
- public static final void seenVersion(String version) {
- String[] v = Fields.commaList(version);
-
- if (v.length < 3)
- return; // bad, but that will be discovered elsewhere
-
- if (sameVersion(v)) {
-
- int buildNo;
- try {
- buildNo = Integer.parseInt(v[3]);
- } catch (NumberFormatException e) {
- return;
- }
- if (buildNo > highestSeenBuild) {
- if (Logger.shouldLog(Logger.MINOR, Version.class)) {
- Logger.minor(
- Version.class,
- "New highest seen build: " +
buildNo);
- }
- highestSeenBuild = buildNo;
- }
- }
- }
-
- /**
- * @return true if the string describes the same node version as ours.
- * Note that the build number may be different, and is ignored.
- */
- public static boolean sameVersion(String[] v) {
- return v[0].equals(nodeName)
- && v[1].equals(nodeVersion)
- && v.length >= 4;
- }
-
- /**
- * @return true if the string describes a stable node version
- */
- private static boolean stableVersion(String[] v) {
- return v[0].equals(nodeName)
- && v[1].equals(stableNodeVersion)
- && v.length >= 4;
- }
-
- public static void main(String[] args) throws Throwable {
- System.out.println(
- "Freenet: "
- + nodeName
- + " "
- + nodeVersion
- + " (protocol "
- + protocolVersion
- + ") build "
- + buildNumber
- + " (last good build: "
- + lastGoodBuild
- + ")");
- }
-}
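For reference, the version gate implemented above reduces to a comparison of the following shape. This is a JDK-only sketch: the class name and constants are illustrative, and String.split stands in for Fields.commaList:

    public class VersionCheckSketch {
        static final String NODE_NAME = "Fred";
        static final String NODE_VERSION = "0.7";
        static final String PROTOCOL = "1.0";
        static final int LAST_GOOD_BUILD = 359;

        /** Accept "name,version,protocol,build"; reject stale builds of the same tree. */
        static boolean acceptable(String version) {
            String[] v = version.split(",");
            if (v.length < 4 || !PROTOCOL.equals(v[2]))
                return false;
            if (!NODE_NAME.equals(v[0]) || !NODE_VERSION.equals(v[1]))
                return true; // different tree: the protocol match above suffices here
            try {
                return Integer.parseInt(v[3]) >= LAST_GOOD_BUILD;
            } catch (NumberFormatException e) {
                return false; // build number not numeric
            }
        }

        public static void main(String[] args) {
            System.out.println(acceptable("Fred,0.7,1.0,359")); // true
            System.out.println(acceptable("Fred,0.7,1.0,300")); // false: too old
        }
    }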
Copied: branches/async-client-layer/src/freenet/node/Version.java (from rev
7875, trunk/freenet/src/freenet/node/Version.java)
Copied: branches/async-client-layer/src/freenet/node/fcp (from rev 7873,
trunk/freenet/src/freenet/node/fcp)
Deleted: branches/async-client-layer/src/freenet/node/fcp/ClientGet.java
===================================================================
--- trunk/freenet/src/freenet/node/fcp/ClientGet.java 2006-01-18 16:38:29 UTC
(rev 7873)
+++ branches/async-client-layer/src/freenet/node/fcp/ClientGet.java
2006-01-20 19:03:11 UTC (rev 7885)
@@ -1,70 +0,0 @@
-package freenet.node.fcp;
-
-import freenet.client.FetchException;
-import freenet.client.FetchResult;
-import freenet.client.Fetcher;
-import freenet.client.FetcherContext;
-import freenet.keys.FreenetURI;
-import freenet.support.Logger;
-
-/**
- * A simple client fetch. This can of course fetch arbitrarily large
- * files, including splitfiles, redirects, etc.
- */
-public class ClientGet extends ClientRequest implements Runnable {
-
- private final FreenetURI uri;
- private final FetcherContext fctx;
- private final Fetcher f;
- private final String identifier;
- private final int verbosity;
- private final FCPConnectionHandler handler;
-
- public ClientGet(FCPConnectionHandler handler, ClientGetMessage message) {
- uri = message.uri;
- // Create a Fetcher directly in order to get more fine-grained control,
- // since the client may override a few context elements.
- this.handler = handler;
- fctx = new FetcherContext(handler.defaultFetchContext, FetcherContext.IDENTICAL_MASK);
- // ignoreDS
- fctx.localRequestOnly = message.dsOnly;
- fctx.ignoreStore = message.ignoreDS;
- fctx.maxNonSplitfileRetries = message.maxRetries;
- fctx.maxSplitfileBlockRetries = Math.max(fctx.maxSplitfileBlockRetries, message.maxRetries);
- this.identifier = message.identifier;
- this.verbosity = message.verbosity;
- // FIXME do something with verbosity !!
- // Has already been checked
- if(message.returnType != ClientGetMessage.RETURN_TYPE_DIRECT)
- throw new IllegalStateException("Unknown return type:
"+message.returnType);
- fctx.maxOutputLength = message.maxSize;
- fctx.maxTempLength = message.maxTempSize;
- f = new Fetcher(uri, fctx);
- Thread t = new Thread(this, "FCP fetcher for "+uri+"
("+identifier+") on "+handler);
- t.setDaemon(true);
- t.start();
- }
-
- public void cancel() {
- fctx.cancel();
- }
-
- public void run() {
- try {
- FetchResult fr = f.run();
- // Success!!!
- FCPMessage msg = new DataFoundMessage(handler, fr, identifier);
- handler.outputHandler.queue(msg);
- // Send all the data at once
- // FIXME there should be other options
- msg = new AllDataMessage(handler, fr.asBucket(), identifier);
- handler.outputHandler.queue(msg);
- } catch (FetchException e) {
- // Error
- Logger.minor(this, "Caught "+e, e);
- FCPMessage msg = new FetchErrorMessage(handler, e, identifier);
- handler.outputHandler.queue(msg);
- }
- }
-
-}
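The threading pattern in ClientGet above -- each request runs on its own daemon thread, started from the constructor, and reports success or failure back as a message -- can be sketched with plain JDK classes (all names below are hypothetical):

    public class FetchWorkerSketch implements Runnable {
        private final String uri;

        FetchWorkerSketch(String uri) {
            this.uri = uri;
            Thread t = new Thread(this, "Fetcher for " + uri);
            t.setDaemon(true); // do not keep the node alive for this request
            t.start();
        }

        public void run() {
            try {
                // The real code would run the Fetcher here and queue DataFound/AllData.
                System.out.println("DataFound " + uri);
            } catch (RuntimeException e) {
                // On failure the real code queues an error message instead.
                System.out.println("GetFailed " + uri + ": " + e);
            }
        }

        public static void main(String[] args) throws InterruptedException {
            new FetchWorkerSketch("KSK@gpl.txt");
            Thread.sleep(100); // give the daemon thread time to report
        }
    }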
Copied: branches/async-client-layer/src/freenet/node/fcp/ClientGet.java (from
rev 7874, trunk/freenet/src/freenet/node/fcp/ClientGet.java)
Deleted: branches/async-client-layer/src/freenet/node/fcp/ClientGetMessage.java
===================================================================
--- trunk/freenet/src/freenet/node/fcp/ClientGetMessage.java 2006-01-18
16:38:29 UTC (rev 7873)
+++ branches/async-client-layer/src/freenet/node/fcp/ClientGetMessage.java
2006-01-20 19:03:11 UTC (rev 7885)
@@ -1,130 +0,0 @@
-package freenet.node.fcp;
-
-import java.net.MalformedURLException;
-
-import freenet.keys.FreenetURI;
-import freenet.node.Node;
-import freenet.support.SimpleFieldSet;
-
-/**
- * ClientGet message.
- *
- * Example:
- *
- * ClientGet
- * IgnoreDS=false // true = ignore the datastore
- * DSOnly=false // true = only check the datastore, don't route (~= htl 0)
- * URI=KSK at gpl.txt
- * Identifier=Request Number One
- * Verbosity=0 // no status, just tell us when it's done
- * ReturnType=direct // return all at once over the FCP connection
- * MaxSize=100 // maximum size of returned data (all numbers in hex)
- * MaxTempSize=1000 // maximum size of intermediary data
- * MaxRetries=100 // automatic retry supported as an option
- * EndMessage
- */
-public class ClientGetMessage extends FCPMessage {
-
- final boolean ignoreDS;
- final boolean dsOnly;
- final FreenetURI uri;
- final String identifier;
- final int verbosity;
- final int returnType;
- final long maxSize;
- final long maxTempSize;
- final int maxRetries;
-
- // FIXME move these to the actual getter process
- static final int RETURN_TYPE_DIRECT = 0;
-
- public ClientGetMessage(SimpleFieldSet fs) throws MessageInvalidException {
- ignoreDS = Boolean.getBoolean(fs.get("IgnoreDS"));
- dsOnly = Boolean.getBoolean(fs.get("DSOnly"));
- try {
- uri = new FreenetURI(fs.get("URI"));
- } catch (MalformedURLException e) {
- throw new MessageInvalidException(ProtocolErrorMessage.URI_PARSE_ERROR, e.getMessage());
- }
- identifier = fs.get("Identifier");
- if(identifier == null)
- throw new MessageInvalidException(ProtocolErrorMessage.MISSING_FIELD, "No Identifier");
- String verbosityString = fs.get("Verbosity");
- if(verbosityString == null)
- verbosity = 0;
- else {
- try {
- verbosity = Integer.parseInt(verbosityString, 16);
- } catch (NumberFormatException e) {
- throw new MessageInvalidException(ProtocolErrorMessage.ERROR_PARSING_NUMBER, "Error parsing Verbosity field: "+e.getMessage());
- }
- }
- String returnTypeString = fs.get("ReturnType");
- if(returnTypeString == null || returnTypeString.equalsIgnoreCase("direct"))
- returnType = RETURN_TYPE_DIRECT;
- else
- throw new MessageInvalidException(ProtocolErrorMessage.MESSAGE_PARSE_ERROR, "Unknown return-type");
- String maxSizeString = fs.get("MaxSize");
- if(maxSizeString == null)
- // default to unlimited
- maxSize = Long.MAX_VALUE;
- else {
- try {
- maxSize = Long.parseLong(maxSizeString, 16);
- } catch (NumberFormatException e) {
- throw new MessageInvalidException(ProtocolErrorMessage.ERROR_PARSING_NUMBER, "Error parsing MaxSize field: "+e.getMessage());
- }
- }
- String maxTempSizeString = fs.get("MaxTempSize");
- if(maxTempSizeString == null)
- // default to unlimited
- maxTempSize = Long.MAX_VALUE;
- else {
- try {
- maxTempSize = Long.parseLong(maxTempSizeString, 16);
- } catch (NumberFormatException e) {
- throw new MessageInvalidException(ProtocolErrorMessage.ERROR_PARSING_NUMBER, "Error parsing MaxTempSize field: "+e.getMessage());
- }
- }
- String maxRetriesString = fs.get("MaxRetries");
- if(maxRetriesString == null)
- // default to 0
- maxRetries = 0;
- else {
- try {
- maxRetries = Integer.parseInt(maxRetriesString, 16);
- } catch (NumberFormatException e) {
- throw new MessageInvalidException(ProtocolErrorMessage.ERROR_PARSING_NUMBER, "Error parsing MaxRetries field: "+e.getMessage());
- }
- }
- }
-
- public SimpleFieldSet getFieldSet() {
- SimpleFieldSet fs = new SimpleFieldSet();
- fs.put("IgnoreDS", Boolean.toString(ignoreDS));
- fs.put("URI", uri.toString(false));
- fs.put("Identifier", identifier);
- fs.put("Verbosity", Integer.toHexString(verbosity));
- fs.put("ReturnType", getReturnTypeString());
- fs.put("MaxSize", Long.toHexString(maxSize));
- fs.put("MaxTempSize", Long.toHexString(maxTempSize));
- fs.put("MaxRetries", Integer.toHexString(maxRetries));
- return fs;
- }
-
- private String getReturnTypeString() {
- if(returnType == RETURN_TYPE_DIRECT)
- return "direct";
- else
- throw new IllegalStateException("Unknown return type:
"+returnType);
- }
-
- public String getName() {
- return "ClientGet";
- }
-
- public void run(FCPConnectionHandler handler, Node node) {
- handler.startClientGet(this);
- }
-
-}
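Every numeric FCP field above follows the same convention: hex encoding, with a documented default when the field is absent. A minimal sketch of that convention (names hypothetical):

    public class HexFieldSketch {
        /** Parse a hex field, falling back to a default if the field is missing. */
        static long parseHex(String value, long defaultValue) {
            if (value == null)
                return defaultValue;
            return Long.parseLong(value, 16);
        }

        public static void main(String[] args) {
            System.out.println(parseHex("100", 0));             // 256: "100" is hex
            System.out.println(parseHex(null, Long.MAX_VALUE)); // default: unlimited
        }
    }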
Copied: branches/async-client-layer/src/freenet/node/fcp/ClientGetMessage.java
(from rev 7875, trunk/freenet/src/freenet/node/fcp/ClientGetMessage.java)
Deleted:
branches/async-client-layer/src/freenet/node/fcp/ClientHelloMessage.java
===================================================================
--- trunk/freenet/src/freenet/node/fcp/ClientHelloMessage.java 2006-01-18
16:38:29 UTC (rev 7873)
+++ branches/async-client-layer/src/freenet/node/fcp/ClientHelloMessage.java
2006-01-20 19:03:11 UTC (rev 7885)
@@ -1,45 +0,0 @@
-package freenet.node.fcp;
-
-import freenet.node.Node;
-import freenet.support.SimpleFieldSet;
-
-/**
- * ClientHello
- * Name=Toad's Test Client
- * ExpectedVersion=0.7.0
- * End
- */
-public class ClientHelloMessage extends FCPMessage {
-
- String clientName;
- String clientExpectedVersion;
-
- public ClientHelloMessage(SimpleFieldSet fs) throws MessageInvalidException {
- clientName = fs.get("Name");
- clientExpectedVersion = fs.get("ExpectedVersion");
- if(clientName == null)
- throw new MessageInvalidException(ProtocolErrorMessage.MISSING_FIELD, "ClientHello must contain a Name field");
- if(clientExpectedVersion == null)
- throw new MessageInvalidException(ProtocolErrorMessage.MISSING_FIELD, "ClientHello must contain an ExpectedVersion field");
- // FIXME check the expected version
- }
-
- public SimpleFieldSet getFieldSet() {
- SimpleFieldSet sfs = new SimpleFieldSet();
- sfs.put("Name", clientName);
- sfs.put("ExpectedVersion", clientExpectedVersion);
- return sfs;
- }
-
- public String getName() {
- return "ClientHello";
- }
-
- public void run(FCPConnectionHandler handler, Node node) {
- // We know the Hello is valid.
- handler.setClientName(clientName);
- FCPMessage msg = new NodeHelloMessage();
- handler.outputHandler.queue(msg);
- }
-
-}
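The constructor above does little more than enforce required fields; the check can be sketched over a plain Map (names hypothetical):

    import java.util.HashMap;
    import java.util.Map;

    public class HelloValidationSketch {
        /** Return the value for key, or fail as the ClientHello validation above does. */
        static String require(Map fields, String key) {
            String v = (String) fields.get(key);
            if (v == null)
                throw new IllegalArgumentException("ClientHello must contain a " + key + " field");
            return v;
        }

        public static void main(String[] args) {
            Map fs = new HashMap();
            fs.put("Name", "Toad's Test Client");
            fs.put("ExpectedVersion", "0.7.0");
            System.out.println(require(fs, "Name") + " / " + require(fs, "ExpectedVersion"));
        }
    }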
Copied:
branches/async-client-layer/src/freenet/node/fcp/ClientHelloMessage.java (from
rev 7875, trunk/freenet/src/freenet/node/fcp/ClientHelloMessage.java)
Copied: branches/async-client-layer/src/freenet/node/fcp/ClientPut.java (from
rev 7874, trunk/freenet/src/freenet/node/fcp/ClientPut.java)
Copied: branches/async-client-layer/src/freenet/node/fcp/ClientPutMessage.java
(from rev 7875, trunk/freenet/src/freenet/node/fcp/ClientPutMessage.java)
Deleted:
branches/async-client-layer/src/freenet/node/fcp/FCPConnectionHandler.java
===================================================================
--- trunk/freenet/src/freenet/node/fcp/FCPConnectionHandler.java
2006-01-18 16:38:29 UTC (rev 7873)
+++ branches/async-client-layer/src/freenet/node/fcp/FCPConnectionHandler.java
2006-01-20 19:03:11 UTC (rev 7885)
@@ -1,108 +0,0 @@
-package freenet.node.fcp;
-
-import java.io.IOException;
-import java.net.Socket;
-import java.util.HashMap;
-
-import freenet.client.FetcherContext;
-import freenet.node.Node;
-import freenet.support.BucketFactory;
-import freenet.support.Logger;
-
-public class FCPConnectionHandler {
-
- final Socket sock;
- final FCPConnectionInputHandler inputHandler;
- final FCPConnectionOutputHandler outputHandler;
- final Node node;
- private boolean isClosed;
- private boolean inputClosed;
- private boolean outputClosed;
- private String clientName;
- final BucketFactory bf;
- final HashMap requestsByIdentifier;
- final FetcherContext defaultFetchContext;
-
- public FCPConnectionHandler(Socket s, Node node) {
- this.sock = s;
- this.node = node;
- this.inputHandler = new FCPConnectionInputHandler(this);
- this.outputHandler = new FCPConnectionOutputHandler(this);
- isClosed = false;
- this.bf = node.tempBucketFactory;
- requestsByIdentifier = new HashMap();
- defaultFetchContext =
- node.makeClient((short)0,(short)0).getFetcherContext();
- }
-
- public void close() {
- ClientRequest[] requests;
- synchronized(this) {
- isClosed = true;
- requests = new ClientRequest[requestsByIdentifier.size()];
- requests = (ClientRequest[]) requestsByIdentifier.values().toArray(requests);
- }
- for(int i=0;i<requests.length;i++)
- requests[i].cancel();
- }
-
- public boolean isClosed() {
- return isClosed;
- }
-
- public void closedInput() {
- try {
- sock.shutdownInput();
- } catch (IOException e) {
- // Ignore
- }
- synchronized(this) {
- inputClosed = true;
- if(!outputClosed) return;
- }
- try {
- sock.close();
- } catch (IOException e) {
- // Ignore
- }
- }
-
- public void closedOutput() {
- try {
- sock.shutdownOutput();
- } catch (IOException e) {
- // Ignore
- }
- synchronized(this) {
- outputClosed = true;
- if(!inputClosed) return;
- }
- try {
- sock.close();
- } catch (IOException e) {
- // Ignore
- }
- }
-
- public void setClientName(String name) {
- this.clientName = name;
- }
-
- public String getClientName() {
- return clientName;
- }
-
- public void startClientGet(ClientGetMessage message) {
- String id = message.identifier;
- if(requestsByIdentifier.containsKey(id)) {
- Logger.normal(this, "Identifier collision on "+this);
- FCPMessage msg = new IdentifierCollisionMessage(id);
- outputHandler.queue(msg);
- return;
- }
- synchronized(this) {
- if(isClosed) return;
- ClientGet cg = new ClientGet(this, message);
- }
- }
-}
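closedInput() and closedOutput() above implement a two-sided shutdown: each direction is shut down independently, and the socket is fully closed only once both are done. A state-only sketch of that bookkeeping, with no real socket (names hypothetical):

    public class HalfCloseSketch {
        private boolean inputClosed, outputClosed;

        /** Returns true when it is safe to close the underlying socket. */
        synchronized boolean closedInput() {
            inputClosed = true;
            return outputClosed;
        }

        synchronized boolean closedOutput() {
            outputClosed = true;
            return inputClosed;
        }

        public static void main(String[] args) {
            HalfCloseSketch h = new HalfCloseSketch();
            System.out.println(h.closedInput());  // false: output still open
            System.out.println(h.closedOutput()); // true: both sides now closed
        }
    }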
Copied:
branches/async-client-layer/src/freenet/node/fcp/FCPConnectionHandler.java
(from rev 7874, trunk/freenet/src/freenet/node/fcp/FCPConnectionHandler.java)
Deleted:
branches/async-client-layer/src/freenet/node/fcp/FCPConnectionInputHandler.java
===================================================================
--- trunk/freenet/src/freenet/node/fcp/FCPConnectionInputHandler.java
2006-01-18 16:38:29 UTC (rev 7873)
+++
branches/async-client-layer/src/freenet/node/fcp/FCPConnectionInputHandler.java
2006-01-20 19:03:11 UTC (rev 7885)
@@ -1,78 +0,0 @@
-package freenet.node.fcp;
-
-import java.io.IOException;
-import java.io.InputStream;
-
-import freenet.support.Logger;
-import freenet.support.SimpleFieldSet;
-import freenet.support.io.LineReadingInputStream;
-
-public class FCPConnectionInputHandler implements Runnable {
-
- final FCPConnectionHandler handler;
-
- public FCPConnectionInputHandler(FCPConnectionHandler handler) {
- this.handler = handler;
- Thread t = new Thread(this, "FCP input handler for
"+handler.sock.getRemoteSocketAddress()+":"+handler.sock.getPort());
- t.setDaemon(true);
- t.start();
- }
-
- public void run() {
- try {
- realRun();
- } catch (IOException e) {
- Logger.minor(this, "Caught "+e, e);
- } catch (Throwable t) {
- Logger.error(this, "Caught "+t, t);
- }
- handler.close();
- handler.closedInput();
- }
-
- public void realRun() throws IOException {
- InputStream is = handler.sock.getInputStream();
- LineReadingInputStream lis = new LineReadingInputStream(is);
-
- boolean firstMessage = true;
-
- while(true) {
- SimpleFieldSet fs;
- // Read a message
- String messageType = lis.readLine(64, 64);
- fs = new SimpleFieldSet(lis, 4096, 128);
- FCPMessage msg;
- try {
- msg = FCPMessage.create(messageType, fs);
- if(msg == null) continue;
- } catch (MessageInvalidException e) {
- FCPMessage err = new ProtocolErrorMessage(e.protocolCode, false, e.getMessage());
- handler.outputHandler.queue(err);
- continue;
- }
- if(firstMessage && !(msg instanceof
ClientHelloMessage)) {
- FCPMessage err = new
ProtocolErrorMessage(ProtocolErrorMessage.CLIENT_HELLO_MUST_BE_FIRST_MESSAGE,
true, null);
- handler.outputHandler.queue(err);
- handler.close();
- continue;
- }
- if(msg instanceof DataCarryingMessage) {
- ((DataCarryingMessage)msg).readFrom(lis, handler.bf);
- }
- if((!firstMessage) && msg instanceof
ClientHelloMessage) {
- FCPMessage err = new
ProtocolErrorMessage(ProtocolErrorMessage.NO_LATE_CLIENT_HELLOS, false, null);
- handler.outputHandler.queue(err);
- continue;
- }
- try {
- msg.run(handler, handler.node);
- } catch (MessageInvalidException e) {
- FCPMessage err = new ProtocolErrorMessage(e.protocolCode, false, e.getMessage());
- handler.outputHandler.queue(err);
- continue;
- }
- firstMessage = false;
- if(handler.isClosed()) return;
- }
- }
-}
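The loop above enforces a simple ordering rule: the first message must be a ClientHello, and a ClientHello may not be sent again later. Just that rule, in isolation (names hypothetical):

    public class HelloOrderSketch {
        private boolean firstMessage = true;

        String accept(String messageType) {
            if (firstMessage && !messageType.equals("ClientHello"))
                return "ProtocolError: ClientHello must be the first message";
            if (!firstMessage && messageType.equals("ClientHello"))
                return "ProtocolError: no late ClientHellos";
            firstMessage = false;
            return "OK: " + messageType;
        }

        public static void main(String[] args) {
            HelloOrderSketch s = new HelloOrderSketch();
            System.out.println(s.accept("ClientHello")); // OK
            System.out.println(s.accept("ClientGet"));   // OK
            System.out.println(s.accept("ClientHello")); // rejected: late hello
        }
    }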
Copied:
branches/async-client-layer/src/freenet/node/fcp/FCPConnectionInputHandler.java
(from rev 7874,
trunk/freenet/src/freenet/node/fcp/FCPConnectionInputHandler.java)
Deleted: branches/async-client-layer/src/freenet/node/fcp/FCPMessage.java
===================================================================
--- trunk/freenet/src/freenet/node/fcp/FCPMessage.java 2006-01-18 16:38:29 UTC
(rev 7873)
+++ branches/async-client-layer/src/freenet/node/fcp/FCPMessage.java
2006-01-20 19:03:11 UTC (rev 7885)
@@ -1,44 +0,0 @@
-package freenet.node.fcp;
-
-import java.io.IOException;
-import java.io.OutputStream;
-
-import freenet.node.Node;
-import freenet.support.SimpleFieldSet;
-
-public abstract class FCPMessage {
-
- public void send(OutputStream os) throws IOException {
- SimpleFieldSet sfs = getFieldSet();
- sfs.setEndMarker(getEndString());
- String msg = sfs.toString();
- os.write((getName()+"\n").getBytes("UTF-8"));
- os.write(msg.getBytes("UTF-8"));
- }
-
- String getEndString() {
- return "EndMessage";
- }
-
- public abstract SimpleFieldSet getFieldSet();
-
- public abstract String getName();
-
- public static FCPMessage create(String name, SimpleFieldSet fs) throws MessageInvalidException {
- if(name.equals("ClientHello"))
- return new ClientHelloMessage(fs);
- if(name.equals("ClientGet"))
- return new ClientGetMessage(fs);
- if(name.equals("Void"))
- return null;
-// if(name.equals("ClientPut"))
-// return new ClientPutFCPMessage(fs);
- // TODO Auto-generated method stub
- return null;
- }
-
- /** Do whatever it is that we do with this type of message.
- * @throws MessageInvalidException */
- public abstract void run(FCPConnectionHandler handler, Node node) throws MessageInvalidException;
-
-}
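send() above fixes the wire encoding: the message name on its own line, then Key=Value pairs, then the end marker. A JDK-only sketch of the same framing (names hypothetical; the real code serialises through SimpleFieldSet):

    import java.io.ByteArrayOutputStream;
    import java.io.IOException;
    import java.io.OutputStream;

    public class WireFormatSketch {
        static void send(OutputStream os, String name, String[][] fields) throws IOException {
            StringBuffer sb = new StringBuffer();
            sb.append(name).append('\n');
            for (int i = 0; i < fields.length; i++)
                sb.append(fields[i][0]).append('=').append(fields[i][1]).append('\n');
            sb.append("EndMessage\n");
            os.write(sb.toString().getBytes("UTF-8"));
        }

        public static void main(String[] args) throws IOException {
            ByteArrayOutputStream baos = new ByteArrayOutputStream();
            send(baos, "ClientHello", new String[][] {
                { "Name", "Test" }, { "ExpectedVersion", "0.7.0" } });
            System.out.print(baos.toString("UTF-8"));
        }
    }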
Copied: branches/async-client-layer/src/freenet/node/fcp/FCPMessage.java (from
rev 7875, trunk/freenet/src/freenet/node/fcp/FCPMessage.java)
Copied: branches/async-client-layer/src/freenet/node/fcp/GetFailedMessage.java
(from rev 7875, trunk/freenet/src/freenet/node/fcp/GetFailedMessage.java)
Copied: branches/async-client-layer/src/freenet/node/fcp/PutFailedMessage.java
(from rev 7874, trunk/freenet/src/freenet/node/fcp/PutFailedMessage.java)
Copied:
branches/async-client-layer/src/freenet/node/fcp/PutSuccessfulMessage.java
(from rev 7875, trunk/freenet/src/freenet/node/fcp/PutSuccessfulMessage.java)
Deleted: branches/async-client-layer/src/freenet/support/BucketTools.java
===================================================================
--- trunk/freenet/src/freenet/support/BucketTools.java 2006-01-18 00:45:46 UTC
(rev 7871)
+++ branches/async-client-layer/src/freenet/support/BucketTools.java
2006-01-20 19:03:11 UTC (rev 7885)
@@ -1,445 +0,0 @@
-package freenet.support;
-
-import java.io.DataInputStream;
-import java.io.EOFException;
-import java.io.File;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-import java.nio.ByteBuffer;
-import java.nio.channels.Channels;
-import java.nio.channels.ReadableByteChannel;
-import java.nio.channels.WritableByteChannel;
-import java.security.MessageDigest;
-import java.security.NoSuchAlgorithmException;
-import java.util.ArrayList;
-import java.util.List;
-
-/**
- * Helper functions for working with Buckets.
- */
-public class BucketTools {
-
- static final int BLOCK_SIZE = 4096;
-
- /**
- * Copy from the input stream of <code>src</code> to the output stream
of
- * <code>dest</code>.
- *
- * @param src
- * @param dst
- * @throws IOException
- */
- public final static void copy(Bucket src, Bucket dst) throws IOException {
- OutputStream out = dst.getOutputStream();
- InputStream in = src.getInputStream();
- ReadableByteChannel readChannel = Channels.newChannel(in);
- WritableByteChannel writeChannel = Channels.newChannel(out);
-
- ByteBuffer buffer = ByteBuffer.allocateDirect(BLOCK_SIZE);
- while (readChannel.read(buffer) != -1) {
- buffer.flip();
- writeChannel.write(buffer);
- buffer.clear();
- }
-
- writeChannel.close();
- readChannel.close();
- out.close();
- in.close();
- }
-
- public final static void zeroPad(Bucket b, long size) throws IOException {
- OutputStream out = b.getOutputStream();
-
- // Initialized to zero by default.
- byte[] buffer = new byte[16384];
-
- long count = 0;
- while (count < size) {
- long nRequired = buffer.length;
- if (nRequired > size - count) {
- nRequired = size - count;
- }
- out.write(buffer, 0, (int) nRequired);
- count += nRequired;
- }
-
- out.close();
- }
-
- public final static void paddedCopy(Bucket from, Bucket to, long nBytes,
- int blockSize) throws IOException {
-
- if (nBytes > blockSize) {
- throw new IllegalArgumentException("nBytes >
blockSize");
- }
-
- OutputStream out = null;
- InputStream in = null;
-
- try {
-
- out = to.getOutputStream();
- byte[] buffer = new byte[16384];
- in = from.getInputStream();
-
- long count = 0;
- while (count != nBytes) {
- long nRequired = nBytes - count;
- if (nRequired > buffer.length) {
- nRequired = buffer.length;
- }
- long nRead = in.read(buffer, 0, (int) nRequired);
- if (nRead == -1) {
- throw new IOException("Not enough data
in source bucket.");
- }
- out.write(buffer, 0, (int) nRead);
- count += nRead;
- }
-
- if (count < blockSize) {
- // hmmm... better to just allocate a new buffer
- // instead of explicitly zeroing the old one?
- // Zero pad to blockSize
- long padLength = buffer.length;
- if (padLength > blockSize - nBytes) {
- padLength = blockSize - nBytes;
- }
- for (int i = 0; i < padLength; i++) {
- buffer[i] = 0;
- }
-
- while (count != blockSize) {
- long nRequired = blockSize - count;
- if (blockSize - count > buffer.length) {
- nRequired = buffer.length;
- }
- out.write(buffer, 0, (int) nRequired);
- count += nRequired;
- }
- }
- } finally {
- if (in != null)
- in.close();
- if (out != null)
- out.close();
- }
- }
-
- public static class BucketFactoryWrapper implements BucketFactory {
- public BucketFactoryWrapper(BucketFactory bf) {
- BucketFactoryWrapper.this.bf = bf;
- }
- public Bucket makeBucket(long size) throws IOException {
- return bf.makeBucket(size);
- }
-
- public void freeBucket(Bucket b) throws IOException {
- if (b instanceof RandomAccessFileBucket) {
- ((RandomAccessFileBucket) b).release();
- return;
- }
- bf.freeBucket(b);
- }
- private BucketFactory bf = null;
- }
-
- public static Bucket[] makeBuckets(BucketFactory bf, int count, int size)
- throws IOException {
- Bucket[] ret = new Bucket[count];
- for (int i = 0; i < count; i++) {
- ret[i] = bf.makeBucket(size);
- }
- return ret;
- }
-
- /**
- * Free buckets. Get yer free buckets here! No charge! All you can carry
- * free buckets!
- * <p>
- * If an exception happens the method will attempt to free the remaining
- * buckets then return the first exception. Buckets successfully freed are
- * made <code>null</code> in the array.
- * </p>
- *
- * @param bf
- * @param buckets
- * @throws IOException
- * the first exception The <code>buckets</code> array will
- */
- public static void freeBuckets(BucketFactory bf, Bucket[] buckets)
- throws IOException {
- if (buckets == null) {
- return;
- }
-
- IOException firstIoe = null;
-
- for (int i = 0; i < buckets.length; i++) {
- // Make sure we free any temp buckets on exception
- try {
- if (buckets[i] != null) {
- bf.freeBucket(buckets[i]);
- }
- buckets[i] = null;
- } catch (IOException e) {
- if (firstIoe == null) {
- firstIoe = e;
- }
- }
- }
-
- if (firstIoe != null) {
- throw firstIoe;
- }
- }
-
- // Note: Not all buckets are allocated by the bf.
- // You must use the BucketFactoryWrapper class above
- // to free the returned buckets.
- //
- // Always returns blocks, blocks, even if it has to create
- // zero padded ones.
- public static Bucket[] splitFile(
- File file,
- int blockSize,
- long offset,
- int blocks,
- boolean readOnly,
- BucketFactoryWrapper bf)
- throws IOException {
-
- long len = file.length() - offset;
- if (len > blocks * blockSize) {
- len = blocks * blockSize;
- }
-
- long padBlocks = 0;
- if ((blocks * blockSize) - len >= blockSize) {
- padBlocks = ((blocks * blockSize) - len) / blockSize;
- }
-
- Bucket[] ret = new Bucket[blocks];
- Bucket[] rab =
- RandomAccessFileBucket.segment(
- file,
- blockSize,
- offset,
- (int) (blocks - padBlocks),
- true);
- System.arraycopy(rab, 0, ret, 0, rab.length);
-
- boolean groovy = false;
- try {
- if (len % blockSize != 0) {
- // Copy and zero pad final partial block
- Bucket partial = ret[rab.length - 1];
- ret[rab.length - 1] = bf.makeBucket(blockSize);
- paddedCopy(
- partial,
- ret[rab.length - 1],
- len % blockSize,
- blockSize);
- }
-
- // Trailing zero padded blocks
- for (int i = rab.length; i < ret.length; i++) {
- ret[i] = bf.makeBucket(blockSize);
- zeroPad(ret[i], blockSize);
- }
- groovy = true;
- } finally {
- if (!groovy) {
- freeBuckets(bf, ret);
- }
- }
- return ret;
- }
-
- public final static int[] nullIndices(Bucket[] array) {
- List list = new ArrayList();
- for (int i = 0; i < array.length; i++) {
- if (array[i] == null) {
- list.add(new Integer(i));
- }
- }
-
- int[] ret = new int[list.size()];
- for (int i = 0; i < list.size(); i++) {
- ret[i] = ((Integer) list.get(i)).intValue();
- }
- return ret;
- }
-
- public final static int[] nonNullIndices(Bucket[] array) {
- List list = new ArrayList();
- for (int i = 0; i < array.length; i++) {
- if (array[i] != null) {
- list.add(new Integer(i));
- }
- }
-
- int[] ret = new int[list.size()];
- for (int i = 0; i < list.size(); i++) {
- ret[i] = ((Integer) list.get(i)).intValue();
- }
- return ret;
- }
-
- public final static Bucket[] nonNullBuckets(Bucket[] array) {
- List list = new ArrayList(array.length);
- for (int i = 0; i < array.length; i++) {
- if (array[i] != null) {
- list.add(array[i]);
- }
- }
-
- Bucket[] ret = new Bucket[list.size()];
- return (Bucket[]) list.toArray(ret);
- }
-
- /**
- * Read the entire bucket in as a byte array.
- * Not a good idea unless it is very small!
- * Don't call if concurrent writes may be happening.
- * @throws IOException If there was an error reading from the bucket.
- * @throws OutOfMemoryError If it was not possible to allocate enough
- * memory to contain the entire bucket.
- */
- public final static byte[] toByteArray(Bucket bucket) throws IOException {
- long size = bucket.size();
- if(size > Integer.MAX_VALUE) throw new OutOfMemoryError();
- byte[] data = new byte[(int)size];
- InputStream is = bucket.getInputStream();
- try {
- DataInputStream dis = new DataInputStream(is);
- dis.readFully(data);
- } finally {
- is.close();
- }
- return data;
- }
-
- public static int toByteArray(Bucket bucket, byte[] output) throws IOException {
- long size = bucket.size();
- if(size > output.length)
- throw new IllegalArgumentException("Data does not fit
in provided buffer");
- InputStream is = bucket.getInputStream();
- int moved = 0;
- while(true) {
- if(moved == size) return moved;
- int x = is.read(output, moved, (int)(size - moved));
- if(x == -1) return moved;
- moved += x;
- }
- }
-
- public static Bucket makeImmutableBucket(BucketFactory bucketFactory, byte[] data) throws IOException {
- Bucket bucket = bucketFactory.makeBucket(data.length);
- OutputStream os = bucket.getOutputStream();
- os.write(data);
- os.close();
- bucket.setReadOnly();
- return bucket;
- }
-
- public static byte[] hash(Bucket data) throws IOException {
- InputStream is = null;
- try {
- MessageDigest md = MessageDigest.getInstance("SHA-256");
- is = data.getInputStream();
- long bucketLength = data.size();
- long bytesRead = 0;
- byte[] buf = new byte[4096];
- while(bytesRead < bucketLength || bucketLength == -1) {
- int readBytes = is.read(buf);
- if(readBytes < 0) break;
- bytesRead += readBytes;
- md.update(buf, 0, readBytes);
- }
- if(bytesRead < bucketLength && bucketLength > 0)
- throw new EOFException();
- if(bytesRead != bucketLength && bucketLength > 0)
- throw new IOException("Read "+bytesRead+" but
bucket length "+bucketLength+"!");
- return md.digest();
- } catch (NoSuchAlgorithmException e) {
- Logger.error(BucketTools.class, "No such digest:
SHA-256 !!");
- throw new Error("No such digest: SHA-256 !!");
- } finally {
- if(is != null) is.close();
- }
- }
-
- /** Copy the given quantity of data from the given bucket to the given OutputStream.
- * @throws IOException If there was an error reading from the bucket or writing to the stream. */
- public static void copyTo(Bucket decodedData, OutputStream os, long truncateLength) throws IOException {
- if(truncateLength == 0) return;
- if(truncateLength < 0) truncateLength = Long.MAX_VALUE;
- InputStream is = decodedData.getInputStream();
- try {
- byte[] buf = new byte[4096];
- long moved = 0;
- while(moved < truncateLength) {
- // DO NOT move the (int) inside the Math.min()! big numbers truncate to negative numbers.
- int bytes = (int) Math.min(buf.length, truncateLength - moved);
- if(bytes <= 0)
- throw new IllegalStateException("bytes="+bytes+", truncateLength="+truncateLength+", moved="+moved);
- bytes = is.read(buf, 0, bytes);
- if(bytes <= 0) {
- if(truncateLength == Long.MAX_VALUE)
- break;
- throw new IOException("Could not move
required quantity of data: "+bytes+" (moved "+moved+" of "+truncateLength+")");
- }
- os.write(buf, 0, bytes);
- moved += bytes;
- }
- } finally {
- is.close();
- }
- }
-
- /**
- * Split the data into a series of read-only Buckets.
- * @param origData The original data Bucket.
- * @param splitSize The number of bytes to put into each bucket.
- *
- * FIXME This could be made many orders of magnitude more efficient on
- * time and space if the underlying Bucket happens to be a passed-in
- * plaintext file!
- *
- * Note that this method will allocate a buffer of size splitSize.
- * @throws IOException If there is an error creating buckets, reading from
- * the provided bucket, or writing to created buckets.
- */
- public static Bucket[] split(Bucket origData, int splitSize, BucketFactory bf) throws IOException {
- long length = origData.size();
- if(length > ((long)Integer.MAX_VALUE) * splitSize)
- throw new IllegalArgumentException("Way too big!:
"+length+" for "+splitSize);
- int bucketCount = (int) (length / splitSize);
- if(length % splitSize > 0) bucketCount++;
- Bucket[] buckets = new Bucket[bucketCount];
- InputStream is = origData.getInputStream();
- try {
- DataInputStream dis = new DataInputStream(is);
- long remainingLength = length;
- byte[] buf = new byte[splitSize];
- for(int i=0;i<bucketCount;i++) {
- int len = (int) Math.min(splitSize, remainingLength);
- Bucket bucket = bf.makeBucket(len);
- buckets[i] = bucket;
- dis.readFully(buf, 0, len);
- remainingLength -= len;
- OutputStream os = bucket.getOutputStream();
- try {
- os.write(buf, 0, len);
- } finally {
- os.close();
- }
- }
- } finally {
- is.close();
- }
- return buckets;
- }
-}
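The comment in copyTo() above is worth spelling out: the min must be computed in long arithmetic and only then cast, because casting (truncateLength - moved) to int first can overflow to a negative count. A standalone sketch of the same bounded-copy loop (names hypothetical):

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.IOException;
    import java.io.InputStream;
    import java.io.OutputStream;

    public class BoundedCopySketch {
        /** Copy at most limit bytes from in to out; returns the number moved. */
        static long copyUpTo(InputStream in, OutputStream out, long limit) throws IOException {
            byte[] buf = new byte[4096];
            long moved = 0;
            while (moved < limit) {
                // min in long first, then cast: casting the operands could go negative
                int want = (int) Math.min(buf.length, limit - moved);
                int got = in.read(buf, 0, want);
                if (got <= 0)
                    break;
                out.write(buf, 0, got);
                moved += got;
            }
            return moved;
        }

        public static void main(String[] args) throws IOException {
            ByteArrayOutputStream out = new ByteArrayOutputStream();
            long n = copyUpTo(new ByteArrayInputStream(new byte[10000]), out, 4097);
            System.out.println(n); // 4097
        }
    }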
Copied: branches/async-client-layer/src/freenet/support/BucketTools.java (from
rev 7873, trunk/freenet/src/freenet/support/BucketTools.java)
Deleted: branches/async-client-layer/src/freenet/support/SimpleFieldSet.java
===================================================================
--- trunk/freenet/src/freenet/support/SimpleFieldSet.java 2006-01-18
00:45:46 UTC (rev 7871)
+++ branches/async-client-layer/src/freenet/support/SimpleFieldSet.java
2006-01-20 19:03:11 UTC (rev 7885)
@@ -1,110 +0,0 @@
-package freenet.support;
-
-import java.io.BufferedReader;
-import java.io.EOFException;
-import java.io.IOException;
-import java.io.StringReader;
-import java.io.StringWriter;
-import java.io.Writer;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.Map;
-import java.util.Set;
-
-/**
- * @author amphibian
- *
- * Very very simple FieldSet type thing, which uses the standard
- * Java facilities.
- */
-public class SimpleFieldSet {
-
- final Map map;
-
- public SimpleFieldSet(BufferedReader br) throws IOException {
- map = new HashMap();
- read(br);
- }
-
- /**
- * Empty constructor
- */
- public SimpleFieldSet() {
- map = new HashMap();
- }
-
- /**
- * Construct from a string.
- * @throws IOException if the string is too short or invalid.
- */
- public SimpleFieldSet(String content) throws IOException {
- map = new HashMap();
- StringReader sr = new StringReader(content);
- BufferedReader br = new BufferedReader(sr);
- read(br);
- }
-
- /**
- * Read from disk
- * Format:
- * blah=blah
- * blah=blah
- * End
- */
- private void read(BufferedReader br) throws IOException {
- boolean firstLine = true;
- while(true) {
- String line = br.readLine();
- if(line == null) {
- if(firstLine) throw new EOFException();
- throw new IOException();
- }
- firstLine = false;
- int index = line.indexOf('=');
- if(index >= 0) {
- // Mapping
- String before = line.substring(0, index);
- String after = line.substring(index+1);
- map.put(before, after);
- } else {
- if(line.equals("End")) return;
- throw new IOException("Unknown end-marker: \""+line+"\"");
- }
-
- }
- }
-
- public String get(String key) {
- return (String) map.get(key);
- }
-
- public void put(String key, String value) {
- map.put(key, value);
- }
-
- /**
- * Write the contents of the SimpleFieldSet to a Writer.
- * @param osr
- */
- public void writeTo(Writer w) throws IOException {
- Set s = map.entrySet();
- Iterator i = s.iterator();
- for(;i.hasNext();) {
- Map.Entry entry = (Map.Entry) i.next();
- String key = (String) entry.getKey();
- String value = (String) entry.getValue();
- w.write(key+"="+value+"\n");
- }
- w.write("End\n");
- }
-
- public String toString() {
- StringWriter sw = new StringWriter();
- try {
- writeTo(sw);
- } catch (IOException e) {
- Logger.error(this, "WTF?!: "+e+" in toString()!", e);
- }
- return sw.toString();
- }
-}
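The "key=value ... End" format read above round-trips cleanly through a plain Map; a JDK-only sketch of the parser (names hypothetical):

    import java.io.BufferedReader;
    import java.io.IOException;
    import java.io.StringReader;
    import java.util.HashMap;
    import java.util.Map;

    public class FieldSetSketch {
        static Map parse(String content) throws IOException {
            Map map = new HashMap();
            BufferedReader br = new BufferedReader(new StringReader(content));
            while (true) {
                String line = br.readLine();
                if (line == null)
                    throw new IOException("Missing End marker");
                if (line.equals("End"))
                    return map;
                int idx = line.indexOf('=');
                if (idx < 0)
                    throw new IOException("Unknown end-marker: \"" + line + "\"");
                map.put(line.substring(0, idx), line.substring(idx + 1));
            }
        }

        public static void main(String[] args) throws IOException {
            System.out.println(parse("Name=Test\nExpectedVersion=0.7.0\nEnd\n"));
        }
    }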
Copied: branches/async-client-layer/src/freenet/support/SimpleFieldSet.java
(from rev 7873, trunk/freenet/src/freenet/support/SimpleFieldSet.java)
Copied: branches/async-client-layer/src/freenet/support/io/LineReader.java
(from rev 7873, trunk/freenet/src/freenet/support/io/LineReader.java)
Deleted:
branches/async-client-layer/src/freenet/support/io/LineReadingInputStream.java
===================================================================
--- trunk/freenet/src/freenet/support/io/LineReadingInputStream.java
2006-01-18 00:45:46 UTC (rev 7871)
+++
branches/async-client-layer/src/freenet/support/io/LineReadingInputStream.java
2006-01-20 19:03:11 UTC (rev 7885)
@@ -1,39 +0,0 @@
-package freenet.support.io;
-
-import java.io.EOFException;
-import java.io.FilterInputStream;
-import java.io.IOException;
-import java.io.InputStream;
-
-/**
- * A FilterInputStream which provides readLine().
- */
-public class LineReadingInputStream extends FilterInputStream {
-
- public LineReadingInputStream(InputStream in) {
- super(in);
- }
-
- /**
- * Read a line of US-ASCII. Used for e.g. HTTP.
- */
- public String readLine(int maxLength, int bufferSize) throws IOException {
- StringBuffer sb = new StringBuffer(bufferSize);
- while(true) {
- int x = read();
- if(x == -1) throw new EOFException();
- char c = (char) x;
- if(c == '\n') {
- if(sb.length() > 0) {
- if(sb.charAt(sb.length()-1) == '\r')
- sb.setLength(sb.length()-1);
- }
- return sb.toString();
- }
- sb.append(c);
- if(sb.length() >= maxLength)
- throw new TooLongException();
- }
- }
-
-}
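readLine() above terminates on '\n', strips a single preceding '\r', and fails on over-long lines. The same semantics as a standalone sketch (names hypothetical; a plain IOException stands in for TooLongException):

    import java.io.ByteArrayInputStream;
    import java.io.EOFException;
    import java.io.IOException;
    import java.io.InputStream;

    public class ReadLineSketch {
        static String readLine(InputStream in, int maxLength) throws IOException {
            StringBuffer sb = new StringBuffer();
            int x;
            while ((x = in.read()) != -1) {
                char c = (char) x;
                if (c == '\n') {
                    if (sb.length() > 0 && sb.charAt(sb.length() - 1) == '\r')
                        sb.setLength(sb.length() - 1); // drop the CR of a CRLF pair
                    return sb.toString();
                }
                sb.append(c);
                if (sb.length() >= maxLength)
                    throw new IOException("Line too long");
            }
            throw new EOFException();
        }

        public static void main(String[] args) throws IOException {
            InputStream in = new ByteArrayInputStream("ClientHello\r\n".getBytes("US-ASCII"));
            System.out.println(readLine(in, 64)); // ClientHello
        }
    }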
Copied:
branches/async-client-layer/src/freenet/support/io/LineReadingInputStream.java
(from rev 7873,
trunk/freenet/src/freenet/support/io/LineReadingInputStream.java)