Author: nextgens
Date: 2006-02-03 22:55:27 +0000 (Fri, 03 Feb 2006)
New Revision: 7999

Added:
   branches/freenet-freejvms/devnotes/specs/fcp.txt
   branches/freenet-freejvms/src/freenet/client/FetchWaiter.java
   branches/freenet-freejvms/src/freenet/client/PutWaiter.java
   branches/freenet-freejvms/src/freenet/client/StartableSplitfileBlock.java
   branches/freenet-freejvms/src/freenet/client/async/
   branches/freenet-freejvms/src/freenet/client/async/BaseClientPutter.java
   branches/freenet-freejvms/src/freenet/client/async/ClientCallback.java
   branches/freenet-freejvms/src/freenet/client/async/ClientGetState.java
   branches/freenet-freejvms/src/freenet/client/async/ClientGetter.java
   branches/freenet-freejvms/src/freenet/client/async/ClientPutState.java
   branches/freenet-freejvms/src/freenet/client/async/ClientPutter.java
   branches/freenet-freejvms/src/freenet/client/async/ClientRequest.java
   branches/freenet-freejvms/src/freenet/client/async/ClientRequestScheduler.java
   branches/freenet-freejvms/src/freenet/client/async/GetCompletionCallback.java
   branches/freenet-freejvms/src/freenet/client/async/MinimalSplitfileBlock.java
   branches/freenet-freejvms/src/freenet/client/async/MultiPutCompletionCallback.java
   branches/freenet-freejvms/src/freenet/client/async/PutCompletionCallback.java
   branches/freenet-freejvms/src/freenet/client/async/RequestScheduler.java
   branches/freenet-freejvms/src/freenet/client/async/SendableGet.java
   branches/freenet-freejvms/src/freenet/client/async/SendableInsert.java
   branches/freenet-freejvms/src/freenet/client/async/SendableRequest.java
   branches/freenet-freejvms/src/freenet/client/async/SimpleManifestPutter.java
   branches/freenet-freejvms/src/freenet/client/async/SingleBlockInserter.java
   branches/freenet-freejvms/src/freenet/client/async/SingleFileFetcher.java
   branches/freenet-freejvms/src/freenet/client/async/SingleFileInserter.java
   branches/freenet-freejvms/src/freenet/client/async/SplitFileFetcher.java
   branches/freenet-freejvms/src/freenet/client/async/SplitFileFetcherSegment.java
   branches/freenet-freejvms/src/freenet/client/async/SplitFileInserter.java
   branches/freenet-freejvms/src/freenet/client/async/SplitFileInserterSegment.java
   branches/freenet-freejvms/src/freenet/clients/
   branches/freenet-freejvms/src/freenet/clients/http/
   branches/freenet-freejvms/src/freenet/clients/http/FproxyToadlet.java
   branches/freenet-freejvms/src/freenet/clients/http/SimpleToadletServer.java
   branches/freenet-freejvms/src/freenet/clients/http/Toadlet.java
   branches/freenet-freejvms/src/freenet/clients/http/ToadletContainer.java
   branches/freenet-freejvms/src/freenet/clients/http/ToadletContext.java
   branches/freenet-freejvms/src/freenet/clients/http/ToadletContextClosedException.java
   branches/freenet-freejvms/src/freenet/clients/http/ToadletContextImpl.java
   branches/freenet-freejvms/src/freenet/clients/http/TrivialToadlet.java
   branches/freenet-freejvms/src/freenet/io/comm/IOStatisticCollector.java
   branches/freenet-freejvms/src/freenet/keys/ClientKSK.java
   branches/freenet-freejvms/src/freenet/keys/InsertableClientSSK.java
   branches/freenet-freejvms/src/freenet/keys/KeyEncodeException.java
   branches/freenet-freejvms/src/freenet/keys/KeyVerifyException.java
   branches/freenet-freejvms/src/freenet/keys/SSKEncodeException.java
   branches/freenet-freejvms/src/freenet/node/AnyInsertSender.java
   branches/freenet-freejvms/src/freenet/node/CHKInsertSender.java
   branches/freenet-freejvms/src/freenet/node/SSKInsertHandler.java
   branches/freenet-freejvms/src/freenet/node/SSKInsertSender.java
   branches/freenet-freejvms/src/freenet/node/fcp/
   branches/freenet-freejvms/src/freenet/node/fcp/AllDataMessage.java
   branches/freenet-freejvms/src/freenet/node/fcp/ClientGet.java
   branches/freenet-freejvms/src/freenet/node/fcp/ClientGetMessage.java
   branches/freenet-freejvms/src/freenet/node/fcp/ClientHelloMessage.java
   branches/freenet-freejvms/src/freenet/node/fcp/ClientPut.java
   branches/freenet-freejvms/src/freenet/node/fcp/ClientPutMessage.java
   branches/freenet-freejvms/src/freenet/node/fcp/ClientRequest.java
   branches/freenet-freejvms/src/freenet/node/fcp/DataCarryingMessage.java
   branches/freenet-freejvms/src/freenet/node/fcp/DataFoundMessage.java
   branches/freenet-freejvms/src/freenet/node/fcp/FCPConnectionHandler.java
   branches/freenet-freejvms/src/freenet/node/fcp/FCPConnectionInputHandler.java
   branches/freenet-freejvms/src/freenet/node/fcp/FCPConnectionOutputHandler.java
   branches/freenet-freejvms/src/freenet/node/fcp/FCPMessage.java
   branches/freenet-freejvms/src/freenet/node/fcp/FCPServer.java
   branches/freenet-freejvms/src/freenet/node/fcp/GenerateSSKMessage.java
   branches/freenet-freejvms/src/freenet/node/fcp/GetFailedMessage.java
   branches/freenet-freejvms/src/freenet/node/fcp/IdentifierCollisionMessage.java
   branches/freenet-freejvms/src/freenet/node/fcp/MessageInvalidException.java
   branches/freenet-freejvms/src/freenet/node/fcp/NodeHelloMessage.java
   branches/freenet-freejvms/src/freenet/node/fcp/ProtocolErrorMessage.java
   branches/freenet-freejvms/src/freenet/node/fcp/PutFailedMessage.java
   branches/freenet-freejvms/src/freenet/node/fcp/PutSuccessfulMessage.java
   branches/freenet-freejvms/src/freenet/node/fcp/SSKKeypairMessage.java
   branches/freenet-freejvms/src/freenet/node/fcp/SimpleProgressMessage.java
   branches/freenet-freejvms/src/freenet/node/fcp/URIGeneratedMessage.java
   branches/freenet-freejvms/src/freenet/support/HTMLDecoder.java
   branches/freenet-freejvms/src/freenet/support/HTMLEncoder.java
   branches/freenet-freejvms/src/freenet/support/ImmutableByteArrayWrapper.java
   branches/freenet-freejvms/src/freenet/support/IntNumberedItem.java
   branches/freenet-freejvms/src/freenet/support/LimitedEnumeration.java
   branches/freenet-freejvms/src/freenet/support/MultiValueTable.java
   branches/freenet-freejvms/src/freenet/support/NumberedItemComparator.java
   branches/freenet-freejvms/src/freenet/support/RandomGrabArray.java
   branches/freenet-freejvms/src/freenet/support/RandomGrabArrayItem.java
   branches/freenet-freejvms/src/freenet/support/RandomGrabArrayWithClient.java
   branches/freenet-freejvms/src/freenet/support/RandomGrabArrayWithInt.java
   branches/freenet-freejvms/src/freenet/support/ReadOnlyFileSliceBucket.java
   branches/freenet-freejvms/src/freenet/support/SectoredRandomGrabArray.java
   branches/freenet-freejvms/src/freenet/support/SectoredRandomGrabArrayWithInt.java
   branches/freenet-freejvms/src/freenet/support/SimpleIntNumberedItemComparator.java
   branches/freenet-freejvms/src/freenet/support/SortedVectorByNumber.java
   branches/freenet-freejvms/src/freenet/support/URLDecoder.java
   branches/freenet-freejvms/src/freenet/support/URLEncodedFormatException.java
   branches/freenet-freejvms/src/freenet/support/URLEncoder.java
   branches/freenet-freejvms/src/freenet/support/io/LineReader.java
   branches/freenet-freejvms/src/freenet/support/io/LineReadingInputStream.java
   branches/freenet-freejvms/src/freenet/support/io/TooLongException.java
   branches/freenet-freejvms/src/snmplib/
   branches/freenet-freejvms/src/snmplib/BERDecoder.java
   branches/freenet-freejvms/src/snmplib/BEREncoder.java
   branches/freenet-freejvms/src/snmplib/BadFormatException.java
   branches/freenet-freejvms/src/snmplib/DataConstantInt.java
   branches/freenet-freejvms/src/snmplib/DataConstantString.java
   branches/freenet-freejvms/src/snmplib/DataFetcher.java
   branches/freenet-freejvms/src/snmplib/DataStatisticsInfo.java
   branches/freenet-freejvms/src/snmplib/InfoSystem.java
   branches/freenet-freejvms/src/snmplib/MultiplexedDataFetcher.java
   branches/freenet-freejvms/src/snmplib/SNMPAgent.java
   branches/freenet-freejvms/src/snmplib/SNMPStarter.java
Removed:
   branches/freenet-freejvms/src/freenet/client/BlockFetcher.java
   branches/freenet-freejvms/src/freenet/client/BlockInserter.java
   branches/freenet-freejvms/src/freenet/client/Fetcher.java
   branches/freenet-freejvms/src/freenet/client/FileInserter.java
   branches/freenet-freejvms/src/freenet/client/InsertSegment.java
   branches/freenet-freejvms/src/freenet/client/RetryTracker.java
   branches/freenet-freejvms/src/freenet/client/RetryTrackerCallback.java
   branches/freenet-freejvms/src/freenet/client/Segment.java
   branches/freenet-freejvms/src/freenet/client/SplitFetcher.java
   branches/freenet-freejvms/src/freenet/client/SplitInserter.java
   branches/freenet-freejvms/src/freenet/client/StdSplitfileBlock.java
   branches/freenet-freejvms/src/freenet/client/async/BaseClientPutter.java
   branches/freenet-freejvms/src/freenet/client/async/ClientCallback.java
   branches/freenet-freejvms/src/freenet/client/async/ClientGetState.java
   branches/freenet-freejvms/src/freenet/client/async/ClientGetter.java
   branches/freenet-freejvms/src/freenet/client/async/ClientPutState.java
   branches/freenet-freejvms/src/freenet/client/async/ClientPutter.java
   branches/freenet-freejvms/src/freenet/client/async/ClientRequest.java
   branches/freenet-freejvms/src/freenet/client/async/ClientRequestScheduler.java
   branches/freenet-freejvms/src/freenet/client/async/GetCompletionCallback.java
   branches/freenet-freejvms/src/freenet/client/async/MinimalSplitfileBlock.java
   branches/freenet-freejvms/src/freenet/client/async/MultiPutCompletionCallback.java
   branches/freenet-freejvms/src/freenet/client/async/PutCompletionCallback.java
   branches/freenet-freejvms/src/freenet/client/async/RequestScheduler.java
   branches/freenet-freejvms/src/freenet/client/async/SendableGet.java
   branches/freenet-freejvms/src/freenet/client/async/SendableInsert.java
   branches/freenet-freejvms/src/freenet/client/async/SendableRequest.java
   branches/freenet-freejvms/src/freenet/client/async/SimpleManifestPutter.java
   branches/freenet-freejvms/src/freenet/client/async/SingleBlockInserter.java
   branches/freenet-freejvms/src/freenet/client/async/SingleFileFetcher.java
   branches/freenet-freejvms/src/freenet/client/async/SingleFileInserter.java
   branches/freenet-freejvms/src/freenet/client/async/SplitFileFetcher.java
   branches/freenet-freejvms/src/freenet/client/async/SplitFileFetcherSegment.java
   branches/freenet-freejvms/src/freenet/client/async/SplitFileInserter.java
   branches/freenet-freejvms/src/freenet/client/async/SplitFileInserterSegment.java
   branches/freenet-freejvms/src/freenet/clients/http/
   branches/freenet-freejvms/src/freenet/clients/http/FproxyToadlet.java
   branches/freenet-freejvms/src/freenet/clients/http/SimpleToadletServer.java
   branches/freenet-freejvms/src/freenet/clients/http/Toadlet.java
   branches/freenet-freejvms/src/freenet/clients/http/ToadletContainer.java
   branches/freenet-freejvms/src/freenet/clients/http/ToadletContext.java
   branches/freenet-freejvms/src/freenet/clients/http/ToadletContextClosedException.java
   branches/freenet-freejvms/src/freenet/clients/http/ToadletContextImpl.java
   branches/freenet-freejvms/src/freenet/clients/http/TrivialToadlet.java
   branches/freenet-freejvms/src/freenet/node/InsertSender.java
   branches/freenet-freejvms/src/freenet/node/QueuedDataRequest.java
   branches/freenet-freejvms/src/freenet/node/QueuedInsertRequest.java
   branches/freenet-freejvms/src/freenet/node/QueuedRequest.java
   branches/freenet-freejvms/src/freenet/node/QueueingSimpleLowLevelClient.java
   branches/freenet-freejvms/src/freenet/node/RequestStarterClient.java
   branches/freenet-freejvms/src/freenet/node/SimpleLowLevelClient.java
   branches/freenet-freejvms/src/freenet/node/fcp/AllDataMessage.java
   branches/freenet-freejvms/src/freenet/node/fcp/ClientGet.java
   branches/freenet-freejvms/src/freenet/node/fcp/ClientGetMessage.java
   branches/freenet-freejvms/src/freenet/node/fcp/ClientHelloMessage.java
   branches/freenet-freejvms/src/freenet/node/fcp/ClientPut.java
   branches/freenet-freejvms/src/freenet/node/fcp/ClientPutMessage.java
   branches/freenet-freejvms/src/freenet/node/fcp/ClientRequest.java
   branches/freenet-freejvms/src/freenet/node/fcp/DataCarryingMessage.java
   branches/freenet-freejvms/src/freenet/node/fcp/DataFoundMessage.java
   branches/freenet-freejvms/src/freenet/node/fcp/FCPConnectionHandler.java
   branches/freenet-freejvms/src/freenet/node/fcp/FCPConnectionInputHandler.java
   branches/freenet-freejvms/src/freenet/node/fcp/FCPConnectionOutputHandler.java
   branches/freenet-freejvms/src/freenet/node/fcp/FCPMessage.java
   branches/freenet-freejvms/src/freenet/node/fcp/FCPServer.java
   branches/freenet-freejvms/src/freenet/node/fcp/GenerateSSKMessage.java
   branches/freenet-freejvms/src/freenet/node/fcp/GetFailedMessage.java
   branches/freenet-freejvms/src/freenet/node/fcp/IdentifierCollisionMessage.java
   branches/freenet-freejvms/src/freenet/node/fcp/MessageInvalidException.java
   branches/freenet-freejvms/src/freenet/node/fcp/NodeHelloMessage.java
   branches/freenet-freejvms/src/freenet/node/fcp/ProtocolErrorMessage.java
   branches/freenet-freejvms/src/freenet/node/fcp/PutFailedMessage.java
   branches/freenet-freejvms/src/freenet/node/fcp/PutSuccessfulMessage.java
   branches/freenet-freejvms/src/freenet/node/fcp/SSKKeypairMessage.java
   branches/freenet-freejvms/src/freenet/node/fcp/SimpleProgressMessage.java
   branches/freenet-freejvms/src/freenet/node/fcp/URIGeneratedMessage.java
   branches/freenet-freejvms/src/freenet/store/BaseFreenetStore.java
   branches/freenet-freejvms/src/freenet/store/DataStore.java
   branches/freenet-freejvms/src/snmplib/BERDecoder.java
   branches/freenet-freejvms/src/snmplib/BEREncoder.java
   branches/freenet-freejvms/src/snmplib/BadFormatException.java
   branches/freenet-freejvms/src/snmplib/DataConstantInt.java
   branches/freenet-freejvms/src/snmplib/DataConstantString.java
   branches/freenet-freejvms/src/snmplib/DataFetcher.java
   branches/freenet-freejvms/src/snmplib/DataStatisticsInfo.java
   branches/freenet-freejvms/src/snmplib/InfoSystem.java
   branches/freenet-freejvms/src/snmplib/MultiplexedDataFetcher.java
   branches/freenet-freejvms/src/snmplib/SNMPAgent.java
   branches/freenet-freejvms/src/snmplib/SNMPStarter.java
Modified:
   branches/freenet-freejvms/LICENSE.Freenet
   branches/freenet-freejvms/src/freenet/client/ArchiveHandler.java
   branches/freenet-freejvms/src/freenet/client/ArchiveStoreContext.java
   branches/freenet-freejvms/src/freenet/client/ClientMetadata.java
   branches/freenet-freejvms/src/freenet/client/FECCodec.java
   branches/freenet-freejvms/src/freenet/client/FailureCodeTracker.java
   branches/freenet-freejvms/src/freenet/client/FetchException.java
   branches/freenet-freejvms/src/freenet/client/FetcherContext.java
   branches/freenet-freejvms/src/freenet/client/HighLevelSimpleClient.java
   branches/freenet-freejvms/src/freenet/client/HighLevelSimpleClientImpl.java
   branches/freenet-freejvms/src/freenet/client/InsertBlock.java
   branches/freenet-freejvms/src/freenet/client/InserterContext.java
   branches/freenet-freejvms/src/freenet/client/InserterException.java
   branches/freenet-freejvms/src/freenet/client/Metadata.java
   branches/freenet-freejvms/src/freenet/client/SplitfileBlock.java
   branches/freenet-freejvms/src/freenet/client/StandardOnionFECCodec.java
   branches/freenet-freejvms/src/freenet/client/events/SimpleBlockPutEvent.java
   branches/freenet-freejvms/src/freenet/client/events/SplitfileProgressEvent.java
   branches/freenet-freejvms/src/freenet/crypt/DSA.java
   branches/freenet-freejvms/src/freenet/crypt/DSAPrivateKey.java
   branches/freenet-freejvms/src/freenet/crypt/DSAPublicKey.java
   branches/freenet-freejvms/src/freenet/io/comm/DMT.java
   branches/freenet-freejvms/src/freenet/io/comm/Peer.java
   branches/freenet-freejvms/src/freenet/io/comm/UdpSocketManager.java
   branches/freenet-freejvms/src/freenet/io/xfer/BlockTransmitter.java
   branches/freenet-freejvms/src/freenet/keys/CHKBlock.java
   branches/freenet-freejvms/src/freenet/keys/CHKEncodeException.java
   branches/freenet-freejvms/src/freenet/keys/CHKVerifyException.java
   branches/freenet-freejvms/src/freenet/keys/ClientCHK.java
   branches/freenet-freejvms/src/freenet/keys/ClientCHKBlock.java
   branches/freenet-freejvms/src/freenet/keys/ClientKey.java
   branches/freenet-freejvms/src/freenet/keys/ClientKeyBlock.java
   branches/freenet-freejvms/src/freenet/keys/ClientSSK.java
   branches/freenet-freejvms/src/freenet/keys/ClientSSKBlock.java
   branches/freenet-freejvms/src/freenet/keys/FreenetURI.java
   branches/freenet-freejvms/src/freenet/keys/Key.java
   branches/freenet-freejvms/src/freenet/keys/KeyBlock.java
   branches/freenet-freejvms/src/freenet/keys/NodeCHK.java
   branches/freenet-freejvms/src/freenet/keys/NodeSSK.java
   branches/freenet-freejvms/src/freenet/keys/SSKBlock.java
   branches/freenet-freejvms/src/freenet/keys/SSKVerifyException.java
   branches/freenet-freejvms/src/freenet/node/FNPPacketMangler.java
   branches/freenet-freejvms/src/freenet/node/InsertHandler.java
   branches/freenet-freejvms/src/freenet/node/KeyTracker.java
   branches/freenet-freejvms/src/freenet/node/LocationManager.java
   branches/freenet-freejvms/src/freenet/node/LowLevelGetException.java
   branches/freenet-freejvms/src/freenet/node/LowLevelPutException.java
   branches/freenet-freejvms/src/freenet/node/Node.java
   branches/freenet-freejvms/src/freenet/node/NodeDispatcher.java
   branches/freenet-freejvms/src/freenet/node/PacketSender.java
   branches/freenet-freejvms/src/freenet/node/PeerManager.java
   branches/freenet-freejvms/src/freenet/node/PeerNode.java
   branches/freenet-freejvms/src/freenet/node/RealNodePingTest.java
   branches/freenet-freejvms/src/freenet/node/RealNodeRequestInsertTest.java
   branches/freenet-freejvms/src/freenet/node/RealNodeRoutingTest.java
   branches/freenet-freejvms/src/freenet/node/RequestHandler.java
   branches/freenet-freejvms/src/freenet/node/RequestSender.java
   branches/freenet-freejvms/src/freenet/node/RequestStarter.java
   branches/freenet-freejvms/src/freenet/node/TestnetHandler.java
   branches/freenet-freejvms/src/freenet/node/TestnetStatusUploader.java
   branches/freenet-freejvms/src/freenet/node/TextModeClientInterface.java
   branches/freenet-freejvms/src/freenet/node/Version.java
   branches/freenet-freejvms/src/freenet/store/BerkeleyDBFreenetStore.java
   branches/freenet-freejvms/src/freenet/store/FreenetStore.java
   branches/freenet-freejvms/src/freenet/support/BucketTools.java
   branches/freenet-freejvms/src/freenet/support/FileLoggerHook.java
   branches/freenet-freejvms/src/freenet/support/NumberedRecentItems.java
   branches/freenet-freejvms/src/freenet/support/PaddedEphemerallyEncryptedBucket.java
   branches/freenet-freejvms/src/freenet/support/Serializer.java
   branches/freenet-freejvms/src/freenet/support/SimpleFieldSet.java
   branches/freenet-freejvms/src/freenet/support/UpdatableSortedLinkedList.java
   branches/freenet-freejvms/src/freenet/support/io/FileBucket.java
   branches/freenet-freejvms/src/net/i2p/util/NativeBigInteger.java
Log:
Merged with the current trunk (r7998); almost compiling.

Modified: branches/freenet-freejvms/LICENSE.Freenet
===================================================================
--- branches/freenet-freejvms/LICENSE.Freenet   2006-02-03 22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/LICENSE.Freenet   2006-02-03 22:55:27 UTC (rev 7999)
@@ -1,9 +1,3 @@
-As a special exception, the author(s) permit linking of the Freenet 
-binaries with the Jakarta Compress library, which is licensed under the 
-terms of the Apache Software License, version 2. Authors of derivative 
-works may [not] choose to remove this exception (and corresponding 
-source code) [at their discretion].
-
                  GNU LIBRARY GENERAL PUBLIC LICENSE
                       Version 2, June 1991


Copied: branches/freenet-freejvms/devnotes/specs/fcp.txt (from rev 7998, trunk/freenet/devnotes/specs/fcp.txt)

Modified: branches/freenet-freejvms/src/freenet/client/ArchiveHandler.java
===================================================================
--- branches/freenet-freejvms/src/freenet/client/ArchiveHandler.java    2006-02-03 22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/freenet/client/ArchiveHandler.java    2006-02-03 22:55:27 UTC (rev 7999)
@@ -6,7 +6,7 @@
  * The public face (to Fetcher, for example) of ArchiveStoreContext.
  * Just has methods for fetching stuff.
  */
-interface ArchiveHandler {
+public interface ArchiveHandler {

        /**
         * Get the metadata for this ZIP manifest, as a Bucket.
@@ -14,7 +14,7 @@
         * @throws MetadataParseException If there was an error parsing intermediary metadata.
         */
        public abstract Bucket getMetadata(ArchiveContext archiveContext,
-                       FetcherContext fetchContext, ClientMetadata dm, int recursionLevel, 
+                       ClientMetadata dm, int recursionLevel, 
                        boolean dontEnterImplicitArchives)
                        throws ArchiveFailureException, ArchiveRestartException,
                        MetadataParseException, FetchException;
@@ -30,10 +30,15 @@
         * @throws MetadataParseException 
         */
        public abstract Bucket get(String internalName,
-                       ArchiveContext archiveContext, FetcherContext fetchContext,
+                       ArchiveContext archiveContext, 
                        ClientMetadata dm, int recursionLevel, 
                        boolean dontEnterImplicitArchives)
                        throws ArchiveFailureException, ArchiveRestartException,
                        MetadataParseException, FetchException;

+       /**
+        * Get the archive type.
+        */
+       public abstract short getArchiveType();
+
 }
\ No newline at end of file
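
For context, a minimal sketch (not part of the patch; the helper class, method and variable names are hypothetical) of how a caller might use the slimmed-down ArchiveHandler interface, which no longer takes a FetcherContext and now exposes the archive type:

import freenet.client.ArchiveContext;
import freenet.client.ArchiveFailureException;
import freenet.client.ArchiveHandler;
import freenet.client.ArchiveRestartException;
import freenet.client.ClientMetadata;
import freenet.client.FetchException;
import freenet.client.MetadataParseException;
import freenet.support.Bucket;

// Hypothetical caller: look up a single entry in an archive via the new interface.
class ArchiveLookupSketch {
	static Bucket lookup(ArchiveHandler handler, String name, ArchiveContext actx)
			throws ArchiveFailureException, ArchiveRestartException,
			MetadataParseException, FetchException {
		ClientMetadata dm = new ClientMetadata(); // constructor is made public in this revision
		short archiveType = handler.getArchiveType(); // new accessor added in this revision
		// With the FetcherContext parameter gone, a cache miss now returns null
		// (see the ArchiveStoreContext change below) rather than fetching the archive.
		return handler.get(name, actx, dm, 0, true);
	}
}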

Modified: branches/freenet-freejvms/src/freenet/client/ArchiveStoreContext.java
===================================================================
--- branches/freenet-freejvms/src/freenet/client/ArchiveStoreContext.java       2006-02-03 22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/freenet/client/ArchiveStoreContext.java       2006-02-03 22:55:27 UTC (rev 7999)
@@ -12,8 +12,11 @@
  * subject to the above.
  * 
  * Always take the lock on ArchiveStoreContext before the lock on ArchiveManager, NOT the other way around.
+ * 
+ * Not normally to be used directly by external packages, but public for
+ * ArchiveManager.extractToCache. FIXME.
  */
-class ArchiveStoreContext implements ArchiveHandler {
+public class ArchiveStoreContext implements ArchiveHandler {

        private ArchiveManager manager;
        private FreenetURI key;
@@ -34,42 +37,29 @@
         * Get the metadata for a given archive.
         * @return A Bucket containing the metadata, in binary format, for the archive.
         */
-       public Bucket getMetadata(ArchiveContext archiveContext, FetcherContext fetchContext, ClientMetadata dm, int recursionLevel, 
+       public Bucket getMetadata(ArchiveContext archiveContext, ClientMetadata dm, int recursionLevel, 
                        boolean dontEnterImplicitArchives) throws ArchiveFailureException, ArchiveRestartException, MetadataParseException, FetchException {
-               return get(".metadata", archiveContext, fetchContext, dm, recursionLevel, dontEnterImplicitArchives);
+               return get(".metadata", archiveContext, dm, recursionLevel, dontEnterImplicitArchives);
        }

        /**
         * Fetch a file in an archive. Will check the cache first, then fetch the archive if
         * necessary.
         */
-       public Bucket get(String internalName, ArchiveContext archiveContext, FetcherContext fetchContext, ClientMetadata dm, int recursionLevel, 
+       public Bucket get(String internalName, ArchiveContext archiveContext, ClientMetadata dm, int recursionLevel, 
                        boolean dontEnterImplicitArchives) throws ArchiveFailureException, ArchiveRestartException, MetadataParseException, FetchException {

                // Do loop detection on the archive that we are about to fetch.
                archiveContext.doLoopDetection(key);

                Bucket data;
-
+               
                // Fetch from cache
                if((data = manager.getCached(key, internalName)) != null) {
                        return data;
                }

-               synchronized(this) {
-                       // Fetch from cache
-                       if((data = manager.getCached(key, internalName)) != null) {
-                               return data;
-                       }
-                       
-                       // Not in cache
-                       
-                       if(fetchContext == null) return null;
-                       Fetcher fetcher = new Fetcher(key, fetchContext, archiveContext);
-                       FetchResult result = fetcher.realRun(dm, recursionLevel, key, dontEnterImplicitArchives, fetchContext.localRequestOnly);
-                       manager.extractToCache(key, archiveType, result.data, archiveContext, this);
-                       return manager.getCached(key, internalName);
-               }
+               return null;
        }

        // Archive size
@@ -130,5 +120,9 @@
                        myItems.remove(item);
                }
        }
+
+       public short getArchiveType() {
+               return archiveType;
+       }

 }

Deleted: branches/freenet-freejvms/src/freenet/client/BlockFetcher.java
===================================================================
--- branches/freenet-freejvms/src/freenet/client/BlockFetcher.java      2006-02-03 22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/freenet/client/BlockFetcher.java      2006-02-03 22:55:27 UTC (rev 7999)
@@ -1,148 +0,0 @@
-/**
- * 
- */
-package freenet.client;
-
-import freenet.keys.FreenetURI;
-import freenet.support.Bucket;
-import freenet.support.Logger;
-
-public class BlockFetcher extends StdSplitfileBlock {
-
-       private final Segment segment;
-       final FreenetURI uri;
-       final boolean dontEnterImplicitArchives;
-       int completedTries;
-       boolean actuallyFetched;
-       
-       public BlockFetcher(Segment segment, RetryTracker tracker, FreenetURI freenetURI, int index, boolean dontEnterImplicitArchives) {
-               super(tracker, index, null);
-               this.segment = segment;
-               uri = freenetURI;
-               completedTries = 0;
-               fetchedData = null;
-               actuallyFetched = false;
-               this.dontEnterImplicitArchives = dontEnterImplicitArchives;
-       }
-
-       public String getName() {
-               return "BlockFetcher for "+getNumber();
-       }
-       
-       public void run() {
-               Logger.minor(this, "Running: "+this);
-               // Already added to runningFetches.
-               // But need to make sure we are removed when we exit.
-               try {
-                       realRun();
-               } catch (Throwable t) {
-                       fatalError(t, FetchException.INTERNAL_ERROR);
-               } finally {
-                       completedTries++;
-               }
-       }
-
-       public String toString() {
-               return super.toString()+" tries="+completedTries+" uri="+uri;
-       }
-       
-       private void realRun() {
-               // Do the fetch
-               Fetcher f = new Fetcher(uri, this.segment.blockFetchContext);
-               try {
-                       FetchResult fr = f.realRun(new ClientMetadata(), segment.recursionLevel, uri, 
-                                       (!this.segment.nonFullBlocksAllowed) || dontEnterImplicitArchives, segment.blockFetchContext.localRequestOnly || completedTries == 0);
-                       actuallyFetched = true;
-                       fetchedData = fr.data;
-                       Logger.minor(this, "Fetched "+fetchedData.size()+" bytes on "+this);
-                       tracker.success(this);
-               } catch (MetadataParseException e) {
-                       fatalError(e, FetchException.INVALID_METADATA);
-               } catch (FetchException e) {
-                       int code = e.getMode();
-                       switch(code) {
-                       case FetchException.ARCHIVE_FAILURE:
-                       case FetchException.BLOCK_DECODE_ERROR:
-                       case FetchException.HAS_MORE_METASTRINGS:
-                       case FetchException.INVALID_METADATA:
-                       case FetchException.NOT_IN_ARCHIVE:
-                       case FetchException.TOO_DEEP_ARCHIVE_RECURSION:
-                       case FetchException.TOO_MANY_ARCHIVE_RESTARTS:
-                       case FetchException.TOO_MANY_METADATA_LEVELS:
-                       case FetchException.TOO_MANY_REDIRECTS:
-                       case FetchException.TOO_MUCH_RECURSION:
-                       case FetchException.UNKNOWN_METADATA:
-                       case FetchException.UNKNOWN_SPLITFILE_METADATA:
-                               // Fatal, probably an error on insert
-                               fatalError(e, code);
-                               return;
-                       
-                       case FetchException.DATA_NOT_FOUND:
-                       case FetchException.ROUTE_NOT_FOUND:
-                       case FetchException.REJECTED_OVERLOAD:
-                       case FetchException.TRANSFER_FAILED:
-                               // Non-fatal
-                               nonfatalError(e, code);
-                               return;
-                               
-                       case FetchException.BUCKET_ERROR:
-                       case FetchException.INTERNAL_ERROR:
-                               // Maybe fatal
-                               nonfatalError(e, code);
-                               return;
-                       }
-               } catch (ArchiveFailureException e) {
-                       fatalError(e, FetchException.ARCHIVE_FAILURE);
-               } catch (ArchiveRestartException e) {
-                       Logger.error(this, "Got an ArchiveRestartException in a splitfile - WTF?");
-                       fatalError(e, FetchException.ARCHIVE_FAILURE);
-               }
-       }
-
-       private void fatalError(Throwable e, int code) {
-               Logger.error(this, "Giving up on block: "+this+": "+e, e);
-               tracker.fatalError(this, code);
-       }
-
-       private void nonfatalError(Exception e, int code) {
-               Logger.minor(this, "Non-fatal error on "+this+": "+e);
-               tracker.nonfatalError(this, code);
-       }
-       
-       public boolean succeeded() {
-               return fetchedData != null;
-       }
-
-       /**
-        * Queue a healing block for insert.
-        * Will be implemented using the download manager.
-        * FIXME: implement!
-        */
-       public void queueHeal() {
-               // TODO Auto-generated method stub
-               
-       }
-
-       public void kill() {
-               // Do nothing, for now
-       }
-
-       public FreenetURI getURI() {
-               return uri;
-       }
-       
-       public void setData(Bucket data) {
-               actuallyFetched = false;
-               super.setData(data);
-       }
-
-       protected void checkStartable() {
-               if(fetchedData != null) {
-                       throw new IllegalStateException("Already have data");
-               }
-       }
-
-       public int getRetryCount() {
-               return completedTries;
-       }
-}
\ No newline at end of file

Deleted: branches/freenet-freejvms/src/freenet/client/BlockInserter.java
===================================================================
--- branches/freenet-freejvms/src/freenet/client/BlockInserter.java     2006-02-03 22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/freenet/client/BlockInserter.java     2006-02-03 22:55:27 UTC (rev 7999)
@@ -1,150 +0,0 @@
-package freenet.client;
-
-import freenet.client.events.BlockInsertErrorEvent;
-import freenet.keys.FreenetURI;
-import freenet.support.Bucket;
-import freenet.support.Logger;
-
-/**
- * Inserts a single splitfile block.
- */
-public class BlockInserter extends StdSplitfileBlock implements Runnable {
-
-       private boolean succeeded;
-       private int completedTries;
-       private final InserterContext ctx;
-       private final InsertBlock block;
-       private FreenetURI uri;
-       private final boolean getCHKOnly;
-       /** RNF count. We can count many consecutive RNFs as success. */
-       private int rnfs;
-       
-       /**
-        * Create a BlockInserter.
-        * @param bucket The data to insert, or null if it will be filled in later.
-        * @param num The block number in the splitfile.
-        */
-       public BlockInserter(Bucket bucket, int num, RetryTracker tracker, InserterContext ctx, boolean getCHKOnly) {
-               super(tracker, num, bucket);
-               succeeded = false;
-               this.ctx = ctx;
-               block = new InsertBlock(bucket, null, FreenetURI.EMPTY_CHK_URI);
-               this.getCHKOnly = getCHKOnly;
-               Logger.minor(this, "Created "+this);
-       }
-
-       public synchronized void setData(Bucket data) {
-               if(this.fetchedData != null) throw new IllegalArgumentException("Cannot set data when already have data");
-               block.data = data;
-               super.setData(data);
-       }
-
-       public void kill() {
-               // Do nothing, for now.
-       }
-
-       public String toString() {
-               return super.toString()+" succeeded="+succeeded+" tries="+completedTries+" uri="+uri;
-       }
-       
-       public FreenetURI getURI() {
-               return uri;
-       }
-
-       public String getName() {
-               return "BlockInserter for "+this.getNumber();
-       }
-       
-       public void run() {
-               try {
-                       Logger.minor(this, "Running "+this);
-                       if(fetchedData == null)
-                               throw new NullPointerException();
-                       realRun();
-               } catch (Throwable t) {
-                       Logger.error(this, "Caught "+t+" on "+this, t);
-                       fatalError(t, InserterException.INTERNAL_ERROR);
-               } finally {
-                       completedTries++;
-               }
-       }
-       
-       private void realRun() {
-               FileInserter inserter = new FileInserter(ctx);
-               try {
-                       if(uri == null && !getCHKOnly)
-                               uri = inserter.run(block, false, true, true);
-                       uri = inserter.run(block, false, getCHKOnly, true);
-                       succeeded = true;
-                       tracker.success(this);
-               } catch (InserterException e) {
-                       int mode = e.getMode();
-                       switch(mode) {
-                       case InserterException.ROUTE_NOT_FOUND:
-                               // N consecutive RNFs = success
-                               if(ctx.consecutiveRNFsCountAsSuccess > 0) {
-                                       rnfs++;
-                                       if(rnfs >= ctx.consecutiveRNFsCountAsSuccess) {
-                                               succeeded = true;
-                                               tracker.success(this);
-                                               return;
-                                       }
-                               }
-                               nonfatalError(e, mode);
-                               return;
-                       case InserterException.REJECTED_OVERLOAD:
-                       case InserterException.ROUTE_REALLY_NOT_FOUND:
-                               rnfs = 0;
-                               nonfatalError(e, mode);
-                               return;
-                       case InserterException.INTERNAL_ERROR:
-                       case InserterException.BUCKET_ERROR:
-                               fatalError(e, mode);
-                               return;
-                       case InserterException.FATAL_ERRORS_IN_BLOCKS:
-                       case InserterException.TOO_MANY_RETRIES_IN_BLOCKS:
-                               // Huh?
-                               Logger.error(this, "Got error inserting blocks ("+e.getMessage()+") while inserting a block - WTF?");
-                               fatalError(e, InserterException.INTERNAL_ERROR);
-                               return;
-                       case InserterException.INVALID_URI:
-                               Logger.error(this, "Got invalid URI error but URI was CHK@ in block insert");
-                               fatalError(e, InserterException.INTERNAL_ERROR);
-                               return;
-                       default:
-                               rnfs = 0;
-                               Logger.error(this, "Unknown insert error "+mode+" while inserting a block");
-                               fatalError(e, InserterException.INTERNAL_ERROR);
-                               return;
-                       }
-                       // FIXME add more cases as we create them
-               }
-               
-       }
-
-       private void fatalError(InserterException e, int code) {
-               Logger.normal(this, "Giving up on block: "+this+": "+e);
-               tracker.fatalError(this, code);
-               ctx.eventProducer.produceEvent(new BlockInsertErrorEvent(e, uri, completedTries));
-       }
-
-       private void fatalError(Throwable t, int code) {
-               // Don't need to include uri
-               fatalError(new InserterException(code, t, null), code);
-       }
-
-       private void nonfatalError(InserterException e, int code) {
-               Logger.minor(this, "Non-fatal error on "+this+": "+e);
-               tracker.nonfatalError(this, code);
-               ctx.eventProducer.produceEvent(new BlockInsertErrorEvent(e, uri, completedTries));
-       }
-       
-       protected void checkStartable() {
-               if(succeeded)
-                       throw new IllegalStateException("Already inserted block");
-       }
-
-       public int getRetryCount() {
-               return completedTries;
-       }
-}

Modified: branches/freenet-freejvms/src/freenet/client/ClientMetadata.java
===================================================================
--- branches/freenet-freejvms/src/freenet/client/ClientMetadata.java    2006-02-03 22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/freenet/client/ClientMetadata.java    2006-02-03 22:55:27 UTC (rev 7999)
@@ -13,7 +13,7 @@
        }

        /** Create an empty ClientMetadata instance */
-       ClientMetadata() {
+       public ClientMetadata() {
                mimeType = null;
        }

@@ -35,6 +35,6 @@
        }

        public boolean isTrivial() {
-               return (mimeType == null || mimeType.equals(DefaultMIMETypes.DEFAULT_MIME_TYPE));
+               return (mimeType == null || mimeType.equals(""));
        }
 }

Modified: branches/freenet-freejvms/src/freenet/client/FECCodec.java
===================================================================
--- branches/freenet-freejvms/src/freenet/client/FECCodec.java  2006-02-03 22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/freenet/client/FECCodec.java  2006-02-03 22:55:27 UTC (rev 7999)
@@ -2,6 +2,7 @@

 import java.io.IOException;

+import freenet.support.Bucket;
 import freenet.support.BucketFactory;

 /**
@@ -11,7 +12,7 @@
  * @author root
  *
  */
-abstract class FECCodec {
+public abstract class FECCodec {

        /**
         * Get a codec where we know both the number of data blocks and the number
@@ -67,6 +68,17 @@
        public abstract void encode(SplitfileBlock[] dataBlocks, SplitfileBlock[] checkBlocks, int blockLength, BucketFactory bucketFactory) throws IOException;

        /**
+        * Encode all missing *check* blocks.
+        * Requires that all the data blocks be present.
+        * @param dataBlocks The data blocks.
+        * @param checkBlocks The check blocks.
+        * @param blockLength The block length in bytes.
+        * @param bf The BucketFactory to use to generate buckets.
+        * @throws IOException If there is an error in decoding caused by an I/O error (usually involving buckets).
+        */
+       public abstract void encode(Bucket[] dataBlocks, Bucket[] checkBlocks, int blockLength, BucketFactory bucketFactory) throws IOException;
+
+       /**
         * How many check blocks?
         */
        public abstract int countCheckBlocks();
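
A minimal sketch (not part of the patch; the helper class and parameter names are hypothetical) of how a splitfile inserter might call the new Bucket[]-based encode() overload to regenerate missing check blocks once all data blocks are present:

import java.io.IOException;

import freenet.client.FECCodec;
import freenet.support.Bucket;
import freenet.support.BucketFactory;

// Hypothetical helper: fill in the null entries of checkBlocks using the codec.
class CheckBlockEncoderSketch {
	static void fillMissingCheckBlocks(FECCodec codec, Bucket[] dataBlocks,
			Bucket[] checkBlocks, int blockLength, BucketFactory bf) throws IOException {
		// Per the new javadoc, all data blocks must already be present; buckets
		// for the missing check blocks are allocated through bf.
		codec.encode(dataBlocks, checkBlocks, blockLength, bf);
	}
}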

Modified: branches/freenet-freejvms/src/freenet/client/FailureCodeTracker.java
===================================================================
--- branches/freenet-freejvms/src/freenet/client/FailureCodeTracker.java        2006-02-03 22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/freenet/client/FailureCodeTracker.java        2006-02-03 22:55:27 UTC (rev 7999)
@@ -4,6 +4,8 @@
 import java.util.HashMap;
 import java.util.Iterator;

+import freenet.support.SimpleFieldSet;
+
 /**
  * Essentially a map of integer to incrementible integer.
  * FIXME maybe move this to support, give it a better name?
@@ -11,6 +13,7 @@
 public class FailureCodeTracker {

        public final boolean insert;
+       private int total;

        public FailureCodeTracker(boolean insert) {
                this.insert = insert;
@@ -28,6 +31,7 @@
                if(i == null)
                        map.put(key, i = new Item());
                i.x++;
+               total++;
        }

        public synchronized void inc(Integer key, int val) {
@@ -35,6 +39,7 @@
                if(i == null)
                        map.put(key, i = new Item());
                i.x+=val;
+               total += val;
        }

        public synchronized String toVerboseString() {
@@ -53,14 +58,78 @@
                return sb.toString();
        }

-       public synchronized FailureCodeTracker merge(FailureCodeTracker accumulatedFatalErrorCodes) {
-               Iterator keys = map.keySet().iterator();
+       /**
+        * Merge codes from another tracker into this one.
+        */
+       public synchronized FailureCodeTracker merge(FailureCodeTracker source) {
+               Iterator keys = source.map.keySet().iterator();
                while(keys.hasNext()) {
                        Integer k = (Integer) keys.next();
-                       Item item = (Item) map.get(k);
+                       Item item = (Item) source.map.get(k);
                        inc(k, item.x);
                }
                return this;
        }
+
+       public void merge(FetchException e) {
+               if(insert) throw new IllegalStateException("Merging a FetchException in an insert!");
+               if(e.errorCodes != null) {
+                       merge(e.errorCodes);
+               }
+               // Increment mode anyway, so we get the splitfile error as well.
+               inc(e.mode);
+       }
+
+       public synchronized int totalCount() {
+               return total;
+       }
+
+       /** Copy verbosely to a SimpleFieldSet */
+       public synchronized void copyToFieldSet(SimpleFieldSet sfs, String prefix) {
+               Iterator keys = map.keySet().iterator();
+               while(keys.hasNext()) {
+                       Integer k = (Integer) keys.next();
+                       Item item = (Item) map.get(k);
+                       int code = k.intValue();
+                       // prefix.num.Description=<code description>
+                       // prefix.num.Count=<count>
+                       sfs.put(prefix+Integer.toHexString(code)+".Description", 
+                                       insert ? InserterException.getMessage(code) : FetchException.getMessage(code));
+                       sfs.put(prefix+Integer.toHexString(code)+".Count", Integer.toHexString(item.x));
+               }
+       }
+
+       public synchronized boolean isOneCodeOnly() {
+               return map.size() == 1;
+       }
+
+       public synchronized int getFirstCode() {
+               return ((Integer) map.keySet().toArray()[0]).intValue();
+       }
+
+       public synchronized boolean isFatal(boolean insert) {
+               Iterator i = map.keySet().iterator();
+               while(i.hasNext()) {
+                       Integer code = (Integer) i.next();
+                       if(((Item)map.get(code)).x == 0) continue;
+                       if(insert) {
+                               if(InserterException.isFatal(code.intValue())) return true;
+                       } else {
+                               if(FetchException.isFatal(code.intValue())) return true;
+                       }
+               }
+               return false;
+       }
+
+       public void merge(InserterException e) {
+               if(!insert) throw new IllegalArgumentException("This is not an insert yet merge("+e+") called!");
+               if(e.errorCodes != null)
+                       merge(e.errorCodes);
+               inc(e.getMode());
+       }
+
+       public boolean isEmpty() {
+               return map.isEmpty();
+       }

 }
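
A minimal sketch (not part of the patch; the class name and the error codes chosen are only illustrative) of how the new accounting methods on FailureCodeTracker might be used to keep per-code counts during a fetch and decide whether any recorded code is fatal:

import freenet.client.FailureCodeTracker;
import freenet.client.FetchException;

// Hypothetical usage of the new totalCount()/isFatal() accounting.
public class FailureTrackingSketch {
	public static void main(String[] args) {
		FailureCodeTracker errors = new FailureCodeTracker(false); // false = tracking fetch errors
		errors.inc(new Integer(FetchException.DATA_NOT_FOUND), 1);
		errors.inc(new Integer(FetchException.ROUTE_NOT_FOUND), 3);
		System.out.println("Failures so far: " + errors.totalCount()); // 4
		// Neither code above is fatal, so this prints false and a caller would keep retrying.
		System.out.println("Fatal? " + errors.isFatal(false));
	}
}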

Modified: branches/freenet-freejvms/src/freenet/client/FetchException.java
===================================================================
--- branches/freenet-freejvms/src/freenet/client/FetchException.java    2006-02-03 22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/freenet/client/FetchException.java    2006-02-03 22:55:27 UTC (rev 7999)
@@ -1,7 +1,5 @@
 package freenet.client;

-import java.io.IOException;
-
 import freenet.support.Logger;

 /**
@@ -17,6 +15,8 @@
        /** For collection errors */
        public final FailureCodeTracker errorCodes;

+       public final String extraMessage;
+       
        /** Get the failure mode. */
        public int getMode() {
                return mode;
@@ -24,6 +24,7 @@

        public FetchException(int m) {
                super(getMessage(m));
+               extraMessage = null;
                mode = m;
                errorCodes = null;
                Logger.minor(this, "FetchException("+getMessage(mode)+")", this);
@@ -31,6 +32,7 @@

        public FetchException(MetadataParseException e) {
                super(getMessage(INVALID_METADATA)+": "+e.getMessage());
+               extraMessage = e.getMessage();
                mode = INVALID_METADATA;
                errorCodes = null;
                initCause(e);
@@ -38,15 +40,25 @@
        }

        public FetchException(ArchiveFailureException e) {
-               super(getMessage(INVALID_METADATA)+": "+e.getMessage());
+               super(getMessage(ARCHIVE_FAILURE)+": "+e.getMessage());
+               extraMessage = e.getMessage();
                mode = ARCHIVE_FAILURE;
                errorCodes = null;
                initCause(e);
                Logger.minor(this, "FetchException("+getMessage(mode)+"): "+e,e);
        }

+       public FetchException(ArchiveRestartException e) {
+               super(getMessage(ARCHIVE_RESTART)+": "+e.getMessage());
+               extraMessage = e.getMessage();
+               mode = ARCHIVE_FAILURE;
+               errorCodes = null;
+               initCause(e);
+               Logger.minor(this, "FetchException("+getMessage(mode)+"): "+e,e);       }
+
        public FetchException(int mode, Throwable t) {
                super(getMessage(mode)+": "+t.getMessage());
+               extraMessage = t.getMessage();
                this.mode = mode;
                errorCodes = null;
                initCause(t);
@@ -55,6 +67,7 @@

        public FetchException(int mode, FailureCodeTracker errorCodes) {
                super(getMessage(mode));
+               extraMessage = null;
                this.mode = mode;
                this.errorCodes = errorCodes;
                Logger.minor(this, "FetchException("+getMessage(mode)+")");
@@ -63,11 +76,71 @@

        public FetchException(int mode, String msg) {
                super(getMessage(mode)+": "+msg);
+               extraMessage = msg;
                errorCodes = null;
                this.mode = mode;
                Logger.minor(this, "FetchException("+getMessage(mode)+"): "+msg,this);
        }

+       public static String getShortMessage(int mode) {
+               switch(mode) {
+               case TOO_DEEP_ARCHIVE_RECURSION:
+                       return "Too deep archive recursion";
+               case UNKNOWN_SPLITFILE_METADATA:
+                       return "Unknown splitfile metadata";
+               case TOO_MANY_REDIRECTS:
+                       return "Too many redirects";
+               case UNKNOWN_METADATA:
+                       return "Unknown metadata";
+               case INVALID_METADATA:
+                       return "Invalid metadata";
+               case ARCHIVE_FAILURE:
+                       return "Archive failure";
+               case BLOCK_DECODE_ERROR:
+                       return "Block decode error";
+               case TOO_MANY_METADATA_LEVELS:
+                       return "Too many metadata levels";
+               case TOO_MANY_ARCHIVE_RESTARTS:
+                       return "Too many archive restarts";
+               case TOO_MUCH_RECURSION:
+                       return "Too much recursion";
+               case NOT_IN_ARCHIVE:
+                       return "Not in archive";
+               case HAS_MORE_METASTRINGS:
+                       return "Not a manifest";
+               case BUCKET_ERROR:
+                       return "Temporary files error";
+               case DATA_NOT_FOUND:
+                       return "Data not found";
+               case ROUTE_NOT_FOUND:
+                       return "Route not found";
+               case REJECTED_OVERLOAD:
+                       return "Timeout or overload";
+               case INTERNAL_ERROR:
+                       return "Internal error";
+               case TRANSFER_FAILED:
+                       return "Transfer failed";
+               case SPLITFILE_ERROR:
+                       return "Splitfile error";
+               case INVALID_URI:
+                       return "Invalid URI";
+               case TOO_BIG:
+                       return "Too big";
+               case TOO_BIG_METADATA:
+                       return "Metadata too big";
+               case TOO_MANY_BLOCKS_PER_SEGMENT:
+                       return "Too many blocks per segment";
+               case NOT_ENOUGH_METASTRINGS:
+                       return "Not enough meta-strings"; // FIXME better description
+               case CANCELLED:
+                       return "Cancelled";
+               case ARCHIVE_RESTART:
+                       return "Archive restarted";
+               default:
+                       return "Unknown code "+mode;
+               }
+       }
+       
        public static String getMessage(int mode) {
                switch(mode) {
                case TOO_DEEP_ARCHIVE_RECURSION:
@@ -83,7 +156,7 @@
                case ARCHIVE_FAILURE:
                        return "Failure in extracting files from an archive";
                case BLOCK_DECODE_ERROR:
-                       return "Failed to decode a splitfile block";
+                       return "Failed to decode a block";
                case TOO_MANY_METADATA_LEVELS:
                        return "Too many levels of split metadata";
                case TOO_MANY_ARCHIVE_RESTARTS:
@@ -95,7 +168,7 @@
                case HAS_MORE_METASTRINGS:
                        return "Not a manifest";
                case BUCKET_ERROR:
-                       return "Internal error, maybe disk full or permissions problem?";
+                       return "Internal temp files error, maybe disk full or permissions problem?";
                case DATA_NOT_FOUND:
                        return "Data not found";
                case ROUTE_NOT_FOUND:
@@ -116,6 +189,13 @@
                        return "Metadata too big";
                case TOO_MANY_BLOCKS_PER_SEGMENT:
                        return "Too many blocks per segment";
+               case NOT_ENOUGH_METASTRINGS:
+                       return "Give more metastrings (path components) in URI";
+                       // FIXME better description for above
+               case CANCELLED:
+                       return "Cancelled by caller";
+               case ARCHIVE_RESTART:
+                       return "Archive restarted";
                default:
                        return "Unknown fetch error code: "+mode;
                }
@@ -169,4 +249,63 @@
        public static final int TOO_BIG_METADATA = 22;
        /** Splitfile has too big segments */
        public static final int TOO_MANY_BLOCKS_PER_SEGMENT = 23;
+       /** Not enough meta strings in URI given and no default document */
+       public static final int NOT_ENOUGH_METASTRINGS = 24;
+       /** Explicitly cancelled */
+       public static final int CANCELLED = 25;
+       /** Archive restart */
+       public static final int ARCHIVE_RESTART = 26;
+
+       /** Is an error fatal i.e. is there no point retrying? */
+       public boolean isFatal() {
+               return isFatal(mode);
+       }
+
+       public static boolean isFatal(int mode) {
+               switch(mode) {
+               // Problems with the data as inserted. No point retrying.
+               case FetchException.ARCHIVE_FAILURE:
+               case FetchException.BLOCK_DECODE_ERROR:
+               case FetchException.HAS_MORE_METASTRINGS:
+               case FetchException.NOT_ENOUGH_METASTRINGS:
+               case FetchException.INVALID_METADATA:
+               case FetchException.NOT_IN_ARCHIVE:
+               case FetchException.TOO_DEEP_ARCHIVE_RECURSION:
+               case FetchException.TOO_MANY_ARCHIVE_RESTARTS:
+               case FetchException.TOO_MANY_METADATA_LEVELS:
+               case FetchException.TOO_MANY_REDIRECTS:
+               case FetchException.TOO_MUCH_RECURSION:
+               case FetchException.UNKNOWN_METADATA:
+               case FetchException.UNKNOWN_SPLITFILE_METADATA:
+               case FetchException.INVALID_URI:
+               case FetchException.TOO_BIG:
+                       return true;
+
+               // Low level errors, can be retried
+               case FetchException.DATA_NOT_FOUND:
+               case FetchException.ROUTE_NOT_FOUND:
+               case FetchException.REJECTED_OVERLOAD:
+               case FetchException.TRANSFER_FAILED:
+                       return false;
+                       
+               case FetchException.BUCKET_ERROR:
+               case FetchException.INTERNAL_ERROR:
+                       // Maybe fatal
+                       return false;
+                       
+               case FetchException.SPLITFILE_ERROR:
+                       // Fatal, because there are internal retries
+                       return true;
+                       
+                       // Wierd ones
+               case FetchException.CANCELLED:
+               case FetchException.ARCHIVE_RESTART:
+                       // Fatal
+                       return true;
+                       
+               default:
+                       Logger.error(FetchException.class, "Do not know if error code is fatal: "+getMessage(mode));
+                       return false; // assume it isn't
+               }
+       }
 }
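
A minimal sketch (not part of the patch; the class and method names are hypothetical) of how retry logic might use the isFatal() classification added above:

import freenet.client.FetchException;

// Hypothetical retry policy built on the new FetchException.isFatal() helper.
class RetryPolicySketch {
	static boolean shouldRetry(FetchException e) {
		// Fatal codes (bad or missing metadata, too many redirects, cancellation, ...)
		// point at the inserted data or the caller, so retrying the fetch cannot help.
		return !e.isFatal();
	}
}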

Copied: branches/freenet-freejvms/src/freenet/client/FetchWaiter.java (from rev 
7998, trunk/freenet/src/freenet/client/FetchWaiter.java)

Deleted: branches/freenet-freejvms/src/freenet/client/Fetcher.java
===================================================================
--- branches/freenet-freejvms/src/freenet/client/Fetcher.java   2006-02-03 22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/freenet/client/Fetcher.java   2006-02-03 22:55:27 UTC (rev 7999)
@@ -1,333 +0,0 @@
-package freenet.client;
-
-import java.io.IOException;
-import java.net.MalformedURLException;
-import java.util.LinkedList;
-
-import freenet.client.events.DecodedBlockEvent;
-import freenet.client.events.FetchedMetadataEvent;
-import freenet.client.events.GotBlockEvent;
-import freenet.keys.ClientCHK;
-import freenet.keys.ClientKey;
-import freenet.keys.ClientKeyBlock;
-import freenet.keys.FreenetURI;
-import freenet.keys.KeyBlock;
-import freenet.keys.KeyDecodeException;
-import freenet.node.LowLevelGetException;
-import freenet.support.Bucket;
-import freenet.support.BucketTools;
-import freenet.support.Logger;
-import freenet.support.compress.CompressionOutputSizeException;
-import freenet.support.compress.Compressor;
-
-/** Class that does the actual fetching. Does not have to have a user friendly
- * interface!
- */
-class Fetcher {
-
-       /** The original URI to be fetched. */
-       final FreenetURI origURI;
-       /** The settings for the fetch e.g. max file size */
-       final FetcherContext ctx;
-       /** The archive context object to be passed down the entire request. 
This is
-        * recreated if we get an ArchiveRestartException. It does loop 
detection, partly
-        * in order to prevent rare deadlocks.
-        */
-       ArchiveContext archiveContext;
-       
-       /**
-        * Local-only constructor, with ArchiveContext, for recursion via e.g. 
archives.
-        */
-       Fetcher(FreenetURI uri, FetcherContext fctx, ArchiveContext actx) {
-               if(uri == null) throw new NullPointerException();
-               origURI = uri;
-               ctx = fctx;
-               archiveContext = actx;
-       }
-
-       /**
-        * Create a Fetcher. Public constructor, for when starting a new 
request chain.
-        * @param uri The key to fetch.
-        * @param ctx The settings for the fetch.
-        */
-       public Fetcher(FreenetURI uri, FetcherContext ctx) {
-               this(uri, ctx, new ArchiveContext());
-       }
-       
-       /**
-        * Fetch the key. Called by clients.
-        * @return The key requested's data and client metadata.
-        * @throws FetchException If we cannot fetch the key for some reason. 
Various
-        * other exceptions are used internally; they are converted to a 
FetchException
-        * by this driver routine.
-        */
-       public FetchResult run() throws FetchException {
-               for(int i=0;i<ctx.maxArchiveRestarts;i++) {
-                       try {
-                               ClientMetadata dm = new ClientMetadata();
-                               return realRun(dm, 0, origURI, 
ctx.dontEnterImplicitArchives, ctx.localRequestOnly);
-                       } catch (ArchiveRestartException e) {
-                               archiveContext = new ArchiveContext();
-                               continue;
-                       } catch (MetadataParseException e) {
-                               throw new FetchException(e);
-                       } catch (ArchiveFailureException e) {
-                               
if(e.getMessage().equals(ArchiveFailureException.TOO_MANY_LEVELS))
-                                       throw new 
FetchException(FetchException.TOO_DEEP_ARCHIVE_RECURSION);
-                               throw new FetchException(e);
-                       }
-               }
-               throw new 
FetchException(FetchException.TOO_MANY_ARCHIVE_RESTARTS);
-       }
-       
-       /**
-        * Fetch a key, within an overall fetch process. Called by self in 
recursion, and
-        * called by driver function @see run() .
-        * @param dm The client metadata object to accumulate client metadata 
in.
-        * @param recursionLevel The recursion level. Incremented every time we 
enter
-        * realRun(). If it goes above a certain limit, we throw a 
FetchException.
-        * @param uri The URI to fetch.
-        * @return The data, complete with client metadata.
-        * @throws FetchException If we could not fetch the data.
-        * @throws MetadataParseException If we could not parse the metadata.
-        * @throws ArchiveFailureException If we could not extract data from an 
archive.
-        * @throws ArchiveRestartException 
-        */
-       FetchResult realRun(ClientMetadata dm, int recursionLevel, FreenetURI 
uri, boolean dontEnterImplicitArchives, boolean localOnly) 
-       throws FetchException, MetadataParseException, ArchiveFailureException, 
ArchiveRestartException {
-               Logger.minor(this, "Running fetch for: "+uri);
-               ClientKey key;
-               try {
-                       key = ClientKey.getBaseKey(uri);
-               } catch (MalformedURLException e2) {
-                       throw new FetchException(FetchException.INVALID_URI, 
"Invalid URI: "+uri);
-               }
-               LinkedList metaStrings = uri.listMetaStrings();
-               
-               recursionLevel++;
-               if(recursionLevel > ctx.maxRecursionLevel)
-                       throw new 
FetchException(FetchException.TOO_MUCH_RECURSION, ""+recursionLevel+" should be 
< "+ctx.maxRecursionLevel);
-               
-               // Do the fetch
-               ClientKeyBlock block;
-               try {
-                       block = ctx.client.getKey(key, localOnly, 
ctx.starterClient, ctx.cacheLocalRequests);
-               } catch (LowLevelGetException e) {
-                       switch(e.code) {
-                       case LowLevelGetException.DATA_NOT_FOUND:
-                               throw new 
FetchException(FetchException.DATA_NOT_FOUND);
-                       case LowLevelGetException.DATA_NOT_FOUND_IN_STORE:
-                               throw new 
FetchException(FetchException.DATA_NOT_FOUND);
-                       case LowLevelGetException.DECODE_FAILED:
-                               throw new 
FetchException(FetchException.BLOCK_DECODE_ERROR);
-                       case LowLevelGetException.INTERNAL_ERROR:
-                               throw new 
FetchException(FetchException.INTERNAL_ERROR);
-                       case LowLevelGetException.REJECTED_OVERLOAD:
-                               throw new 
FetchException(FetchException.REJECTED_OVERLOAD);
-                       case LowLevelGetException.ROUTE_NOT_FOUND:
-                               throw new 
FetchException(FetchException.ROUTE_NOT_FOUND);
-                       case LowLevelGetException.TRANSFER_FAILED:
-                               throw new 
FetchException(FetchException.TRANSFER_FAILED);
-                       case LowLevelGetException.VERIFY_FAILED:
-                               throw new 
FetchException(FetchException.BLOCK_DECODE_ERROR);
-                       default:
-                               Logger.error(this, "Unknown 
LowLevelGetException code: "+e.code);
-                               throw new 
FetchException(FetchException.INTERNAL_ERROR);
-                       }
-               }
-               
-               ctx.eventProducer.produceEvent(new GotBlockEvent(key));
-               
-               Bucket data;
-               try {
-                       data = block.decode(ctx.bucketFactory, (int) 
(Math.min(ctx.maxTempLength, Integer.MAX_VALUE)));
-               } catch (KeyDecodeException e1) {
-                       throw new 
FetchException(FetchException.BLOCK_DECODE_ERROR, e1.getMessage());
-               } catch (IOException e) {
-                       Logger.error(this, "Could not capture data - disk 
full?: "+e, e);
-                       throw new FetchException(FetchException.BUCKET_ERROR, 
e);
-               }
-               
-               ctx.eventProducer.produceEvent(new DecodedBlockEvent(key));
-               
-               if(!block.isMetadata()) {
-                       // Just return the data
-                       return new FetchResult(dm, data);
-               }
-               
-               // Otherwise we need to parse the metadata
-
-               if(data.size() > ctx.maxMetadataSize)
-                       throw new 
FetchException(FetchException.TOO_BIG_METADATA);
-               Metadata metadata;
-               try {
-                       metadata = 
Metadata.construct(BucketTools.toByteArray(data));
-               } catch (IOException e) {
-                       throw new FetchException(FetchException.BUCKET_ERROR, 
e);
-               }
-               
-               ctx.eventProducer.produceEvent(new FetchedMetadataEvent());
-               
-               FetchResult result = runMetadata(dm, recursionLevel, key, 
metaStrings, metadata, null, key.getURI(), dontEnterImplicitArchives, 
localOnly);
-               if(metaStrings.isEmpty()) return result;
-               // Still got some meta-strings
-               throw new FetchException(FetchException.HAS_MORE_METASTRINGS);
-       }
-       
-       /**
-        * Fetch data, from metadata.
-        * @param recursionLevel The recursion level, from above. Not 
incremented here, as we will
-        * go through realRun() if the key changes, so the number of passes 
here is severely limited.
-        * @param key The key being fetched.
-        * @param metaStrings List of unused meta strings (to be used by 
manifests).
-        * @param metadata The parsed metadata to process.
-        * @param container The container in which this metadata is found.
-        * @return
-        * @throws MetadataParseException If we could not parse metadata from a 
sub-document. Will be
-        * converted to a FetchException above.
-        * @throws ArchiveFailureException If extracting data from an archive 
failed.
-        * @throws FetchException If the fetch failed for some reason.
-        * @throws ArchiveRestartException 
-        */
-       private FetchResult runMetadata(ClientMetadata dm, int recursionLevel, 
ClientKey key, LinkedList metaStrings, 
-                       Metadata metadata, ArchiveHandler container, FreenetURI 
thisKey, boolean dontEnterImplicitArchives, boolean localOnly) 
-       throws MetadataParseException, FetchException, ArchiveFailureException, 
ArchiveRestartException {
-               
-               if(metadata.isSimpleManifest()) {
-                       String name = (String) metaStrings.removeFirst();
-                       // Since metadata is a document, we just replace 
metadata here
-                       if(name == null) {
-                               metadata = metadata.getDefaultDocument();
-                       } else {
-                               metadata = metadata.getDocument(name);
-                               thisKey = thisKey.pushMetaString(name);
-                       }
-                       return runMetadata(dm, recursionLevel, key, 
metaStrings, metadata, container, thisKey, dontEnterImplicitArchives, 
localOnly);
-               } else if(metadata.isArchiveManifest()) {
-                       container = ctx.archiveManager.makeHandler(thisKey, 
metadata.getArchiveType(), false);
-                       Bucket metadataBucket = 
container.getMetadata(archiveContext, ctx, dm, recursionLevel, true);
-                       try {
-                               metadata = Metadata.construct(metadataBucket);
-                       } catch (IOException e) {
-                               throw new 
FetchException(FetchException.BUCKET_ERROR);
-                       }
-                       return runMetadata(dm, recursionLevel+1, key, 
metaStrings, metadata, container, thisKey, dontEnterImplicitArchives, 
localOnly);
-               } else if(metadata.isArchiveInternalRedirect()) {
-                       if(container == null)
-                               throw new 
FetchException(FetchException.NOT_IN_ARCHIVE);
-                       else {
-                               /* Implicit archive handling:
-                                * Sooner or later we reach a 
SimpleFileRedirect to data, a Splitfile to data,
-                                * or an ArchiveInternalRedirect to data.
-                                * 
-                                * In this case, if it is an archive type, if 
implicit archive handling is enabled, and if
-                                * we have more meta-strings, we can try to 
enter it.
-                                */
-                               if((!dontEnterImplicitArchives) && 
ArchiveManager.isUsableArchiveType(dm.getMIMEType()) && 
(!metaStrings.isEmpty())) {
-                                       // Possible implicit archive inside 
archive?
-                                       container = 
ctx.archiveManager.makeHandler(thisKey, 
ArchiveManager.getArchiveType(dm.getMIMEType()), false);
-                                       Bucket metadataBucket = 
container.getMetadata(archiveContext, ctx, dm, recursionLevel, true);
-                                       try {
-                                               metadata = 
Metadata.construct(metadataBucket);
-                                       } catch (IOException e) {
-                                               throw new 
FetchException(FetchException.BUCKET_ERROR);
-                                       }
-                                       return runMetadata(dm, 
recursionLevel+1, key, metaStrings, metadata, container, thisKey, 
dontEnterImplicitArchives, localOnly);
-                               }
-                               Bucket result = 
container.get(metadata.getZIPInternalName(), archiveContext, ctx, dm, 
recursionLevel, true);
-                               
dm.mergeNoOverwrite(metadata.getClientMetadata());
-                               return new FetchResult(dm, result);
-                       }
-               } else if(metadata.isMultiLevelMetadata()) {
-                       // Doesn't have to be a splitfile; could be from a ZIP 
or a plain file.
-                       metadata.setSimpleRedirect();
-                       FetchResult res = runMetadata(dm, recursionLevel, key, 
metaStrings, metadata, container, thisKey, true, localOnly);
-                       try {
-                               metadata = Metadata.construct(res.data);
-                       } catch (IOException e) {
-                               throw new 
FetchException(FetchException.BUCKET_ERROR);
-                       }
-                       return runMetadata(dm, recursionLevel, key, 
metaStrings, metadata, container, thisKey, dontEnterImplicitArchives, 
localOnly);
-               } else if(metadata.isSingleFileRedirect()) {
-                       FreenetURI uri = metadata.getSingleTarget();
-                       dm.mergeNoOverwrite(metadata.getClientMetadata());
-                       if((!dontEnterImplicitArchives) && 
ArchiveManager.isUsableArchiveType(dm.getMIMEType()) && 
(!metaStrings.isEmpty())) {
-                               // Is probably an implicit archive.
-                               ClientKey target;
-                               try {
-                                       target = ClientKey.getBaseKey(uri);
-                               } catch (MalformedURLException e1) {
-                                       throw new 
FetchException(FetchException.INVALID_URI, "Invalid URI: "+uri);
-                               }
-                               // Probably a usable archive as-is. We may not 
have to fetch it.
-                               container = ctx.archiveManager.makeHandler(uri, 
ArchiveManager.getArchiveType(dm.getMIMEType()), true);
-                               if(container != null) {
-                                       Bucket metadataBucket = 
container.getMetadata(archiveContext, ctx, dm, recursionLevel, true);
-                                       try {
-                                               metadata = 
Metadata.construct(metadataBucket);
-                                       } catch (IOException e) {
-                                               throw new 
FetchException(FetchException.BUCKET_ERROR);
-                                       }
-                                       return runMetadata(dm, 
recursionLevel+1, key, metaStrings, metadata, container, thisKey, 
dontEnterImplicitArchives, localOnly);
-                               } // else just fetch it, create context later
-                       }
-                       FetchResult fr = realRun(dm, recursionLevel, uri, 
dontEnterImplicitArchives, localOnly);
-                       if(metadata.compressed) {
-                               Compressor codec = 
Compressor.getCompressionAlgorithmByMetadataID(metadata.compressionCodec);
-                               Bucket data = fr.data;
-                               Bucket output;
-                               try {
-                                       long maxLen = ctx.maxTempLength;
-                                       if(maxLen < 0) maxLen = Long.MAX_VALUE;
-                                       output = codec.decompress(data, 
ctx.bucketFactory, maxLen);
-                               } catch (IOException e) {
-                                       throw new 
FetchException(FetchException.BUCKET_ERROR, e);
-                               } catch (CompressionOutputSizeException e) {
-                                       throw new 
FetchException(FetchException.TOO_BIG);
-                               }
-                               return new FetchResult(fr, output);
-                       }
-                       return fr;
-               } else if(metadata.isSplitfile()) {
-                       // Straight data splitfile.
-                       // Might be used by parents for something else, in 
which case they will set dontEnterImplicitArchives.
-                       dm.mergeNoOverwrite(metadata.getClientMetadata()); // 
even splitfiles can have mime types!
-                       if((!dontEnterImplicitArchives) && 
ArchiveManager.isUsableArchiveType(dm.getMIMEType()) && 
(!metaStrings.isEmpty())) {
-                               // We know target is not metadata.
-                               container = 
ctx.archiveManager.makeHandler(thisKey, 
ArchiveManager.getArchiveType(dm.getMIMEType()), false);
-                               Bucket metadataBucket = 
container.getMetadata(archiveContext, ctx, dm, recursionLevel, true);
-                               try {
-                                       metadata = 
Metadata.construct(metadataBucket);
-                               } catch (IOException e) {
-                                       throw new 
FetchException(FetchException.BUCKET_ERROR, e);
-                               }
-                               return runMetadata(dm, recursionLevel+1, key, 
metaStrings, metadata, container, thisKey, dontEnterImplicitArchives, 
localOnly);
-                       }
-                       
-                       FetcherContext newCtx;
-                       if(metadata.splitUseLengths)
-                               newCtx = new FetcherContext(ctx, 
FetcherContext.SPLITFILE_USE_LENGTHS_MASK);
-                       else
-                               newCtx = new FetcherContext(ctx, 
FetcherContext.SPLITFILE_DEFAULT_MASK);
-                       
-                       SplitFetcher sf = new SplitFetcher(metadata, 
archiveContext, newCtx, recursionLevel);
-                       Bucket sfResult = sf.fetch(); // will throw in event of 
error
-                       if(metadata.compressed) {
-                               Compressor codec = 
Compressor.getCompressionAlgorithmByMetadataID(metadata.compressionCodec);
-                               try {
-                                       long maxLen = ctx.maxTempLength;
-                                       if(maxLen < 0) maxLen = Long.MAX_VALUE;
-                                       sfResult = codec.decompress(sfResult, 
ctx.bucketFactory, maxLen);
-                               } catch (IOException e) {
-                                       throw new 
FetchException(FetchException.BUCKET_ERROR, e);
-                               } catch (CompressionOutputSizeException e) {
-                                       throw new 
FetchException(FetchException.TOO_BIG);
-                               }
-                       }
-                       return new FetchResult(dm, sfResult);
-               } else {
-                       Logger.error(this, "Don't know what to do with 
metadata: "+metadata);
-                       throw new 
FetchException(FetchException.UNKNOWN_METADATA);
-               }
-       }
-}

Modified: branches/freenet-freejvms/src/freenet/client/FetcherContext.java
===================================================================
--- branches/freenet-freejvms/src/freenet/client/FetcherContext.java    
2006-02-03 22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/freenet/client/FetcherContext.java    
2006-02-03 22:55:27 UTC (rev 7999)
@@ -2,52 +2,52 @@

 import freenet.client.events.ClientEventProducer;
 import freenet.crypt.RandomSource;
-import freenet.node.RequestStarterClient;
-import freenet.node.SimpleLowLevelClient;
 import freenet.support.BucketFactory;

 /** Context for a Fetcher. Contains all the settings a Fetcher needs to know 
about. */
 public class FetcherContext implements Cloneable {

-       static final int SPLITFILE_DEFAULT_BLOCK_MASK = 1;
-       static final int SPLITFILE_DEFAULT_MASK = 2;
-       static final int SPLITFILE_USE_LENGTHS_MASK = 3;
+       public static final int IDENTICAL_MASK = 0;
+       public static final int SPLITFILE_DEFAULT_BLOCK_MASK = 1;
+       public static final int SPLITFILE_DEFAULT_MASK = 2;
+       public static final int SPLITFILE_USE_LENGTHS_MASK = 3;
+       public static final int SET_RETURN_ARCHIVES = 4;
        /** Low-level client to send low-level requests to. */
-       final SimpleLowLevelClient client;
-       final long maxOutputLength;
-       final long maxTempLength;
-       final ArchiveManager archiveManager;
-       final BucketFactory bucketFactory;
-       final int maxRecursionLevel;
-       final int maxArchiveRestarts;
-       final boolean dontEnterImplicitArchives;
-       final int maxSplitfileThreads;
-       final int maxSplitfileBlockRetries;
-       final int maxNonSplitfileRetries;
-       final RandomSource random;
-       final boolean allowSplitfiles;
-       final boolean followRedirects;
-       final boolean localRequestOnly;
-       final ClientEventProducer eventProducer;
+       public long maxOutputLength;
+       public long maxTempLength;
+       public final ArchiveManager archiveManager;
+       public final BucketFactory bucketFactory;
+       public int maxRecursionLevel;
+       public int maxArchiveRestarts;
+       public boolean dontEnterImplicitArchives;
+       public int maxSplitfileThreads;
+       public int maxSplitfileBlockRetries;
+       public int maxNonSplitfileRetries;
+       public final RandomSource random;
+       public boolean allowSplitfiles;
+       public boolean followRedirects;
+       public boolean localRequestOnly;
+       public boolean ignoreStore;
+       public final ClientEventProducer eventProducer;
        /** Whether to allow non-full blocks, or blocks which are not direct 
CHKs, in splitfiles.
         * Set by the splitfile metadata and the mask constructor, so we don't 
need to pass it in. */
-       final boolean splitfileUseLengths;
-       final int maxMetadataSize;
-       final int maxDataBlocksPerSegment;
-       final int maxCheckBlocksPerSegment;
-       final RequestStarterClient starterClient;
-       final boolean cacheLocalRequests;
+       public boolean splitfileUseLengths;
+       public int maxMetadataSize;
+       public int maxDataBlocksPerSegment;
+       public int maxCheckBlocksPerSegment;
+       public boolean cacheLocalRequests;
+       /** If true, and we get a ZIP manifest, and we have no meta-strings 
left, then
+        * return the manifest contents as data. */
+       public boolean returnZIPManifests;

-       
-       public FetcherContext(SimpleLowLevelClient client, long curMaxLength, 
+       public FetcherContext(long curMaxLength, 
                        long curMaxTempLength, int maxMetadataSize, int 
maxRecursionLevel, int maxArchiveRestarts,
                        boolean dontEnterImplicitArchives, int 
maxSplitfileThreads,
                        int maxSplitfileBlockRetries, int 
maxNonSplitfileRetries,
                        boolean allowSplitfiles, boolean followRedirects, 
boolean localRequestOnly,
                        int maxDataBlocksPerSegment, int 
maxCheckBlocksPerSegment,
                        RandomSource random, ArchiveManager archiveManager, 
BucketFactory bucketFactory,
-                       ClientEventProducer producer, RequestStarterClient 
starter, boolean cacheLocalRequests) {
-               this.client = client;
+                       ClientEventProducer producer, boolean 
cacheLocalRequests) {
                this.maxOutputLength = curMaxLength;
                this.maxTempLength = curMaxTempLength;
                this.maxMetadataSize = maxMetadataSize;
@@ -67,18 +67,38 @@
                this.eventProducer = producer;
                this.maxDataBlocksPerSegment = maxDataBlocksPerSegment;
                this.maxCheckBlocksPerSegment = maxCheckBlocksPerSegment;
-               this.starterClient = starter;
                this.cacheLocalRequests = cacheLocalRequests;
        }

        public FetcherContext(FetcherContext ctx, int maskID) {
-               if(maskID == SPLITFILE_DEFAULT_BLOCK_MASK) {
-                       this.client = ctx.client;
+               if(maskID == IDENTICAL_MASK) {
                        this.maxOutputLength = ctx.maxOutputLength;
                        this.maxMetadataSize = ctx.maxMetadataSize;
                        this.maxTempLength = ctx.maxTempLength;
                        this.archiveManager = ctx.archiveManager;
                        this.bucketFactory = ctx.bucketFactory;
+                       this.maxRecursionLevel = ctx.maxRecursionLevel;
+                       this.maxArchiveRestarts = ctx.maxArchiveRestarts;
+                       this.dontEnterImplicitArchives = 
ctx.dontEnterImplicitArchives;
+                       this.random = ctx.random;
+                       this.maxSplitfileThreads = ctx.maxSplitfileThreads;
+                       this.maxSplitfileBlockRetries = 
ctx.maxSplitfileBlockRetries;
+                       this.maxNonSplitfileRetries = 
ctx.maxNonSplitfileRetries;
+                       this.allowSplitfiles = ctx.allowSplitfiles;
+                       this.followRedirects = ctx.followRedirects;
+                       this.localRequestOnly = ctx.localRequestOnly;
+                       this.splitfileUseLengths = ctx.splitfileUseLengths;
+                       this.eventProducer = ctx.eventProducer;
+                       this.maxDataBlocksPerSegment = 
ctx.maxDataBlocksPerSegment;
+                       this.maxCheckBlocksPerSegment = 
ctx.maxCheckBlocksPerSegment;
+                       this.cacheLocalRequests = ctx.cacheLocalRequests;
+                       this.returnZIPManifests = ctx.returnZIPManifests;
+               } else if(maskID == SPLITFILE_DEFAULT_BLOCK_MASK) {
+                       this.maxOutputLength = ctx.maxOutputLength;
+                       this.maxMetadataSize = ctx.maxMetadataSize;
+                       this.maxTempLength = ctx.maxTempLength;
+                       this.archiveManager = ctx.archiveManager;
+                       this.bucketFactory = ctx.bucketFactory;
                        this.maxRecursionLevel = 1;
                        this.maxArchiveRestarts = 0;
                        this.dontEnterImplicitArchives = true;
@@ -93,10 +113,9 @@
                        this.eventProducer = ctx.eventProducer;
                        this.maxDataBlocksPerSegment = 0;
                        this.maxCheckBlocksPerSegment = 0;
-                       this.starterClient = ctx.starterClient;
                        this.cacheLocalRequests = ctx.cacheLocalRequests;
+                       this.returnZIPManifests = false;
                } else if(maskID == SPLITFILE_DEFAULT_MASK) {
-                       this.client = ctx.client;
                        this.maxOutputLength = ctx.maxOutputLength;
                        this.maxTempLength = ctx.maxTempLength;
                        this.maxMetadataSize = ctx.maxMetadataSize;
@@ -116,10 +135,9 @@
                        this.eventProducer = ctx.eventProducer;
                        this.maxDataBlocksPerSegment = 
ctx.maxDataBlocksPerSegment;
                        this.maxCheckBlocksPerSegment = 
ctx.maxCheckBlocksPerSegment;
-                       this.starterClient = ctx.starterClient;
                        this.cacheLocalRequests = ctx.cacheLocalRequests;
+                       this.returnZIPManifests = ctx.returnZIPManifests;
                } else if(maskID == SPLITFILE_USE_LENGTHS_MASK) {
-                       this.client = ctx.client;
                        this.maxOutputLength = ctx.maxOutputLength;
                        this.maxTempLength = ctx.maxTempLength;
                        this.maxMetadataSize = ctx.maxMetadataSize;
@@ -139,9 +157,32 @@
                        this.eventProducer = ctx.eventProducer;
                        this.maxDataBlocksPerSegment = 
ctx.maxDataBlocksPerSegment;
                        this.maxCheckBlocksPerSegment = 
ctx.maxCheckBlocksPerSegment;
-                       this.starterClient = ctx.starterClient;
                        this.cacheLocalRequests = ctx.cacheLocalRequests;
-               } else throw new IllegalArgumentException();
+                       this.returnZIPManifests = ctx.returnZIPManifests;
+               } else if (maskID == SET_RETURN_ARCHIVES) {
+                       this.maxOutputLength = ctx.maxOutputLength;
+                       this.maxMetadataSize = ctx.maxMetadataSize;
+                       this.maxTempLength = ctx.maxTempLength;
+                       this.archiveManager = ctx.archiveManager;
+                       this.bucketFactory = ctx.bucketFactory;
+                       this.maxRecursionLevel = ctx.maxRecursionLevel;
+                       this.maxArchiveRestarts = ctx.maxArchiveRestarts;
+                       this.dontEnterImplicitArchives = 
ctx.dontEnterImplicitArchives;
+                       this.random = ctx.random;
+                       this.maxSplitfileThreads = ctx.maxSplitfileThreads;
+                       this.maxSplitfileBlockRetries = 
ctx.maxSplitfileBlockRetries;
+                       this.maxNonSplitfileRetries = 
ctx.maxNonSplitfileRetries;
+                       this.allowSplitfiles = ctx.allowSplitfiles;
+                       this.followRedirects = ctx.followRedirects;
+                       this.localRequestOnly = ctx.localRequestOnly;
+                       this.splitfileUseLengths = ctx.splitfileUseLengths;
+                       this.eventProducer = ctx.eventProducer;
+                       this.maxDataBlocksPerSegment = 
ctx.maxDataBlocksPerSegment;
+                       this.maxCheckBlocksPerSegment = 
ctx.maxCheckBlocksPerSegment;
+                       this.cacheLocalRequests = ctx.cacheLocalRequests;
+                       this.returnZIPManifests = true;
+               }
+               else throw new IllegalArgumentException();
        }

        /** Make public, but just call parent for a field for field copy */
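
The mask IDs above select which fields the copy constructor carries over and which
it overrides, and with the fields now public a derived copy can also be adjusted
directly. A small sketch of the intended pattern; the client variable and the
particular overrides are illustrative assumptions, not code from this revision.

        // Illustrative only: derive per-request contexts from a base context.
        FetcherContext base = client.getFetcherContext();

        // Field-for-field copy, then tighten two of the now-public fields for one request.
        FetcherContext oneShot = new FetcherContext(base, FetcherContext.IDENTICAL_MASK);
        oneShot.maxOutputLength = 16 * 1024; // refuse anything larger than 16KB
        oneShot.localRequestOnly = true;     // only consult the local datastore

        // Copy which returns ZIP manifest contents as data once no meta-strings remain.
        FetcherContext archives = new FetcherContext(base, FetcherContext.SET_RETURN_ARCHIVES);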

Deleted: branches/freenet-freejvms/src/freenet/client/FileInserter.java
===================================================================
--- branches/freenet-freejvms/src/freenet/client/FileInserter.java      
2006-02-03 22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/freenet/client/FileInserter.java      
2006-02-03 22:55:27 UTC (rev 7999)
@@ -1,196 +0,0 @@
-package freenet.client;
-
-import java.io.IOException;
-
-import freenet.client.events.BlockInsertErrorEvent;
-import freenet.client.events.SimpleBlockPutEvent;
-import freenet.keys.CHKEncodeException;
-import freenet.keys.ClientCHKBlock;
-import freenet.keys.FreenetURI;
-import freenet.keys.NodeCHK;
-import freenet.node.LowLevelPutException;
-import freenet.support.Bucket;
-import freenet.support.BucketTools;
-import freenet.support.Logger;
-import freenet.support.compress.CompressionOutputSizeException;
-import freenet.support.compress.Compressor;
-
-/**
- * Class that does high-level inserts.
- */
-public class FileInserter {
-
-       InserterContext ctx;
-
-       public FileInserter(InserterContext context) {
-               this.ctx = context;
-       }
-
-       /**
-        * Do an insert.
-        * @param block The data to insert.
-        * @param localOnly 
-        * @return The URI of the inserted data.
-        * @throws InserterException 
-        */
-       public FreenetURI run(InsertBlock block, boolean metadata, boolean 
getCHKOnly, boolean noRetries) throws InserterException {
-               if(block.data == null)
-                       throw new NullPointerException();
-               if(!block.desiredURI.toString(false).equals("CHK@"))
-                       throw new 
InserterException(InserterException.INVALID_URI, null);
-               
-               // Insert the content.
-               // If we have reason to create a metadata document, include the 
client metadata.
-               // Otherwise only create one (a redirect) with the client 
metadata, if there is any.
-               
-               // First, can it fit into a single block?
-               
-               Bucket data = block.data;
-               ClientCHKBlock chk;
-
-               Compressor bestCodec = null;
-               Bucket bestCompressedData = null;
-
-               long origSize = data.size();
-               if(data.size() > NodeCHK.BLOCK_SIZE && (!ctx.dontCompress)) {
-                       // Try to compress the data.
-                       // Try each algorithm, starting with the fastest and 
weakest.
-                       // Stop when run out of algorithms, or the compressed 
data fits in a single block.
-                       int algos = Compressor.countCompressAlgorithms();
-                       try {
-                               for(int i=0;i<algos;i++) {
-                                       Compressor comp = 
Compressor.getCompressionAlgorithmByDifficulty(i);
-                                       Bucket result;
-                                       result = comp.compress(data, ctx.bf, 
Long.MAX_VALUE);
-                                       if(result.size() < NodeCHK.BLOCK_SIZE) {
-                                               bestCodec = comp;
-                                               data = result;
-                                               if(bestCompressedData != null)
-                                                       
ctx.bf.freeBucket(bestCompressedData);
-                                               break;
-                                       }
-                                       if(bestCompressedData != null && 
result.size() <  bestCompressedData.size()) {
-                                               
ctx.bf.freeBucket(bestCompressedData);
-                                               bestCompressedData = result;
-                                               bestCodec = comp;
-                                       } else if(bestCompressedData == null && 
result.size() < data.size()) {
-                                               bestCompressedData = result;
-                                               bestCodec = comp;
-                                       }
-                               }
-                       } catch (IOException e) {
-                               throw new 
InserterException(InserterException.BUCKET_ERROR, e, null);
-                       } catch (CompressionOutputSizeException e) {
-                               // Impossible
-                               throw new Error(e);
-                       }
-               }
-               
-               if(data.size() <= NodeCHK.BLOCK_SIZE) {
-                       try {
-                               if(bestCodec == null) {
-                                       chk = ClientCHKBlock.encode(data, 
metadata, true, (short)-1, 0);
-                               } else {
-                                       if(origSize > 
ClientCHKBlock.MAX_LENGTH_BEFORE_COMPRESSION)
-                                               throw new 
IllegalArgumentException("Data too big to compress into single block, but it 
does");
-                                       chk = ClientCHKBlock.encode(data, 
metadata, false, bestCodec.codecNumberForMetadata(), (int)origSize);
-                               }
-                       } catch (IOException e) {
-                               throw new 
InserterException(InserterException.BUCKET_ERROR, e, null);
-                       } catch (CHKEncodeException e) {
-                               Logger.error(this, "Unexpected error: "+e, e);
-                               throw new 
InserterException(InserterException.INTERNAL_ERROR, null);
-                       }
-                       return simplePutCHK(chk, block.clientMetadata, 
getCHKOnly, noRetries);
-               }
-               
-               // Too big, encode to a splitfile
-               SplitInserter splitInsert = new SplitInserter(data, 
block.clientMetadata, bestCodec, ctx.splitfileAlgorithm, ctx, this, 
NodeCHK.BLOCK_SIZE, getCHKOnly, metadata);
-               return splitInsert.run();
-       }
-
-       /**
-        * Simple insert. Only complication is that it might have some client 
metadata.
-        * @param chk The data encoded into a single CHK.
-        * @param clientMetadata The client metadata. If this is non-trivial, 
we will have to
-        * create a redirect document just to put the metadata in.
-        * @return The URI of the resulting CHK.
-        * @throws InserterException If there was an error inserting the block.
-        */
-       private FreenetURI simplePutCHK(ClientCHKBlock chk, ClientMetadata 
clientMetadata, boolean getCHKOnly, boolean noRetries) throws InserterException 
{
-               LowLevelPutException le = null;
-               int rnfs = 0;
-               for(int i=0;i<=ctx.maxInsertRetries;i++) {
-                       try {
-                               if(!getCHKOnly)
-                                       ctx.eventProducer.produceEvent(new 
SimpleBlockPutEvent(chk.getClientKey()));
-                               if(!getCHKOnly)
-                                       ctx.client.putCHK(chk, 
ctx.starterClient, ctx.cacheLocalRequests);
-                               break;
-                       } catch (LowLevelPutException e) {
-                               le = e;
-                               switch(le.code) {
-                               case 
LowLevelPutException.ROUTE_REALLY_NOT_FOUND:
-                               case LowLevelPutException.REJECTED_OVERLOAD:
-                                       rnfs = 0;
-                               }
-                               if(noRetries)
-                                       break;
-                               if(le.code == 
LowLevelPutException.ROUTE_NOT_FOUND && ctx.consecutiveRNFsCountAsSuccess > 0) {
-                                       rnfs++;
-                                       if(rnfs >= 
ctx.consecutiveRNFsCountAsSuccess) {
-                                               le = null;
-                                               break;
-                                       }
-                               }
-                       }
-               }
-               
-               FreenetURI uri;
-               
-               if(clientMetadata == null || clientMetadata.isTrivial())
-                       // Don't need a redirect for the metadata
-                        uri = chk.getClientKey().getURI();
-               else {
-                       // Do need a redirect for the metadata
-                       Metadata metadata = new 
Metadata(Metadata.SIMPLE_REDIRECT, chk.getClientKey().getURI(), clientMetadata);
-                       uri = putMetadataCHK(metadata, getCHKOnly, noRetries);
-               }
-               
-               if(le != null)
-                       translateException(le, uri);
-               
-               return uri;
-       }
-
-       private void translateException(LowLevelPutException e, FreenetURI uri) 
throws InserterException {
-               switch(e.code) {
-               case LowLevelPutException.INTERNAL_ERROR:
-                       throw new 
InserterException(InserterException.INTERNAL_ERROR, e, null);
-               case LowLevelPutException.REJECTED_OVERLOAD:
-                       throw new 
InserterException(InserterException.REJECTED_OVERLOAD, uri);
-               case LowLevelPutException.ROUTE_NOT_FOUND:
-                       throw new 
InserterException(InserterException.ROUTE_NOT_FOUND, uri);
-               case LowLevelPutException.ROUTE_REALLY_NOT_FOUND:
-                       throw new 
InserterException(InserterException.ROUTE_REALLY_NOT_FOUND, uri);
-               default:
-                       Logger.error(this, "Unknown LowLevelPutException code: 
"+e.code+" on "+this);
-                       throw new 
InserterException(InserterException.INTERNAL_ERROR, e, null);
-               }
-       }
-
-       /** Put a metadata CHK 
-        * @throws InserterException If the insert fails.
-        */
-       private FreenetURI putMetadataCHK(Metadata metadata, boolean 
getCHKOnly, boolean noRetries) throws InserterException {
-               byte[] data = metadata.writeToByteArray();
-               Bucket bucket;
-               try {
-                       bucket = BucketTools.makeImmutableBucket(ctx.bf, data);
-               } catch (IOException e) {
-                       throw new 
InserterException(InserterException.BUCKET_ERROR, null);
-               }
-               InsertBlock block = new InsertBlock(bucket, null, 
FreenetURI.EMPTY_CHK_URI);
-               return run(block, true, getCHKOnly, noRetries);
-       }
-}

Modified: 
branches/freenet-freejvms/src/freenet/client/HighLevelSimpleClient.java
===================================================================
--- branches/freenet-freejvms/src/freenet/client/HighLevelSimpleClient.java     
2006-02-03 22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/freenet/client/HighLevelSimpleClient.java     
2006-02-03 22:55:27 UTC (rev 7999)
@@ -1,8 +1,9 @@
 package freenet.client;

+import java.util.HashMap;
+
 import freenet.client.events.ClientEventListener;
 import freenet.keys.FreenetURI;
-import freenet.node.RequestStarterClient;

 public interface HighLevelSimpleClient {

@@ -23,12 +24,26 @@
        public FetchResult fetch(FreenetURI uri) throws FetchException;

        /**
-        * Blocking insert of a URI
+        * Blocking insert.
         * @throws InserterException If there is an error inserting the data
         */
        public FreenetURI insert(InsertBlock insert, boolean getCHKOnly) throws 
InserterException;
+
+       /**
+        * Blocking insert of a redirect.
+        */
+       public FreenetURI insertRedirect(FreenetURI insertURI, FreenetURI 
target) throws InserterException;

        /**
+        * Blocking insert of multiple files as a manifest (or ZIP manifest, etc.).
+        */
+       public FreenetURI insertManifest(FreenetURI insertURI, HashMap 
bucketsByName, String defaultName) throws InserterException;
+       
+       public FetcherContext getFetcherContext();
+
+       public InserterContext getInserterContext();
+       
+       /**
         * Add a ClientEventListener.
         */
        public void addGlobalHook(ClientEventListener listener);
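
Both new methods block until the insert completes, in the same way insert() does.
A hedged usage sketch follows; the helper method, its parameters and the file names
are made up for illustration, and the bucketsByName map is assumed to be keyed by
file name as the parameter name suggests.

        // Illustrative only: publish a two-file site as a manifest, then a redirect to it.
        FreenetURI publishSite(HighLevelSimpleClient client, Bucket indexPage, Bucket stylesheet)
                        throws InserterException {
                HashMap files = new HashMap();
                files.put("index.html", indexPage);
                files.put("style.css", stylesheet);
                // "index.html" becomes the default document served when no meta-string is given.
                FreenetURI siteURI = client.insertManifest(FreenetURI.EMPTY_CHK_URI, files, "index.html");
                // A plain redirect key pointing at the freshly inserted site.
                return client.insertRedirect(FreenetURI.EMPTY_CHK_URI, siteURI);
        }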

Modified: 
branches/freenet-freejvms/src/freenet/client/HighLevelSimpleClientImpl.java
===================================================================
--- branches/freenet-freejvms/src/freenet/client/HighLevelSimpleClientImpl.java 
2006-02-03 22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/freenet/client/HighLevelSimpleClientImpl.java 
2006-02-03 22:55:27 UTC (rev 7999)
@@ -1,29 +1,36 @@
 package freenet.client;

+import java.io.IOException;
+import java.util.HashMap;
+
+import freenet.client.async.ClientGetter;
+import freenet.client.async.ClientPutter;
+import freenet.client.async.SimpleManifestPutter;
 import freenet.client.events.ClientEventListener;
 import freenet.client.events.ClientEventProducer;
 import freenet.client.events.EventLogger;
 import freenet.client.events.SimpleEventProducer;
 import freenet.crypt.RandomSource;
+import freenet.keys.ClientKey;
 import freenet.keys.FreenetURI;
-import freenet.node.RequestStarterClient;
-import freenet.node.SimpleLowLevelClient;
+import freenet.node.Node;
+import freenet.support.Bucket;
 import freenet.support.BucketFactory;
+import freenet.support.BucketTools;
 import freenet.support.Logger;

 public class HighLevelSimpleClientImpl implements HighLevelSimpleClient {

-       private final SimpleLowLevelClient client;
        private final ArchiveManager archiveManager;
+       private final short priorityClass;
        private final BucketFactory bucketFactory;
+       private final Node node;
        /** One CEP for all requests and inserts */
        private final ClientEventProducer globalEventProducer;
        private long curMaxLength;
        private long curMaxTempLength;
        private int curMaxMetadataLength;
        private final RandomSource random;
-       private final RequestStarterClient requestStarter;
-       private final RequestStarterClient insertStarter;
        /** See comments in Node */
        private final boolean cacheLocalRequests;
        static final int MAX_RECURSION = 10;
@@ -58,9 +65,10 @@
        static final int SPLITFILE_CHECK_BLOCKS_PER_SEGMENT = 64;


-       public HighLevelSimpleClientImpl(SimpleLowLevelClient client, 
ArchiveManager mgr, BucketFactory bf, RandomSource r, RequestStarterClient 
requestStarterClient, RequestStarterClient insertStarterClient, boolean 
cacheLocalRequests) {
-               this.client = client;
+       public HighLevelSimpleClientImpl(Node node, ArchiveManager mgr, 
BucketFactory bf, RandomSource r, boolean cacheLocalRequests, short 
priorityClass) {
+               this.node = node;
                archiveManager = mgr;
+               this.priorityClass = priorityClass;
                bucketFactory = bf;
                random = r;
                this.globalEventProducer = new SimpleEventProducer();
@@ -68,8 +76,6 @@
                curMaxLength = Long.MAX_VALUE;
                curMaxTempLength = Long.MAX_VALUE;
                curMaxMetadataLength = 1024 * 1024;
-               this.requestStarter = requestStarterClient;
-               this.insertStarter = insertStarterClient;
                this.cacheLocalRequests = cacheLocalRequests;
        }

@@ -86,31 +92,63 @@
         */
        public FetchResult fetch(FreenetURI uri) throws FetchException {
                if(uri == null) throw new NullPointerException();
-               FetcherContext context = new FetcherContext(client, 
curMaxLength, curMaxTempLength, curMaxMetadataLength, 
-                               MAX_RECURSION, MAX_ARCHIVE_RESTARTS, 
DONT_ENTER_IMPLICIT_ARCHIVES, 
-                               SPLITFILE_THREADS, SPLITFILE_BLOCK_RETRIES, 
NON_SPLITFILE_RETRIES,
-                               FETCH_SPLITFILES, FOLLOW_REDIRECTS, 
LOCAL_REQUESTS_ONLY,
-                               MAX_SPLITFILE_BLOCKS_PER_SEGMENT, 
MAX_SPLITFILE_CHECK_BLOCKS_PER_SEGMENT,
-                               random, archiveManager, bucketFactory, 
globalEventProducer, requestStarter, cacheLocalRequests);
-               Fetcher f = new Fetcher(uri, context);
-               return f.run();
+               FetcherContext context = getFetcherContext();
+               FetchWaiter fw = new FetchWaiter();
+               ClientGetter get = new ClientGetter(fw, node.fetchScheduler, 
uri, context, priorityClass, this);
+               get.start();
+               return fw.waitForCompletion();
        }

        public FreenetURI insert(InsertBlock insert, boolean getCHKOnly) throws 
InserterException {
-               InserterContext context = new InserterContext(client, 
bucketFactory, random, INSERT_RETRIES, CONSECUTIVE_RNFS_ASSUME_SUCCESS,
-                               SPLITFILE_INSERT_THREADS, 
SPLITFILE_BLOCKS_PER_SEGMENT, SPLITFILE_CHECK_BLOCKS_PER_SEGMENT, 
globalEventProducer, insertStarter, cacheLocalRequests);
-               FileInserter i = new FileInserter(context);
-               return i.run(insert, false, getCHKOnly, false);
+               return insert(insert, getCHKOnly, false);
        }
+       
+       public FreenetURI insert(InsertBlock insert, boolean getCHKOnly, 
boolean isMetadata) throws InserterException {
+               InserterContext context = getInserterContext();
+               PutWaiter pw = new PutWaiter();
+               ClientPutter put = new ClientPutter(pw, insert.data, 
insert.desiredURI, insert.clientMetadata, 
+                               context, node.putScheduler, priorityClass, 
getCHKOnly, isMetadata, this);
+               put.start();
+               return pw.waitForCompletion();
+       }

-       public FreenetURI insert(InsertBlock insert, boolean getCHKOnly, 
boolean metadata) throws InserterException {
-               InserterContext context = new InserterContext(client, 
bucketFactory, random, INSERT_RETRIES, CONSECUTIVE_RNFS_ASSUME_SUCCESS,
-                               SPLITFILE_INSERT_THREADS, 
SPLITFILE_BLOCKS_PER_SEGMENT, SPLITFILE_CHECK_BLOCKS_PER_SEGMENT, 
globalEventProducer, insertStarter, cacheLocalRequests);
-               FileInserter i = new FileInserter(context);
-               return i.run(insert, metadata, getCHKOnly, false);
+       public FreenetURI insertRedirect(FreenetURI insertURI, FreenetURI 
targetURI) throws InserterException {
+               Metadata m = new Metadata(Metadata.SIMPLE_REDIRECT, targetURI, 
new ClientMetadata());
+               Bucket b;
+               try {
+                       b = BucketTools.makeImmutableBucket(bucketFactory, 
m.writeToByteArray());
+               } catch (IOException e) {
+                       Logger.error(this, "Bucket error: "+e);
+                       throw new 
InserterException(InserterException.INTERNAL_ERROR, e, null);
+               }
+               ClientKey k;
+               InsertBlock block = new InsertBlock(b, null, insertURI);
+               return insert(block, false, true);
        }

+       public FreenetURI insertManifest(FreenetURI insertURI, HashMap 
bucketsByName, String defaultName) throws InserterException {
+               PutWaiter pw = new PutWaiter();
+               SimpleManifestPutter putter =
+                       new SimpleManifestPutter(pw, node.putScheduler, 
bucketsByName, priorityClass, insertURI, defaultName, getInserterContext(), 
false, this);
+               return pw.waitForCompletion();
+       }
+       
        public void addGlobalHook(ClientEventListener listener) {
                globalEventProducer.addEventListener(listener);
        }
+
+       public FetcherContext getFetcherContext() {
+               return                  
+                       new FetcherContext(curMaxLength, curMaxTempLength, 
curMaxMetadataLength, 
+                               MAX_RECURSION, MAX_ARCHIVE_RESTARTS, 
DONT_ENTER_IMPLICIT_ARCHIVES, 
+                               SPLITFILE_THREADS, SPLITFILE_BLOCK_RETRIES, 
NON_SPLITFILE_RETRIES,
+                               FETCH_SPLITFILES, FOLLOW_REDIRECTS, 
LOCAL_REQUESTS_ONLY,
+                               MAX_SPLITFILE_BLOCKS_PER_SEGMENT, 
MAX_SPLITFILE_CHECK_BLOCKS_PER_SEGMENT,
+                               random, archiveManager, bucketFactory, 
globalEventProducer, cacheLocalRequests);
+       }
+
+       public InserterContext getInserterContext() {
+               return new InserterContext(bucketFactory, random, 
INSERT_RETRIES, CONSECUTIVE_RNFS_ASSUME_SUCCESS,
+                               SPLITFILE_INSERT_THREADS, 
SPLITFILE_BLOCKS_PER_SEGMENT, SPLITFILE_CHECK_BLOCKS_PER_SEGMENT, 
globalEventProducer, cacheLocalRequests);
+       }
 }
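
fetch() and insert() now start an asynchronous ClientGetter/ClientPutter and block
on a waiter object until the request finishes. The sketch below shows the
wait/notify shape such a waiter presumably has; it is an assumption about how
FetchWaiter works internally, not code from this revision.

        // Assumed shape of a blocking waiter: the asynchronous request invokes one of
        // the two callbacks from its own thread; the original caller sleeps until then.
        class BlockingWaiter {
                private FetchResult result;
                private FetchException error;
                private boolean finished;

                synchronized void onSuccess(FetchResult r) { result = r; finished = true; notifyAll(); }
                synchronized void onFailure(FetchException e) { error = e; finished = true; notifyAll(); }

                synchronized FetchResult waitForCompletion() throws FetchException {
                        while(!finished) {
                                try { wait(); } catch (InterruptedException e) { /* keep waiting */ }
                        }
                        if(error != null) throw error;
                        return result;
                }
        }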

Modified: branches/freenet-freejvms/src/freenet/client/InsertBlock.java
===================================================================
--- branches/freenet-freejvms/src/freenet/client/InsertBlock.java       
2006-02-03 22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/freenet/client/InsertBlock.java       
2006-02-03 22:55:27 UTC (rev 7999)
@@ -9,15 +9,20 @@
 public class InsertBlock {

        Bucket data;
-       final FreenetURI desiredURI;
-       final ClientMetadata clientMetadata;
+       public final FreenetURI desiredURI;
+       public final ClientMetadata clientMetadata;

        public InsertBlock(Bucket data, ClientMetadata metadata, FreenetURI 
desiredURI) {
                this.data = data;
-               clientMetadata = metadata;
+               if(metadata == null)
+                       clientMetadata = new ClientMetadata();
+               else
+                       clientMetadata = metadata;
                this.desiredURI = desiredURI;
        }
+       
+       public Bucket getData() {
+               return data;
+       }

-
-
 }
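
With the constructor change above, a caller inserting raw data no longer has to
build an empty ClientMetadata by hand: a null is normalised to one. A one-line
example, where the dataBucket variable is illustrative only.

        // null client metadata is now safe: the constructor substitutes an empty ClientMetadata.
        InsertBlock block = new InsertBlock(dataBucket, null, FreenetURI.EMPTY_CHK_URI);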

Deleted: branches/freenet-freejvms/src/freenet/client/InsertSegment.java
===================================================================
--- branches/freenet-freejvms/src/freenet/client/InsertSegment.java     
2006-02-03 22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/freenet/client/InsertSegment.java     
2006-02-03 22:55:27 UTC (rev 7999)
@@ -1,69 +0,0 @@
-package freenet.client;
-
-import java.io.IOException;
-
-import freenet.keys.FreenetURI;
-import freenet.support.BucketFactory;
-import freenet.support.Logger;
-
-/**
- * Segment of a splitfile, for insertion purposes.
- */
-public class InsertSegment {
-
-       final FECCodec codec;
-       final SplitfileBlock[] origDataBlocks;
-       final int blockLength;
-       final BucketFactory bf;
-       /** Check blocks. Will be created by encode(...). */
-       final SplitfileBlock[] checkBlocks;
-       final boolean getCHKOnly;
-       // just for debugging
-       final int segNo;
-       
-       public InsertSegment(short splitfileAlgo, SplitfileBlock[] 
origDataBlocks, int blockLength, BucketFactory bf, boolean getCHKOnly, int 
segNo) {
-               this.origDataBlocks = origDataBlocks;
-               codec = FECCodec.getCodec(splitfileAlgo, origDataBlocks.length);
-               if(codec != null)
-                       checkBlocks = new 
SplitfileBlock[codec.countCheckBlocks()];
-               else
-                       checkBlocks = new SplitfileBlock[0];
-               this.blockLength = blockLength;
-               this.bf = bf;
-               this.getCHKOnly = getCHKOnly;
-               this.segNo = segNo;
-               // FIXME: remove debugging code
-               for(int i=0;i<origDataBlocks.length;i++)
-                       if(origDataBlocks[i].getData() == null) throw new 
NullPointerException("Block "+i+" of "+origDataBlocks.length+" data blocks of 
seg "+segNo+" is null");
-       }
-
-       /**
-        * Get the check block URIs.
-        * Don't call before encode()! Don't call before all blocks have 
inserted either.
-        */
-       public FreenetURI[] getCheckURIs() {
-               FreenetURI[] uris = new FreenetURI[checkBlocks.length];
-               for(int i=0;i<uris.length;i++) {
-                       FreenetURI uri = checkBlocks[i].getURI();
-                       uris[i] = uri;
-               }
-               return uris;
-       }
-
-       /**
-        * Encode the data blocks into check blocks.
-        * @return The number of check blocks generated.
-        * @throws IOException If the encode fails due to a bucket error.
-        */
-       public int encode(int offset, RetryTracker tracker, InserterContext 
ctx) throws IOException {
-               Logger.minor(this, "Encoding "+segNo+": 
"+origDataBlocks.length+" into "+checkBlocks.length);
-               if(codec == null) return 0; // no FEC
-               for(int i=0;i<checkBlocks.length;i++)
-                       checkBlocks[i] = new BlockInserter(null, offset + i, 
tracker, ctx, getCHKOnly);
-               codec.encode(origDataBlocks, checkBlocks, blockLength, bf);
-               for(int i=0;i<checkBlocks.length;i++)
-                       tracker.addBlock(checkBlocks[i]);
-               return checkBlocks.length;
-       }
-
-}

Modified: branches/freenet-freejvms/src/freenet/client/InserterContext.java
===================================================================
--- branches/freenet-freejvms/src/freenet/client/InserterContext.java	2006-02-03 22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/freenet/client/InserterContext.java	2006-02-03 22:55:27 UTC (rev 7999)
@@ -1,34 +1,30 @@
 package freenet.client;

 import freenet.client.events.ClientEventProducer;
+import freenet.client.events.SimpleEventProducer;
 import freenet.crypt.RandomSource;
-import freenet.node.RequestStarterClient;
-import freenet.node.SimpleLowLevelClient;
 import freenet.support.BucketFactory;

 /** Context object for an insert operation, including both simple and 
multi-file inserts */
 public class InserterContext {

-       final SimpleLowLevelClient client;
-       final BucketFactory bf;
+       public final BucketFactory bf;
        /** If true, don't try to compress the data */
-       final boolean dontCompress;
-       final RandomSource random;
-       final short splitfileAlgorithm;
-       final int maxInsertRetries;
+       public final boolean dontCompress;
+       public final RandomSource random;
+       public final short splitfileAlgorithm;
+       public int maxInsertRetries;
        final int maxSplitInsertThreads;
-       final int consecutiveRNFsCountAsSuccess;
-       final int splitfileSegmentDataBlocks;
-       final int splitfileSegmentCheckBlocks;
-       final ClientEventProducer eventProducer;
-       final RequestStarterClient starterClient;
+       public final int consecutiveRNFsCountAsSuccess;
+       public final int splitfileSegmentDataBlocks;
+       public final int splitfileSegmentCheckBlocks;
+       public final ClientEventProducer eventProducer;
        /** Interesting tradeoff, see comments at top of Node.java. */
-       final boolean cacheLocalRequests;
+       public final boolean cacheLocalRequests;

-       public InserterContext(SimpleLowLevelClient client, BucketFactory bf, 
RandomSource random,
+       public InserterContext(BucketFactory bf, RandomSource random,
                        int maxRetries, int rnfsToSuccess, int maxThreads, int 
splitfileSegmentDataBlocks, int splitfileSegmentCheckBlocks,
-                       ClientEventProducer eventProducer, RequestStarterClient 
sctx, boolean cacheLocalRequests) {
-               this.client = client;
+                       ClientEventProducer eventProducer, boolean 
cacheLocalRequests) {
                this.bf = bf;
                this.random = random;
                dontCompress = false;
@@ -39,8 +35,35 @@
                this.eventProducer = eventProducer;
                this.splitfileSegmentDataBlocks = splitfileSegmentDataBlocks;
                this.splitfileSegmentCheckBlocks = splitfileSegmentCheckBlocks;
-               this.starterClient = sctx;
                this.cacheLocalRequests = cacheLocalRequests;
        }

+       public InserterContext(InserterContext ctx) {
+               this.bf = ctx.bf;
+               this.random = ctx.random;
+               this.dontCompress = ctx.dontCompress;
+               this.splitfileAlgorithm = ctx.splitfileAlgorithm;
+               this.consecutiveRNFsCountAsSuccess = 
ctx.consecutiveRNFsCountAsSuccess;
+               this.maxInsertRetries = ctx.maxInsertRetries;
+               this.maxSplitInsertThreads = ctx.maxSplitInsertThreads;
+               this.eventProducer = ctx.eventProducer;
+               this.splitfileSegmentDataBlocks = 
ctx.splitfileSegmentDataBlocks;
+               this.splitfileSegmentCheckBlocks = 
ctx.splitfileSegmentCheckBlocks;
+               this.cacheLocalRequests = ctx.cacheLocalRequests;
+       }
+
+       public InserterContext(InserterContext ctx, SimpleEventProducer 
producer) {
+               this.bf = ctx.bf;
+               this.random = ctx.random;
+               this.dontCompress = ctx.dontCompress;
+               this.splitfileAlgorithm = ctx.splitfileAlgorithm;
+               this.consecutiveRNFsCountAsSuccess = 
ctx.consecutiveRNFsCountAsSuccess;
+               this.maxInsertRetries = ctx.maxInsertRetries;
+               this.maxSplitInsertThreads = ctx.maxSplitInsertThreads;
+               this.eventProducer = producer;
+               this.splitfileSegmentDataBlocks = 
ctx.splitfileSegmentDataBlocks;
+               this.splitfileSegmentCheckBlocks = 
ctx.splitfileSegmentCheckBlocks;
+               this.cacheLocalRequests = ctx.cacheLocalRequests;
+       }
+
 }
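
The two constructors added above are copy constructors: one clones an existing context, the other clones it while swapping in a fresh event producer, so one node-wide context can be shared by many requests. A short usage sketch (variable names are illustrative; a no-argument SimpleEventProducer constructor is assumed):

	// Per-request context: same bucket factory, random source and splitfile
	// settings as the node-wide context, but its own event producer so that
	// progress events from different requests do not get mixed together.
	InserterContext ctx = new InserterContext(nodeCtx, new SimpleEventProducer());
	ctx.maxInsertRetries = 10; // adjustable per request now that the field is no longer final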

Modified: branches/freenet-freejvms/src/freenet/client/InserterException.java
===================================================================
--- branches/freenet-freejvms/src/freenet/client/InserterException.java	2006-02-03 22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/freenet/client/InserterException.java	2006-02-03 22:55:27 UTC (rev 7999)
@@ -10,8 +10,10 @@
        /** For collection errors */
        public FailureCodeTracker errorCodes;
        /** If a non-serious error, the URI */
-       public final FreenetURI uri;
+       public FreenetURI uri;

+       public final String extra;
+       
        /** Get the failure mode. */
        public int getMode() {
                return mode;
@@ -19,6 +21,7 @@

        public InserterException(int m, String msg, FreenetURI expectedURI) {
                super(getMessage(m)+": "+msg);
+               extra = msg;
                mode = m;
                Logger.minor(this, "Creating InserterException: 
"+getMessage(mode)+": "+msg, this);
                errorCodes = null;
@@ -27,6 +30,7 @@

        public InserterException(int m, FreenetURI expectedURI) {
                super(getMessage(m));
+               extra = null;
                mode = m;
                Logger.minor(this, "Creating InserterException: 
"+getMessage(mode), this);
                errorCodes = null;
@@ -35,6 +39,7 @@

        public InserterException(int mode, Throwable e, FreenetURI expectedURI) 
{
                super(getMessage(mode)+": "+e.getMessage());
+               extra = e.getMessage();
                Logger.minor(this, "Creating InserterException: 
"+getMessage(mode)+": "+e, e);
                this.mode = mode;
                errorCodes = null;
@@ -44,12 +49,21 @@

        public InserterException(int mode, FailureCodeTracker errorCodes, 
FreenetURI expectedURI) {
                super(getMessage(mode));
+               extra = null;
                this.mode = mode;
                Logger.minor(this, "Creating InserterException: 
"+getMessage(mode), this);
                this.errorCodes = errorCodes;
                this.uri = expectedURI;
        }

+       public InserterException(int mode) {
+               super(getMessage(mode));
+               extra = null;
+               this.mode = mode;
+               this.errorCodes = null;
+               this.uri = null;
+       }
+
        /** Caller supplied a URI we cannot use */
        public static final int INVALID_URI = 1;
        /** Failed to read from or write to a bucket; a kind of internal error 
*/
@@ -66,6 +80,10 @@
        public static final int TOO_MANY_RETRIES_IN_BLOCKS = 7;
        /** Not able to leave the node at all */
        public static final int ROUTE_REALLY_NOT_FOUND = 8;
+       /** Collided with pre-existing content */
+       public static final int COLLISION = 9;
+       /** Cancelled by user */
+       public static final int CANCELLED = 10;

        public static String getMessage(int mode) {
                switch(mode) {
@@ -85,8 +103,80 @@
                        return "Could not propagate the insert to enough nodes 
(normal on small networks, try fetching it anyway)";
                case ROUTE_REALLY_NOT_FOUND:
                        return "Insert could not leave the node at all";
+               case COLLISION:
+                       return "Insert collided with different, pre-existing 
data at the same key";
+               case CANCELLED:
+                       return "Cancelled by user";
                default:
                        return "Unknown error "+mode;
                }
        }
+
+       public static String getShortMessage(int mode) {
+               switch(mode) {
+               case INVALID_URI:
+                       return "Invalid URI";
+               case BUCKET_ERROR:
+                       return "Temp files error";
+               case INTERNAL_ERROR:
+                       return "Internal error";
+               case REJECTED_OVERLOAD:
+                       return "Timeout or overload";
+               case FATAL_ERRORS_IN_BLOCKS:
+                       return "Some blocks failed fatally";
+               case TOO_MANY_RETRIES_IN_BLOCKS:
+                       return "Some blocks ran out of retries";
+               case ROUTE_NOT_FOUND:
+                       return "Route not found";
+               case ROUTE_REALLY_NOT_FOUND:
+                       return "Request could not leave the node";
+               case COLLISION:
+                       return "Collided with existing data";
+               case CANCELLED:
+                       return "Cancelled";
+               default:
+                       return "Unknown error "+mode;
+               }
+       }
+       
+       /** Is this error fatal? Non-fatal errors are errors which are likely 
to go away with
+        * more retries, or at least for which there is some point retrying.
+        */
+       public boolean isFatal() {
+               return isFatal(mode);
+       }
+       
+       public static boolean isFatal(int mode) {
+               switch(mode) {
+               case INVALID_URI:
+               case FATAL_ERRORS_IN_BLOCKS:
+               case COLLISION:
+               case CANCELLED:
+                       return true;
+               case BUCKET_ERROR: // maybe
+               case INTERNAL_ERROR: // maybe
+               case REJECTED_OVERLOAD:
+               case TOO_MANY_RETRIES_IN_BLOCKS:
+               case ROUTE_NOT_FOUND:
+               case ROUTE_REALLY_NOT_FOUND:
+                       return false;
+               default:
+                       Logger.error(InserterException.class, "Error unknown to 
isFatal(): "+getMessage(mode));
+                       return false;
+               }
+       }
+
+       public static InserterException construct(FailureCodeTracker errors) {
+               if(errors == null) return null;
+               if(errors.isEmpty()) return null;
+               if(errors.isOneCodeOnly()) {
+                       return new InserterException(errors.getFirstCode());
+               }
+               int mode;
+               if(errors.isFatal(true))
+                       mode = FATAL_ERRORS_IN_BLOCKS;
+               else
+                       mode = TOO_MANY_RETRIES_IN_BLOCKS;
+               return new InserterException(mode, errors, null);
+       }
 }
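
The construct() factory added above collapses a per-block FailureCodeTracker into one exception: null if nothing failed, the single error code if only one kind of failure occurred, otherwise FATAL_ERRORS_IN_BLOCKS or TOO_MANY_RETRIES_IN_BLOCKS depending on whether any of the errors was fatal. A caller-side sketch (the surrounding inserter logic is illustrative, not part of this commit):

	InserterException e = InserterException.construct(blockErrors);
	if(e == null) {
		// every block succeeded
	} else if(e.isFatal()) {
		// e.g. INVALID_URI, FATAL_ERRORS_IN_BLOCKS, COLLISION, CANCELLED:
		// retrying will not help, so report it upwards
		throw e;
	} else {
		// e.g. TOO_MANY_RETRIES_IN_BLOCKS: retrying with a higher limit may still work
		Logger.normal(this, "Retryable insert failure: "+InserterException.getShortMessage(e.getMode()));
	}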

Modified: branches/freenet-freejvms/src/freenet/client/Metadata.java
===================================================================
--- branches/freenet-freejvms/src/freenet/client/Metadata.java	2006-02-03 22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/freenet/client/Metadata.java	2006-02-03 22:55:27 UTC (rev 7999)
@@ -19,13 +19,104 @@


 /** Metadata parser/writer class. */
-public class Metadata {
+public class Metadata implements Cloneable {

        static final long FREENET_METADATA_MAGIC = 0xf053b2842d91482bL;
        static final int MAX_SPLITFILE_PARAMS_LENGTH = 32768;
        /** Soft limit, to avoid memory DoS */
        static final int MAX_SPLITFILE_BLOCKS = 100*1000;

+       // Actual parsed data
+       
+       // document type
+       byte documentType;
+       public static final byte SIMPLE_REDIRECT = 0;
+       static final byte MULTI_LEVEL_METADATA = 1;
+       static final byte SIMPLE_MANIFEST = 2;
+       static final byte ZIP_MANIFEST = 3;
+       static final byte ZIP_INTERNAL_REDIRECT = 4;
+       
+       // 2 bytes of flags
+       /** Is a splitfile */
+       boolean splitfile;
+       /** Is a DBR */
+       boolean dbr;
+       /** No MIME type; on by default as not all doctypes have MIME */
+       boolean noMIME = true;
+       /** Compressed MIME type */
+       boolean compressedMIME;
+       /** Has extra client-metadata */
+       boolean extraMetadata;
+       /** Keys stored in full (otherwise assumed to be CHKs) */
+       boolean fullKeys;
+       /** Non-final splitfile chunks can be non-full */
+       boolean splitUseLengths;
+       static final short FLAGS_SPLITFILE = 1;
+       static final short FLAGS_DBR = 2;
+       static final short FLAGS_NO_MIME = 4;
+       static final short FLAGS_COMPRESSED_MIME = 8;
+       static final short FLAGS_EXTRA_METADATA = 16;
+       static final short FLAGS_FULL_KEYS = 32;
+       static final short FLAGS_SPLIT_USE_LENGTHS = 64;
+       static final short FLAGS_COMPRESSED = 128;
+       
+       /** Container archive type */
+       short archiveType;
+       static final short ARCHIVE_ZIP = 0;
+       static final short ARCHIVE_TAR = 1; // FIXME for future use
+       
+       /** Compressed splitfile codec */
+       short compressionCodec = -1;
+       static public final short COMPRESS_GZIP = 0;
+       static final short COMPRESS_BZIP2 = 1; // FIXME for future use
+       
+       /** The length of the splitfile */
+       long dataLength;
+       /** The decompressed length of the compressed data */
+       long decompressedLength;
+       
+       /** The MIME type, as a string */
+       String mimeType;
+       
+       /** The compressed MIME type - lookup index for the MIME types table.
+        * Must be between 0 and 32767.
+        */
+       short compressedMIMEValue;
+       boolean hasCompressedMIMEParams;
+       short compressedMIMEParams;
+       
+       /** The simple redirect key */
+       FreenetURI simpleRedirectKey;
+       
+       short splitfileAlgorithm;
+       static public final short SPLITFILE_NONREDUNDANT = 0;
+       static public final short SPLITFILE_ONION_STANDARD = 1;
+       
+       /** Splitfile parameters */
+       byte[] splitfileParams;
+       int splitfileBlocks;
+       int splitfileCheckBlocks;
+       FreenetURI[] splitfileDataKeys;
+       FreenetURI[] splitfileCheckKeys;
+       
+       // Manifests
+       int manifestEntryCount;
+       /** Manifest entries by name */
+       HashMap manifestEntries;
+       
+       /** ZIP internal redirect: name of file in ZIP */
+       String nameInArchive;
+
+       ClientMetadata clientMetadata;
+
+       public Object clone() {
+               try {
+                       return super.clone();
+               } catch (CloneNotSupportedException e) {
+                       throw new Error("Yes it is!");
+               }
+       }
+       
        /** Parse a block of bytes into a Metadata structure.
         * Constructor method because of need to catch impossible exceptions.
         * @throws MetadataParseException If the metadata is invalid.
@@ -76,6 +167,8 @@
                if(documentType < 0 || documentType > 5 || 
                                (documentType == ZIP_INTERNAL_REDIRECT && 
!acceptZipInternalRedirects))
                        throw new MetadataParseException("Unsupported document 
type: "+documentType);
+               
+               boolean compressed = false;
                if(documentType == SIMPLE_REDIRECT || documentType == 
MULTI_LEVEL_METADATA
                                || documentType == ZIP_MANIFEST) {
                        short flags = dis.readShort();
@@ -115,7 +208,7 @@
                }

                if(noMIME) {
-                       mimeType = DefaultMIMETypes.DEFAULT_MIME_TYPE;
+                       mimeType = null;
                        Logger.minor(this, "noMIME enabled");
                } else {
                        if(compressedMIME) {
@@ -290,7 +383,35 @@
                ret.addRedirectionManifest(dir);
                return ret;
        }
+
+       /**
+        * Create a Metadata object and add manifest entries from the given 
array. */
+       public static Metadata mkRedirectionManifestWithMetadata(HashMap dir) {
+               Metadata ret = new Metadata();
+               ret.addRedirectionManifestWithMetadata(dir);
+               return ret;
+       }

+       private void addRedirectionManifestWithMetadata(HashMap dir) {
+               // Simple manifest - contains actual redirects.
+               // Not zip manifest, which is basically a redirect.
+               documentType = SIMPLE_MANIFEST;
+               noMIME = true;
+               //mimeType = null;
+               //clientMetadata = new ClientMetadata(null);
+               manifestEntries = new HashMap();
+               int count = 0;
+               for(Iterator i = dir.keySet().iterator();i.hasNext();) {
+                       String key = (String) i.next();
+                       count++;
+                       byte[] data = (byte[]) dir.get(key);
+                       if(data == null)
+                               throw new NullPointerException();
+                       manifestEntries.put(key, data);
+               }
+               manifestEntryCount = count;
+       }
+
        /**
         * Create a Metadata object for an archive which does not have its own
         * metadata.
@@ -383,10 +504,12 @@
                if(docType == SIMPLE_REDIRECT) {
                        documentType = docType;
                        clientMetadata = cm;
-                       if(cm != null)
+                       if(cm != null && !cm.isTrivial()) {
                                setMIMEType(cm.getMIMEType());
-                       else
+                       } else {
                                setMIMEType(DefaultMIMETypes.DEFAULT_MIME_TYPE);
+                               noMIME = true;
+                       }
                        simpleRedirectKey = uri;
                } else
                        throw new IllegalArgumentException();
@@ -431,7 +554,7 @@
        /**
         * Write the data to a byte array.
         */
-       byte[] writeToByteArray() {
+       public byte[] writeToByteArray() {
                ByteArrayOutputStream baos = new ByteArrayOutputStream();
                DataOutputStream dos = new DataOutputStream(baos);
                try {
@@ -475,91 +598,6 @@
                        } else throw new IllegalArgumentException("Full keys 
must be enabled to write non-CHKs");
                }
        }
-       // Actual parsed data
-       
-       // document type
-       byte documentType;
-       static final byte SIMPLE_REDIRECT = 0;
-       static final byte MULTI_LEVEL_METADATA = 1;
-       static final byte SIMPLE_MANIFEST = 2;
-       static final byte ZIP_MANIFEST = 3;
-       static final byte ZIP_INTERNAL_REDIRECT = 4;
-       
-       // 2 bytes of flags
-       /** Is a splitfile */
-       boolean splitfile;
-       /** Is a DBR */
-       boolean dbr;
-       /** No MIME type; on by default as not all doctypes have MIME */
-       boolean noMIME = true;
-       /** Compressed MIME type */
-       boolean compressedMIME;
-       /** Has extra client-metadata */
-       boolean extraMetadata;
-       /** Keys stored in full (otherwise assumed to be CHKs) */
-       boolean fullKeys;
-       /** Non-final splitfile chunks can be non-full */
-       boolean splitUseLengths;
-       /** Compressed splitfile */
-       boolean compressed;
-       static final short FLAGS_SPLITFILE = 1;
-       static final short FLAGS_DBR = 2;
-       static final short FLAGS_NO_MIME = 4;
-       static final short FLAGS_COMPRESSED_MIME = 8;
-       static final short FLAGS_EXTRA_METADATA = 16;
-       static final short FLAGS_FULL_KEYS = 32;
-       static final short FLAGS_SPLIT_USE_LENGTHS = 64;
-       static final short FLAGS_COMPRESSED = 128;
-       
-       /** Container archive type */
-       short archiveType;
-       static final short ARCHIVE_ZIP = 0;
-       static final short ARCHIVE_TAR = 1; // FIXME for future use
-       
-       /** Compressed splitfile codec */
-       short compressionCodec;
-       static public final short COMPRESS_GZIP = 0; // for future use
-       static final short COMPRESS_BZIP2 = 1; // FIXME for future use
-       
-       /** The length of the splitfile */
-       long dataLength;
-       /** The decompressed length of the compressed data */
-       long decompressedLength;
-       
-       /** The MIME type, as a string */
-       String mimeType;
-       
-       /** The compressed MIME type - lookup index for the MIME types table.
-        * Must be between 0 and 32767.
-        */
-       short compressedMIMEValue;
-       boolean hasCompressedMIMEParams;
-       short compressedMIMEParams;
-       
-       /** The simple redirect key */
-       FreenetURI simpleRedirectKey;
-       
-       short splitfileAlgorithm;
-       static final short SPLITFILE_NONREDUNDANT = 0;
-       static final short SPLITFILE_ONION_STANDARD = 1;
-       
-       /** Splitfile parameters */
-       byte[] splitfileParams;
-       int splitfileBlocks;
-       int splitfileCheckBlocks;
-       FreenetURI[] splitfileDataKeys;
-       FreenetURI[] splitfileCheckKeys;
-       
-       // Manifests
-       int manifestEntryCount;
-       /** Manifest entries by name */
-       HashMap manifestEntries;
-       
-       /** ZIP internal redirect: name of file in ZIP */
-       String nameInArchive;
-
-       ClientMetadata clientMetadata;
-
        /** Is a manifest? */
        public boolean isSimpleManifest() {
                return documentType == SIMPLE_MANIFEST;
@@ -672,7 +710,7 @@
                        if(extraMetadata) flags |= FLAGS_EXTRA_METADATA;
                        if(fullKeys) flags |= FLAGS_FULL_KEYS;
                        if(splitUseLengths) flags |= FLAGS_SPLIT_USE_LENGTHS;
-                       if(compressed) flags |= FLAGS_COMPRESSED;
+                       if(compressionCodec >= 0) flags |= FLAGS_COMPRESSED;
                        dos.writeShort(flags);
                }

@@ -684,7 +722,7 @@
                        dos.writeLong(dataLength);
                }

-               if(compressed) {
+               if(compressionCodec >= 0) {
                        dos.writeShort(compressionCodec);
                        dos.writeLong(decompressedLength);
                }
@@ -765,4 +803,24 @@
        public FreenetURI[] getSplitfileCheckKeys() {
                return splitfileCheckKeys;
        }
+
+       public boolean isCompressed() {
+               return compressionCodec >= 0;
+       }
+
+       public boolean splitUseLengths() {
+               return splitUseLengths;
+       }
+
+       public short getCompressionCodec() {
+               return compressionCodec;
+       }
+
+       public long dataLength() {
+               return dataLength;
+       }
+
+       public byte[] splitfileParams() {
+               return splitfileParams;
+       }
 }
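
With this change the separate "compressed" boolean is gone: compression is now signalled by compressionCodec being >= 0 (-1 means uncompressed), and FLAGS_COMPRESSED is derived from it when the flags are written. A sketch of a consumer using the new accessors (the decompression step itself is not part of this commit):

	if(meta.isCompressed()) {
		short codec = meta.getCompressionCodec(); // only Metadata.COMPRESS_GZIP is defined so far
		long storedLength = meta.dataLength();    // length of the (possibly compressed) splitfile data
		// decompress after fetching, then check the result against decompressedLength
	}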

Copied: branches/freenet-freejvms/src/freenet/client/PutWaiter.java (from rev 7998, trunk/freenet/src/freenet/client/PutWaiter.java)

Deleted: branches/freenet-freejvms/src/freenet/client/RetryTracker.java
===================================================================
--- branches/freenet-freejvms/src/freenet/client/RetryTracker.java	2006-02-03 22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/freenet/client/RetryTracker.java	2006-02-03 22:55:27 UTC (rev 7999)
@@ -1,392 +0,0 @@
-package freenet.client;
-
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.Vector;
-
-import freenet.crypt.RandomSource;
-import freenet.support.Logger;
-
-/**
- * Keeps a list of SplitfileBlocks for each retry level.
- */
-public class RetryTracker {
-
-       class Level {
-               final int level;
-               final Vector blocks;
-
-               Level(int l) {
-                       level = l;
-                       blocks = new Vector();
-               }
-               
-               /**
-                * Return a random block.
-                * Call synchronized on RetryTracker.
-                */
-               SplitfileBlock getBlock() {
-                       int len = blocks.size();
-                       int x = random.nextInt(len);
-                       SplitfileBlock block = (SplitfileBlock) 
blocks.remove(x);
-                       if(blocks.isEmpty())
-                               removeLevel(level);
-                       return block;
-               }
-               
-               void add(SplitfileBlock block) {
-                       blocks.add(block);
-               }
-               
-               /**
-                * Remove a specific block.
-                * Remove self if run out of blocks.
-                * Call synchronized on RetryTracker.
-                */
-               void remove(SplitfileBlock block) {
-                       blocks.remove(block);
-                       if(blocks.isEmpty())
-                               removeLevel(level);
-               }
-       }
-
-       final FailureCodeTracker fatalErrors;
-       final FailureCodeTracker nonfatalErrors;
-       final HashMap levels;
-       final RandomSource random;
-       final int maxLevel;
-       final HashSet failedBlocksTooManyRetries;
-       final HashSet failedBlocksFatalErrors;
-       final HashSet runningBlocks;
-       final HashSet succeededBlocks;
-       private int curMaxLevel;
-       private int curMinLevel;
-       /** Maximum number of concurrently running threads */
-       final int maxThreads;
-       /** After we have successes on this many blocks, we should terminate, 
-        * even if there are threads running and blocks queued. */
-       final int targetSuccesses;
-       final boolean killOnFatalError;
-       private boolean killed;
-       private boolean finishOnEmpty;
-       private final RetryTrackerCallback callback;
-       private boolean callOnProgress = false;
-
-       /**
-        * Create a RetryTracker.
-        * @param maxLevel The maximum number of tries for each block.
-        * @param random The random number source.
-        * @param maxThreads The maximum number of threads to use.
-        * @param killOnFatalError Whether to terminate the tracker when a fatal
-        * error occurs on a single block.
-        * @param cb The callback to call .finish(...) when we no longer have
-        * anything to do *and* the client has set the finish on empty flag.
-        */
-       public RetryTracker(int maxLevel, int targetSuccesses, RandomSource 
random, int maxThreads, boolean killOnFatalError, RetryTrackerCallback cb, 
boolean isInsert) {
-               levels = new HashMap();
-               fatalErrors = new FailureCodeTracker(isInsert);
-               nonfatalErrors = new FailureCodeTracker(isInsert);
-               this.targetSuccesses = targetSuccesses;
-               this.maxLevel = maxLevel;
-               this.random = random;
-               curMaxLevel = curMinLevel = 0;
-               failedBlocksTooManyRetries = new HashSet();
-               failedBlocksFatalErrors = new HashSet();
-               runningBlocks = new HashSet();
-               succeededBlocks = new HashSet();
-               this.maxThreads = maxThreads;
-               this.killOnFatalError = killOnFatalError;
-               this.finishOnEmpty = false;
-               this.callback = cb;
-       }
-
-       /**
-        * Set the finish-on-empty flag to true.
-        * This means that when there are no longer any blocks to process, and 
there
-        * are none running, the tracker will call the client's finish(...) 
method.
-        */
-       public synchronized void setFinishOnEmpty() {
-               finishOnEmpty = true;
-       }
-       
-       /** Remove a level */
-       private synchronized void removeLevel(int level) {
-               Logger.minor(this, "Removing level "+level);
-               Integer x = new Integer(level);
-               levels.remove(x);
-               if(curMinLevel == level) {
-                       for(int i=curMinLevel;i<=curMaxLevel;i++) {
-                               x = new Integer(i);
-                               if(levels.get(x) != null) {
-                                       curMinLevel = i;
-                                       return;
-                               }
-                       }
-                       curMinLevel = curMaxLevel = 0;
-                       return;
-               }
-               if(curMaxLevel == level) {
-                       for(int i=curMaxLevel;i>=curMinLevel;i--) {
-                               x = new Integer(i);
-                               if(levels.get(x) != null) {
-                                       curMaxLevel = i;
-                                       return;
-                               }
-                       }
-                       curMinLevel = curMaxLevel = 0;
-                       return;
-               }
-       }
-
-       /** Add a level */
-       private synchronized Level addLevel(int level, Integer x) {
-               Logger.minor(this, "Adding level "+level);
-               if(level < 0) throw new IllegalArgumentException();
-               Level l = new Level(level);
-               if(levels.isEmpty()) {
-                       curMaxLevel = curMinLevel = level;
-               } else {
-                       if(level > curMaxLevel) curMaxLevel = level;
-                       if(level < curMinLevel) curMinLevel = level;
-               }
-               levels.put(x, l);
-               return l;
-       }
-       
-       /** Get an existing level, or add one if necessary */
-       private synchronized Level makeLevel(int level) {
-               Integer x = new Integer(level);
-               Level l = (Level) levels.get(x);
-               if(l == null) {
-                       return addLevel(level, x);
-               }
-               else return l;
-       }
-       
-       /**
-        * Add a block at retry level zero.
-        */
-       public synchronized void addBlock(SplitfileBlock block) {
-               if(killed) return;
-               Level l = makeLevel(0);
-               l.add(block);
-               maybeStart(true);
-       }
-       
-       /**
-        * A block got a nonfatal error and should be retried.
-        * Move it out of the running list and back into the relevant list, 
unless
-        * we have run out of retries.
-        */
-       public void nonfatalError(SplitfileBlock block, int reasonCode) {
-               synchronized(this) {
-                       nonfatalErrors.inc(reasonCode);
-                       runningBlocks.remove(block);
-                       int levelNumber = block.getRetryCount();
-                       levelNumber++;
-                       Logger.minor(this, "Non-fatal error on "+block+" -> 
"+levelNumber);
-                       if(levelNumber > maxLevel) {
-                               failedBlocksTooManyRetries.add(block);
-                               Logger.minor(this, "Finished with "+block);
-                       } else {
-                               Level newLevel = makeLevel(levelNumber);
-                               newLevel.add(block);
-                       }
-               }
-               maybeStart(false);
-               if(callOnProgress)
-                       callback.onProgress();
-       }
-       
-       /**
-        * A block got a fatal error and should not be retried.
-        * Move it into the fatal error list.
-        * @param reasonCode A client-specific code indicating the type of 
failure.
-        */
-       public void fatalError(SplitfileBlock block, int reasonCode) {
-               synchronized(this) {
-                       fatalErrors.inc(reasonCode);
-                       runningBlocks.remove(block);
-                       failedBlocksFatalErrors.add(block);
-               }
-               maybeStart(false);
-               if(callOnProgress)
-                       callback.onProgress();
-       }
-
-       /**
-        * If we can start some blocks, start some blocks.
-        * Otherwise if we are finished, call the callback's finish method.
-        */
-       public void maybeStart(boolean cantCallFinished) {
-               boolean callFinished = false;
-               synchronized(this) {
-               if(killed) return;
-               Logger.minor(this, "succeeded: "+succeededBlocks.size()+", 
target: "+targetSuccesses+
-                               ", failed: 
"+failedBlocksTooManyRetries.size()+", fatal: "+failedBlocksFatalErrors.size()+
-                               ", running: "+runningBlocks.size()+", levels: 
"+levels.size()+"("+curMinLevel+"-"+curMaxLevel+
-                               "), finishOnEmpty: "+finishOnEmpty+" for 
"+callback);
-               if(runningBlocks.size() == 1)
-                       Logger.minor(this, "Only block running: 
"+runningBlocks.toArray()[0]);
-               else if(levels.isEmpty()) {
-                       for(Iterator i=runningBlocks.iterator();i.hasNext();) {
-                               Logger.minor(this, "Still running: "+i.next());
-                       }
-               }
-               if((succeededBlocks.size() >= targetSuccesses)
-                               || (runningBlocks.isEmpty() && levels.isEmpty() 
&& finishOnEmpty)) {
-                       killed = true;
-                       Logger.minor(this, "Finishing");
-                       SplitfileBlock[] running = runningBlocks();
-                       for(int i=0;i<running.length;i++) {
-                               running[i].kill();
-                       }
-                       runningBlocks.clear();
-                       if(!cantCallFinished)
-                               callFinished = true;
-                       else {
-                               Runnable r = new Runnable() { public void run() 
{ callback.finished(succeededBlocks(), failedBlocks(), fatalErrorBlocks()); } };
-                               Thread t = new Thread(r);
-                               t.setDaemon(true);
-                               t.start();
-                       }
-               } else {
-                       while(runningBlocks.size() < maxThreads) {
-                               SplitfileBlock block = getBlock();
-                               if(block == null) break;
-                               Logger.minor(this, "Starting: "+block);
-                               block.start();
-                               runningBlocks.add(block);
-                       }
-               }
-               }
-               if(callFinished)
-                       callback.finished(succeededBlocks(), failedBlocks(), 
fatalErrorBlocks());
-       }
-
-       public void success(SplitfileBlock block) {
-               synchronized(this) {
-                       if(killed) return;
-                       runningBlocks.remove(block);
-                       succeededBlocks.add(block);
-               }
-               maybeStart(false);
-               if(callOnProgress)
-                       callback.onProgress();
-       }
-       
-       public synchronized void callOnProgress() {
-               callOnProgress = true;
-       }
-       
-       /**
-        * Get the next block to try. This is a randomly selected block from the
-        * lowest priority currently available. Move it into the running list.
-        */
-       public synchronized SplitfileBlock getBlock() {
-               if(killed) return null;
-               Integer iMin = new Integer(curMinLevel);
-               Level l = (Level) levels.get(iMin);
-               if(l == null) {
-                       if(!(curMinLevel == 0 && curMaxLevel == 0))
-                               Logger.error(this, "min="+curMinLevel+", 
max="+curMaxLevel+" but min does not exist!");
-                       if(!levels.isEmpty()) {
-                               Integer[] levelNums = (Integer[]) 
levels.keySet().toArray(new Integer[levels.size()]);
-                               java.util.Arrays.sort(levelNums);
-                               Integer x = levelNums[0];
-                               curMinLevel = x.intValue();
-                               Integer y = levelNums[levelNums.length-1];
-                               curMaxLevel = y.intValue();
-                               Logger.normal(this, "Corrected: 
min="+curMinLevel+", max="+curMaxLevel);
-                               return getBlock();
-                       }
-                       else return null;
-               }
-               return l.getBlock();
-       }
-       
-       /**
-        * Get all running blocks.
-        */
-       public synchronized SplitfileBlock[] runningBlocks() {
-               return (SplitfileBlock[]) 
-                       runningBlocks.toArray(new 
SplitfileBlock[runningBlocks.size()]);
-       }
-       
-       /**
-        * Get all blocks with fatal errors.
-        * SplitfileBlock's are assumed to remember their errors, so we don't.
-        */
-       public synchronized SplitfileBlock[] fatalErrorBlocks() {
-               return (SplitfileBlock[])
-                       failedBlocksFatalErrors.toArray(new 
SplitfileBlock[failedBlocksFatalErrors.size()]);
-       }
-       
-       /**
-        * Get all blocks which didn't succeed in the maximum number of tries.
-        */
-       public synchronized SplitfileBlock[] failedBlocks() {
-               return (SplitfileBlock[])
-               failedBlocksTooManyRetries.toArray(new 
SplitfileBlock[failedBlocksTooManyRetries.size()]);
-       }
-       
-       /**
-        * Get all successfully downloaded blocks.
-        */
-       public synchronized SplitfileBlock[] succeededBlocks() {
-               return (SplitfileBlock[])
-                       succeededBlocks.toArray(new 
SplitfileBlock[succeededBlocks.size()]);
-       }
-
-       public synchronized int succeededBlocksLength() {
-               return succeededBlocks.size();
-       }
-       
-       /**
-        * Count the number of blocks which could not be fetched because we ran 
out
-        * of retries.
-        */
-       public synchronized int countFailedBlocks() {
-               return failedBlocksTooManyRetries.size();
-       }
-       
-       /**
-        * Highest number of completed retries of any block so far.
-        */
-       public synchronized int highestRetries() {
-               return curMaxLevel;
-       }
-       
-       /**
-        * Lowest number of completed retries of any block so far.
-        */
-       public synchronized int lowestRetries() {
-               return curMinLevel;
-       }
-       
-       /**
-        * Are there more blocks to process?
-        */
-       public synchronized boolean moreBlocks() {
-               return !levels.isEmpty();
-       }
-
-       public FailureCodeTracker getAccumulatedFatalErrorCodes() {
-               return fatalErrors;
-       }
-       
-       public FailureCodeTracker getAccumulatedNonFatalErrorCodes() {
-               return nonfatalErrors;
-       }
-
-       public synchronized void kill() {
-               killed = true;
-               levels.clear();
-               for(Iterator i=runningBlocks.iterator();i.hasNext();) {
-                       SplitfileBlock sb = (SplitfileBlock) i.next();
-                       sb.kill();
-               }
-               runningBlocks.clear();
-       }
-}
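
For reference, the core of the deleted RetryTracker was its set of retry "levels": each block lives in a bucket keyed by how many times it has been retried, and getBlock() always draws a random block from the lowest populated level. A standalone sketch of that queueing discipline in plain Java (illustrative names, no Freenet types):

	import java.util.ArrayList;
	import java.util.List;
	import java.util.Random;
	import java.util.TreeMap;

	class RetryLevelQueue {
		private final TreeMap levels = new TreeMap(); // retry count -> List of blocks
		private final Random random = new Random();

		void add(Object block, int retryCount) {
			Integer key = new Integer(retryCount);
			List l = (List) levels.get(key);
			if(l == null) levels.put(key, l = new ArrayList());
			l.add(block);
		}

		/** Random block from the lowest (least retried) level, or null if empty. */
		Object next() {
			if(levels.isEmpty()) return null;
			Integer lowest = (Integer) levels.firstKey();
			List l = (List) levels.get(lowest);
			Object block = l.remove(random.nextInt(l.size()));
			if(l.isEmpty()) levels.remove(lowest);
			return block;
		}
	}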

Deleted: branches/freenet-freejvms/src/freenet/client/RetryTrackerCallback.java
===================================================================
--- branches/freenet-freejvms/src/freenet/client/RetryTrackerCallback.java	2006-02-03 22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/freenet/client/RetryTrackerCallback.java	2006-02-03 22:55:27 UTC (rev 7999)
@@ -1,21 +0,0 @@
-package freenet.client;
-
-/**
- * Object passed to RetryTracker. This is called when RetryTracker finishes.
- */
-public interface RetryTrackerCallback {
-
-       /**
-        * Notify the caller that we have finished.
-        * @param succeeded The blocks which succeeded.
-        * @param failed The blocks which failed.
-        * @param fatalErrors The blocks which got fatal errors.
-        */
-       void finished(SplitfileBlock[] succeeded, SplitfileBlock[] failed, 
SplitfileBlock[] fatalErrors);
-
-       /**
-        * When a block completes etc.
-        */
-       void onProgress();
-
-}
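
The deleted interface was implemented by whatever owned the tracker; a minimal sketch of an implementor (illustrative, not recovered code):

	class LoggingTrackerClient implements RetryTrackerCallback {
		public void finished(SplitfileBlock[] succeeded, SplitfileBlock[] failed, SplitfileBlock[] fatalErrors) {
			Logger.minor(this, "Done: "+succeeded.length+" ok, "+failed.length+" failed, "+fatalErrors.length+" fatal");
		}
		public void onProgress() {
			// e.g. fire a progress event towards the client here
		}
	}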

Deleted: branches/freenet-freejvms/src/freenet/client/Segment.java
===================================================================
--- branches/freenet-freejvms/src/freenet/client/Segment.java	2006-02-03 22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/freenet/client/Segment.java	2006-02-03 22:55:27 UTC (rev 7999)
@@ -1,282 +0,0 @@
-package freenet.client;
-
-import java.io.IOException;
-import java.io.OutputStream;
-import java.util.LinkedList;
-import java.util.Vector;
-
-import freenet.keys.FreenetURI;
-import freenet.support.Bucket;
-import freenet.support.BucketTools;
-import freenet.support.Logger;
-
-/**
- * A segment, within a splitfile.
- * Self-starting Runnable.
- * 
- * Does not require locking, because all locking goes through the parent 
Segment.
- */
-public class Segment implements RetryTrackerCallback {
-
-       final short splitfileType;
-       final FreenetURI[] dataBlocks;
-       final FreenetURI[] checkBlocks;
-       final BlockFetcher[] dataBlockStatus;
-       final BlockFetcher[] checkBlockStatus;
-       final int minFetched;
-       private Vector blocksNotTried;
-       final SplitFetcher parentFetcher;
-       final ArchiveContext archiveContext;
-       final FetcherContext fetcherContext;
-       final long maxBlockLength;
-       final boolean nonFullBlocksAllowed;
-       /** Has the segment started to do something? Irreversible. */
-       private boolean started;
-       /** Has the segment finished processing? Irreversible. */
-       private boolean finished;
-       /** Bucket to store the data retrieved, after it has been decoded */
-       private Bucket decodedData;
-       /** Recently completed fetches */
-       final LinkedList recentlyCompletedFetches;
-       /** Running fetches */
-       LinkedList runningFetches;
-       /** Fetch context for block fetches */
-       final FetcherContext blockFetchContext;
-       /** Recursion level */
-       final int recursionLevel;
-       /** Retry tracker */
-       private final RetryTracker tracker;
-       private FetchException failureException;
-       
-       /**
-        * Create a Segment.
-        * @param splitfileType The type of the splitfile.
-        * @param splitfileDataBlocks The data blocks to fetch.
-        * @param splitfileCheckBlocks The check blocks to fetch.
-        */
-       public Segment(short splitfileType, FreenetURI[] splitfileDataBlocks, 
FreenetURI[] splitfileCheckBlocks,
-                       SplitFetcher fetcher, ArchiveContext actx, 
FetcherContext fctx, long maxTempLength, boolean useLengths, int recLevel) 
throws MetadataParseException {
-               this.splitfileType = splitfileType;
-               dataBlocks = splitfileDataBlocks;
-               checkBlocks = splitfileCheckBlocks;
-               if(splitfileType == Metadata.SPLITFILE_NONREDUNDANT) {
-                       minFetched = dataBlocks.length;
-               } else if(splitfileType == Metadata.SPLITFILE_ONION_STANDARD) {
-                       minFetched = dataBlocks.length;
-               } else throw new MetadataParseException("Unknown splitfile 
type"+splitfileType);
-               tracker = new RetryTracker(fctx.maxSplitfileBlockRetries, 
splitfileDataBlocks.length, fctx.random, fctx.maxSplitfileThreads, false, this, 
false);
-               // Don't add blocks to tracker yet, because don't want to start 
fetch yet.
-               parentFetcher = fetcher;
-               archiveContext = actx;
-               fetcherContext = fctx;
-               maxBlockLength = maxTempLength;
-               nonFullBlocksAllowed = useLengths;
-               started = false;
-               finished = false;
-               decodedData = null;
-               dataBlockStatus = new BlockFetcher[dataBlocks.length];
-               checkBlockStatus = new BlockFetcher[checkBlocks.length];
-               blocksNotTried = new Vector();
-               Vector firstSet = new 
Vector(dataBlocks.length+checkBlocks.length);
-               blocksNotTried.add(0, firstSet);
-               for(int i=0;i<dataBlocks.length;i++) {
-                       dataBlockStatus[i] = new BlockFetcher(this, tracker, 
dataBlocks[i], i, fctx.dontEnterImplicitArchives);
-                       firstSet.add(dataBlockStatus[i]);
-               }
-               for(int i=0;i<checkBlocks.length;i++) {
-                       checkBlockStatus[i] = new BlockFetcher(this, tracker, 
checkBlocks[i], dataBlockStatus.length + i, fctx.dontEnterImplicitArchives);
-                       firstSet.add(checkBlockStatus[i]);
-               }
-               recentlyCompletedFetches = new LinkedList();
-               runningFetches = new LinkedList();
-               // FIXME be a bit more flexible here depending on flags
-               if(useLengths) {
-                       blockFetchContext = new FetcherContext(fetcherContext, 
FetcherContext.SPLITFILE_USE_LENGTHS_MASK);
-                       this.recursionLevel = recLevel;
-               } else {
-                       blockFetchContext = new FetcherContext(fetcherContext, 
FetcherContext.SPLITFILE_DEFAULT_BLOCK_MASK);
-                       this.recursionLevel = 0;
-               }
-               Logger.minor(this, "Created segment: data blocks: 
"+dataBlocks.length+", check blocks: "+checkBlocks.length+" "+this);
-       }
-
-       /**
-        * Is the segment finished? (Either error or fetched and decoded)?
-        */
-       public boolean isFinished() {
-               return finished;
-       }
-
-       /**
-        * If there was an error, throw it now.
-        */
-       public void throwError() throws FetchException {
-               if(failureException != null)
-                       throw failureException;
-       }
-
-       /**
-        * Return the length of the data, after decoding.
-        */
-       public long decodedLength() {
-               return decodedData.size();
-       }
-
-       /**
-        * Write the decoded data to the given output stream.
-        * Do not write more than the specified number of bytes (unless it is 
negative,
-        * in which case ignore it).
-        * @return The number of bytes written.
-        * @throws IOException If there was an error reading from the bucket 
the data is
-        * stored in, or writing to the stream provided.
-        */
-       public long writeDecodedDataTo(OutputStream os, long truncateLength) 
throws IOException {
-               long len = decodedData.size();
-               if(truncateLength >= 0 && truncateLength < len)
-                       len = truncateLength;
-               BucketTools.copyTo(decodedData, os, Math.min(truncateLength, 
decodedData.size()));
-               return len;
-       }
-
-       /**
-        * Return true if the Segment has been started, otherwise false.
-        */
-       public boolean isStarted() {
-               return started;
-       }
-
-       /**
-        * Start the Segment fetching the data. When it has finished fetching, 
it will
-        * notify the SplitFetcher. Note that we must not start fetching until 
this
-        * method is called, because of the requirement to not fetch all 
segments
-        * simultaneously.
-        */
-       public void start() {
-               started = true;
-               for(int i=0;i<dataBlockStatus.length;i++) {
-                       tracker.addBlock(dataBlockStatus[i]);
-               }
-               Logger.minor(this, "Added data blocks");
-               for(int i=0;i<checkBlockStatus.length;i++) {
-                       tracker.addBlock(checkBlockStatus[i]);
-               }
-               tracker.callOnProgress();
-               tracker.setFinishOnEmpty();
-       }
-
-       /**
-        * How many fetches are running?
-        */
-       private int runningFetches() {
-               synchronized(runningFetches) {
-                       return runningFetches.size();
-               }
-       }
-
-       /**
-        * Once we have enough data to decode, tell parent, and decode it.
-        */
-       public void finished(SplitfileBlock[] succeeded, SplitfileBlock[] 
failed, SplitfileBlock[] fatalErrors) {
-
-               Logger.minor(this, "Finished("+succeeded.length+", 
"+failed.length+", "+fatalErrors.length+")");
-               parentFetcher.gotBlocks(this);
-               if(succeeded.length >= minFetched)
-                       // Not finished yet, need to decode
-                       try {
-                               successfulFetch();
-                       } catch (Throwable t) {
-                               Logger.error(this, "Caught "+t+" decoding 
"+this);
-                               finished = true;
-                               failureException = new 
FetchException(FetchException.INTERNAL_ERROR, t);
-                               parentFetcher.segmentFinished(this);
-                       }
-               else {
-                       failureException = new 
SplitFetchException(failed.length, fatalErrors.length, succeeded.length, 
minFetched, 
tracker.getAccumulatedNonFatalErrorCodes().merge(tracker.getAccumulatedFatalErrorCodes()));
-                       finished = true;
-                       parentFetcher.segmentFinished(this);
-               }
-       }
-
-       /**
-        * Successful fetch, do the decode, tell the parent, etc.
-        */
-       private void successfulFetch() {
-               
-               // Now decode
-               Logger.minor(this, "Decoding "+this);
-               
-               FECCodec codec = FECCodec.getCodec(splitfileType, 
dataBlocks.length, checkBlocks.length);
-               try {
-                       if(splitfileType != Metadata.SPLITFILE_NONREDUNDANT) {
-                               // FIXME hardcoded block size below.
-                               codec.decode(dataBlockStatus, checkBlockStatus, 
32768, fetcherContext.bucketFactory);
-                               // Now have all the data blocks (not 
necessarily all the check blocks)
-                       }
-                       
-                       decodedData = 
fetcherContext.bucketFactory.makeBucket(-1);
-                       Logger.minor(this, "Copying data from data blocks");
-                       OutputStream os = decodedData.getOutputStream();
-                       for(int i=0;i<dataBlockStatus.length;i++) {
-                               BlockFetcher status = dataBlockStatus[i];
-                               Bucket data = status.fetchedData;
-                               BucketTools.copyTo(data, os, Long.MAX_VALUE);
-                       }
-                       Logger.minor(this, "Copied data");
-                       os.close();
-                       // Must set finished BEFORE calling parentFetcher.
-                       // Otherwise a race is possible that might result in it 
not seeing our finishing.
-                       finished = true;
-                       parentFetcher.segmentFinished(this);
-               } catch (IOException e) {
-                       Logger.minor(this, "Caught bucket error?: "+e, e);
-                       finished = true;
-                       failureException = new 
FetchException(FetchException.BUCKET_ERROR);
-                       parentFetcher.segmentFinished(this);
-                       return;
-               }
-               
-               // Now heal
-               
-               // Encode any check blocks we don't have
-               if(codec != null) {
-                       try {
-                               codec.encode(dataBlockStatus, checkBlockStatus, 
32768, fetcherContext.bucketFactory);
-                       } catch (IOException e) {
-                               Logger.error(this, "Bucket error while healing: 
"+e, e);
-                       }
-               }
-               
-               // Now insert *ALL* blocks on which we had at least one 
failure, and didn't eventually succeed
-               for(int i=0;i<dataBlockStatus.length;i++) {
-                       BlockFetcher block = dataBlockStatus[i];
-                       if(block.actuallyFetched) continue;
-                       if(block.completedTries == 0) {
-                               // 80% chance of not inserting, if we never 
tried it
-                               if(fetcherContext.random.nextInt(5) == 0) 
continue;
-                       }
-                       block.queueHeal();
-               }
-               
-               // FIXME heal check blocks too
-       }
-
-       public void onProgress() {
-               parentFetcher.onProgress();
-       }
-
-       public int fetchedBlocks() {
-               return tracker.succeededBlocksLength();
-       }
-
-       public int failedBlocks() {
-               return tracker.failedBlocks().length;
-       }
-
-       public int fatallyFailedBlocks() {
-               return tracker.fatalErrorBlocks().length;
-       }
-
-       public int runningBlocks() {
-               return tracker.runningBlocks().length;
-       }
-}
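
One detail worth noting from the deleted Segment: after a successful FEC decode it queued "healing" re-inserts for blocks it had to reconstruct, but a block it had never even tried was only healed about one time in five. That decision, as a standalone method (names are illustrative):

	/** Should this block be re-inserted to heal the network after a decode? */
	static boolean shouldHeal(boolean actuallyFetched, int completedTries, java.util.Random random) {
		if(actuallyFetched) return false;      // fetched normally, nothing to heal
		if(completedTries == 0)
			return random.nextInt(5) == 0; // never tried: heal only about 20% of the time
		return true;                           // failed at least once: always heal
	}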

Deleted: branches/freenet-freejvms/src/freenet/client/SplitFetcher.java
===================================================================
--- branches/freenet-freejvms/src/freenet/client/SplitFetcher.java	2006-02-03 22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/freenet/client/SplitFetcher.java	2006-02-03 22:55:27 UTC (rev 7999)
@@ -1,253 +0,0 @@
-package freenet.client;
-
-import java.io.IOException;
-import java.io.OutputStream;
-import java.util.Vector;
-
-import com.onionnetworks.fec.FECCode;
-import com.onionnetworks.fec.FECCodeFactory;
-
-import freenet.client.events.SplitfileProgressEvent;
-import freenet.keys.FreenetURI;
-import freenet.keys.NodeCHK;
-import freenet.support.Bucket;
-import freenet.support.Fields;
-import freenet.support.Logger;
-
-/**
- * Class to fetch a splitfile.
- */
-public class SplitFetcher {
-
-       // 128/192. Crazy, but it's possible we'd get big erasures.
-       static final int ONION_STD_K = 128;
-       static final int ONION_STD_N = 192;
-       
-       /** The standard onion codec */
-       static FECCode onionStandardCode =
-               
FECCodeFactory.getDefault().createFECCode(ONION_STD_K,ONION_STD_N);
-       
-       /** The splitfile type. See the SPLITFILE_ constants on Metadata. */
-       final short splitfileType;
-       /** The segment length. -1 means not segmented and must get everything 
to decode. */
-       final int blocksPerSegment;
-       /** The segment length in check blocks. */
-       final int checkBlocksPerSegment;
-       /** Total number of segments */
-       final int segmentCount;
-       /** The detailed information on each segment */
-       final Segment[] segments;
-       /** The splitfile data blocks. */
-       final FreenetURI[] splitfileDataBlocks;
-       /** The splitfile check blocks. */
-       final FreenetURI[] splitfileCheckBlocks;
-       /** The archive context */
-       final ArchiveContext actx;
-       /** The fetch context */
-       final FetcherContext fctx;
-       /** Maximum temporary length */
-       final long maxTempLength;
-       /** Have all segments finished? Access synchronized. */
-       private boolean allSegmentsFinished = false;
-       /** Currently fetching segment */
-       private Segment fetchingSegment;
-       /** Array of unstarted segments. Modify synchronized. */
-       private Vector unstartedSegments;
-       /** Override length. If this is positive, truncate the splitfile to 
this length. */
-       private long overrideLength;
-       /** Accept non-full splitfile chunks? */
-       private boolean splitUseLengths;
-       
-       public SplitFetcher(Metadata metadata, ArchiveContext archiveContext, 
FetcherContext ctx, int recursionLevel) throws MetadataParseException, 
FetchException {
-               actx = archiveContext;
-               fctx = ctx;
-               overrideLength = metadata.dataLength;
-               this.maxTempLength = ctx.maxTempLength;
-               splitfileType = metadata.getSplitfileType();
-               splitfileDataBlocks = metadata.getSplitfileDataKeys();
-               splitfileCheckBlocks = metadata.getSplitfileCheckKeys();
-               splitUseLengths = metadata.splitUseLengths;
-               int blockLength = splitUseLengths ? -1 : NodeCHK.BLOCK_SIZE;
-               if(splitfileType == Metadata.SPLITFILE_NONREDUNDANT) {
-                       // Don't need to do much - just fetch everything and 
piece it together.
-                       blocksPerSegment = -1;
-                       checkBlocksPerSegment = -1;
-                       segmentCount = 1;
-               } else if(splitfileType == Metadata.SPLITFILE_ONION_STANDARD) {
-                       byte[] params = metadata.splitfileParams;
-                       if(params == null || params.length < 8)
-                               throw new MetadataParseException("No splitfile 
params");
-                       blocksPerSegment = Fields.bytesToInt(params, 0);
-                       checkBlocksPerSegment = Fields.bytesToInt(params, 4);
-                       if(blocksPerSegment > ctx.maxDataBlocksPerSegment
-                                       || checkBlocksPerSegment > 
ctx.maxCheckBlocksPerSegment)
-                               throw new 
FetchException(FetchException.TOO_MANY_BLOCKS_PER_SEGMENT, "Too many blocks per 
segment: "+blocksPerSegment+" data, "+checkBlocksPerSegment+" check");
-                       segmentCount = (splitfileDataBlocks.length / 
blocksPerSegment) +
-                               (splitfileDataBlocks.length % blocksPerSegment 
== 0 ? 0 : 1);
-                       // Onion, 128/192.
-                       // Will be segmented.
-               } else throw new MetadataParseException("Unknown splitfile 
format: "+splitfileType);
-               Logger.minor(this, "Algorithm: "+splitfileType+", blocks per 
segment: "+blocksPerSegment+", check blocks per segment: 
"+checkBlocksPerSegment+", segments: "+segmentCount);
-               segments = new Segment[segmentCount]; // initially null on all 
entries
-               if(segmentCount == 1) {
-                       segments[0] = new Segment(splitfileType, 
splitfileDataBlocks, splitfileCheckBlocks, this, archiveContext, ctx, 
maxTempLength, splitUseLengths, recursionLevel+1);
-               } else {
-                       int dataBlocksPtr = 0;
-                       int checkBlocksPtr = 0;
-                       for(int i=0;i<segments.length;i++) {
-                               // Create a segment. Give it its keys.
-                               int copyDataBlocks = 
Math.min(splitfileDataBlocks.length - dataBlocksPtr, blocksPerSegment);
-                               int copyCheckBlocks = 
Math.min(splitfileCheckBlocks.length - checkBlocksPtr, checkBlocksPerSegment);
-                               FreenetURI[] dataBlocks = new 
FreenetURI[copyDataBlocks];
-                               FreenetURI[] checkBlocks = new 
FreenetURI[copyCheckBlocks];
-                               if(copyDataBlocks > 0)
-                                       System.arraycopy(splitfileDataBlocks, 
dataBlocksPtr, dataBlocks, 0, copyDataBlocks);
-                               if(copyCheckBlocks > 0)
-                                       System.arraycopy(splitfileCheckBlocks, 
checkBlocksPtr, checkBlocks, 0, copyCheckBlocks);
-                               dataBlocksPtr += copyDataBlocks;
-                               checkBlocksPtr += copyCheckBlocks;
-                               segments[i] = new Segment(splitfileType, 
dataBlocks, checkBlocks, this, archiveContext, ctx, maxTempLength, 
splitUseLengths, blockLength);
-                       }
-               }
-               unstartedSegments = new Vector();
-               for(int i=0;i<segments.length;i++)
-                       unstartedSegments.add(segments[i]);
-               Logger.minor(this, "Segments: "+unstartedSegments.size()+", 
data keys: "+splitfileDataBlocks.length+", check keys: 
"+(splitfileCheckBlocks==null?0:splitfileCheckBlocks.length));
-       }
-
-       /**
-        * Fetch the splitfile.
-        * Fetch one segment, while decoding the previous one.
-        * Fetch the segments in random order.
-        * When everything has been fetched and decoded, return the full data.
-        * @throws FetchException 
-        */
-       public Bucket fetch() throws FetchException {
-               /*
-                * While(true) {
-                *      Pick a random segment, start it fetching.
-                *      Wait for a segment to finish fetching, a segment to 
finish decoding, or an error.
-                *      If a segment finishes fetching:
-                *              Continue to start another one if there are any 
left
-                *      If a segment finishes decoding:
-                *              If all segments are decoded, assemble all the 
segments and return the data.
-                * 
-                * Segments are expected to automatically start decoding when 
they finish fetching,
-                * but to tell us either way.
-                */
-               while(true) {
-                       synchronized(this) {
-                               if(fetchingSegment == null) {
-                                       // Pick a random segment
-                                       fetchingSegment = 
chooseUnstartedSegment();
-                                       if(fetchingSegment == null) {
-                                               // All segments have started
-                                       } else {
-                                               fetchingSegment.start();
-                                       }
-                               }
-                               if(allSegmentsFinished) {
-                                       return finalStatus();
-                               }
-                               try {
-                                       wait(10*1000); // or wait()?
-                               } catch (InterruptedException e) {
-                                       // Ignore
-                               }
-                       }
-               }
-       }
-
-       private Segment chooseUnstartedSegment() {
-               synchronized(unstartedSegments) {
-                       if(unstartedSegments.isEmpty()) return null;
-                       int x = fctx.random.nextInt(unstartedSegments.size());
-                       Logger.minor(this, "Starting segment "+x+" of 
"+unstartedSegments.size());
-                       Segment s = (Segment) unstartedSegments.remove(x);
-                       return s;
-               }
-       }
-
-       /** Return the final status of the fetch. Throws an exception, or 
returns a 
-        * Bucket containing the fetched data.
-        * @throws FetchException If the fetch failed for some reason.
-        */
-       private Bucket finalStatus() throws FetchException {
-               long finalLength = 0;
-               for(int i=0;i<segments.length;i++) {
-                       Segment s = segments[i];
-                       if(!s.isFinished()) throw new 
IllegalStateException("Not all finished");
-                       s.throwError();
-                       // If still here, it succeeded
-                       finalLength += s.decodedLength();
-                       // Healing is done by Segment
-               }
-               if(finalLength > overrideLength)
-                       finalLength = overrideLength;
-               
-               long bytesWritten = 0;
-               OutputStream os = null;
-               Bucket output;
-               try {
-                       output = fctx.bucketFactory.makeBucket(finalLength);
-                       os = output.getOutputStream();
-                       for(int i=0;i<segments.length;i++) {
-                               Segment s = segments[i];
-                               long max = (finalLength < 0 ? 0 : (finalLength 
- bytesWritten));
-                               bytesWritten += s.writeDecodedDataTo(os, max);
-                       }
-               } catch (IOException e) {
-                       throw new FetchException(FetchException.BUCKET_ERROR, 
e);
-               } finally {
-                       if(os != null) {
-                               try {
-                                       os.close();
-                               } catch (IOException e) {
-                                       // If it fails to close it may return 
corrupt data.
-                                       throw new 
FetchException(FetchException.BUCKET_ERROR, e);
-                               }
-                       }
-               }
-               return output;
-       }
-
-       public void gotBlocks(Segment segment) {
-               Logger.minor(this, "Got blocks for segment: "+segment);
-               synchronized(this) {
-                       fetchingSegment = null;
-                       notifyAll();
-               }
-       }
-
-       public void segmentFinished(Segment segment) {
-               Logger.minor(this, "Finished segment: "+segment);
-               synchronized(this) {
-                       boolean allDone = true;
-                       for(int i=0;i<segments.length;i++)
-                               if(!segments[i].isFinished()) {
-                                       Logger.minor(this, "Segment 
"+segments[i]+" is not finished");
-                                       allDone = false;
-                               }
-                       if(allDone) allSegmentsFinished = true;
-                       notifyAll();
-               }
-       }
-
-       public void onProgress() {
-               int totalBlocks = splitfileDataBlocks.length;
-               int fetchedBlocks = 0;
-               int failedBlocks = 0;
-               int fatallyFailedBlocks = 0;
-               int runningBlocks = 0;
-               for(int i=0;i<segments.length;i++) {
-                       Logger.minor(this, "Segment: "+segments[i]+": 
fetched="+segments[i].fetchedBlocks()+", failedBlocks: 
"+segments[i].failedBlocks()+
-                                       ", fatally: 
"+segments[i].fatallyFailedBlocks()+", running: "+segments[i].runningBlocks());
-                       fetchedBlocks += segments[i].fetchedBlocks();
-                       failedBlocks += segments[i].failedBlocks();
-                       fatallyFailedBlocks += 
segments[i].fatallyFailedBlocks();
-                       runningBlocks += segments[i].runningBlocks();
-               }
-               fctx.eventProducer.produceEvent(new 
SplitfileProgressEvent(totalBlocks, fetchedBlocks, failedBlocks, 
fatallyFailedBlocks, runningBlocks));
-       }
-
-}
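
The trickiest part of the deleted fetcher is the segment arithmetic in its constructor: a ceiling division to get the segment count, then a bounded copy so that the final segment can be shorter than the rest. The following self-contained sketch reproduces just that arithmetic for illustration; plain strings stand in for FreenetURI keys and the class name is invented, so it is not Freenet API.

// Illustrative only: the ceiling division and per-segment copy performed by
// the deleted SplitFetcher constructor. Strings stand in for FreenetURI keys.
public class SegmentSplitSketch {

    static String[][] split(String[] keys, int blocksPerSegment) {
        // One extra segment whenever the keys do not divide evenly.
        int segmentCount = (keys.length / blocksPerSegment)
                + (keys.length % blocksPerSegment == 0 ? 0 : 1);
        String[][] segments = new String[segmentCount][];
        int ptr = 0;
        for (int i = 0; i < segmentCount; i++) {
            // The final segment may be shorter than blocksPerSegment.
            int copy = Math.min(keys.length - ptr, blocksPerSegment);
            segments[i] = new String[copy];
            System.arraycopy(keys, ptr, segments[i], 0, copy);
            ptr += copy;
        }
        return segments;
    }

    public static void main(String[] args) {
        String[] keys = new String[300];
        for (int i = 0; i < keys.length; i++)
            keys[i] = "key" + i;
        String[][] segments = split(keys, 128);
        // Expect three segments of 128, 128 and 44 keys.
        for (int i = 0; i < segments.length; i++)
            System.out.println("segment " + i + ": " + segments[i].length);
    }
}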

Deleted: branches/freenet-freejvms/src/freenet/client/SplitInserter.java
===================================================================
--- branches/freenet-freejvms/src/freenet/client/SplitInserter.java     
2006-02-03 22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/freenet/client/SplitInserter.java     
2006-02-03 22:55:27 UTC (rev 7999)
@@ -1,281 +0,0 @@
-package freenet.client;
-
-import java.io.IOException;
-import java.util.Vector;
-
-import freenet.client.events.GeneratedURIEvent;
-import freenet.client.events.SplitfileProgressEvent;
-import freenet.keys.FreenetURI;
-import freenet.keys.NodeCHK;
-import freenet.support.Bucket;
-import freenet.support.BucketTools;
-import freenet.support.Logger;
-import freenet.support.compress.Compressor;
-
-/**
- * Insert a splitfile.
- */
-public class SplitInserter implements RetryTrackerCallback {
-
-       final Bucket origData;
-       final long dataLength;
-       final ClientMetadata clientMetadata;
-       final short compressionCodec;
-       final short splitfileAlgorithm;
-       final InserterContext ctx;
-       final RetryTracker tracker;
-       final int segmentSize;
-       final int checkSegmentSize;
-       final int blockSize;
-       final boolean isMetadata;
-       SplitfileBlock[] origDataBlocks;
-       InsertSegment encodingSegment;
-       InsertSegment[] segments;
-       private boolean finishedInserting = false;
-       private boolean getCHKOnly;
-       private int succeeded;
-       private int failed;
-       private int fatalErrors;
-       private int countCheckBlocks;
-       private SplitfileBlock[] fatalErrorBlocks;
-       private FileInserter inserter;
-       
-       public SplitInserter(Bucket data, ClientMetadata clientMetadata, 
Compressor compressor, short splitfileAlgorithm, InserterContext ctx, 
FileInserter inserter, int blockLength, boolean getCHKOnly, boolean isMetadata) 
throws InserterException {
-               this.origData = data;
-               this.getCHKOnly = getCHKOnly;
-               this.blockSize = blockLength;
-               this.clientMetadata = clientMetadata;
-               if(compressor == null)
-                       compressionCodec = -1;
-               else
-                       compressionCodec = compressor.codecNumberForMetadata();
-               this.splitfileAlgorithm = splitfileAlgorithm;
-               this.ctx = ctx;
-               this.dataLength = data.size();
-               segmentSize = ctx.splitfileSegmentDataBlocks;
-               checkSegmentSize = splitfileAlgorithm == 
Metadata.SPLITFILE_NONREDUNDANT ? 0 : ctx.splitfileSegmentCheckBlocks;
-               tracker = new RetryTracker(ctx.maxInsertRetries, 
Integer.MAX_VALUE, ctx.random, ctx.maxSplitInsertThreads, true, this, true);
-               try {
-                       splitIntoBlocks();
-               } catch (IOException e) {
-                       throw new 
InserterException(InserterException.BUCKET_ERROR, e, null);
-               }
-               this.inserter = inserter;
-               this.isMetadata = isMetadata;
-       }
-
-       /**
-        * Inserts the splitfile.
-        * @return The URI of the resulting file.
-        * @throws InserterException If we are not able to insert the splitfile.
-        */
-       public FreenetURI run() throws InserterException {
-               try {
-                       startInsertingDataBlocks();
-                       splitIntoSegments(segmentSize);
-                       // Backwards, because the last is the shortest
-                       try {
-                               for(int i=segments.length-1;i>=0;i--) {
-                                       encodeSegment(i, origDataBlocks.length 
+ checkSegmentSize * i);
-                                       Logger.minor(this, "Encoded segment 
"+i+" of "+segments.length);
-                               }
-                       } catch (IOException e) {
-                               throw new 
InserterException(InserterException.BUCKET_ERROR, e, null);
-                       }
-                       // Wait for the insertion thread to finish
-                       return waitForCompletion();
-               } catch (Throwable t) {
-                       Logger.error(this, "Caught "+t, t);
-                       tracker.kill();
-                       if(t instanceof InserterException) throw 
(InserterException)t;
-                       throw new 
InserterException(InserterException.INTERNAL_ERROR, t, null);
-               }
-       }
-
-       private FreenetURI waitForCompletion() throws InserterException {
-               tracker.setFinishOnEmpty();
-               synchronized(this) {
-                       while(!finishedInserting) {
-                               try {
-                                       wait(10*1000);
-                               } catch (InterruptedException e) {
-                                       // Ignore
-                               }
-                       }
-               }
-
-               // Create the manifest (even if we failed, so that the key is 
visible)
-
-               FreenetURI[] dataURIs = getDataURIs();
-               FreenetURI[] checkURIs = getCheckURIs();
-               
-               Logger.minor(this, "Data URIs: "+dataURIs.length+", check URIs: 
"+checkURIs.length);
-               
-               boolean missingURIs = anyNulls(dataURIs) || anyNulls(checkURIs);
-               
-               if(missingURIs && fatalErrors == 0 && failed == 0)
-                       throw new IllegalStateException();
-               
-               FreenetURI uri = null;
-               
-               if(!missingURIs) {
-               
-                       Metadata metadata = new Metadata(splitfileAlgorithm, 
dataURIs, checkURIs, segmentSize, checkSegmentSize, clientMetadata, dataLength, 
compressionCodec, isMetadata);
-                       
-                       Bucket mbucket;
-                       try {
-                               mbucket = 
BucketTools.makeImmutableBucket(ctx.bf, metadata.writeToByteArray());
-                       } catch (IOException e) {
-                               throw new 
InserterException(InserterException.BUCKET_ERROR, null);
-                       }
-                       
-                       if(inserter == null)
-                               inserter = new FileInserter(ctx);
-                       
-                       InsertBlock mblock = new InsertBlock(mbucket, null, 
FreenetURI.EMPTY_CHK_URI);
-                       
-                       // FIXME probably should uncomment below so it doesn't 
get inserted at all?
-                       // FIXME this is a hack for small network support... 
but we will need that IRL... hmmm
-                       try {
-                               uri = inserter.run(mblock, true, getCHKOnly/* 
|| (fatalErrors > 0 || failed > 0)*/, false);
-                       } catch (InserterException e) {
-                               e.errorCodes = 
tracker.getAccumulatedNonFatalErrorCodes().merge(tracker.getAccumulatedFatalErrorCodes());
-                               throw e;
-                       }
-                       
-               }
-               // Did we succeed?
-               
-               ctx.eventProducer.produceEvent(new GeneratedURIEvent(uri));
-               
-               if(fatalErrors > 0) {
-                       throw new 
InserterException(InserterException.FATAL_ERRORS_IN_BLOCKS, 
tracker.getAccumulatedFatalErrorCodes(), uri);
-               }
-               
-               if(failed > 0) {
-                       throw new 
InserterException(InserterException.TOO_MANY_RETRIES_IN_BLOCKS, 
tracker.getAccumulatedNonFatalErrorCodes(), uri);
-               }
-               
-               return uri;
-       }
-
-       // FIXME move this to somewhere
-       private static boolean anyNulls(Object[] array) {
-               for(int i=0;i<array.length;i++)
-                       if(array[i] == null) return true;
-               return false;
-       }
-
-       private FreenetURI[] getCheckURIs() {
-               // Copy check blocks from each segment into a FreenetURI[].
-               FreenetURI[] uris = new FreenetURI[countCheckBlocks];
-               int x = 0;
-               for(int i=0;i<segments.length;i++) {
-                       FreenetURI[] segURIs = segments[i].getCheckURIs();
-                       if(x + segURIs.length > countCheckBlocks) 
-                               throw new IllegalStateException("x="+x+", 
segURIs="+segURIs.length+", countCheckBlocks="+countCheckBlocks);
-                       System.arraycopy(segURIs, 0, uris, x, segURIs.length);
-                       x += segURIs.length;
-               }
-
-               if(uris.length != x)
-                       throw new IllegalStateException("Total is wrong");
-               
-               return uris;
-       }
-
-       private FreenetURI[] getDataURIs() {
-               FreenetURI[] uris = new FreenetURI[origDataBlocks.length];
-               for(int i=0;i<uris.length;i++)
-                       uris[i] = origDataBlocks[i].getURI();
-               return uris;
-       }
-
-       private int encodeSegment(int i, int offset) throws IOException {
-               encodingSegment = segments[i];
-               return encodingSegment.encode(offset, tracker, ctx);
-       }
-
-       /**
-        * Start the insert, by adding all the data blocks.
-        */
-       private void startInsertingDataBlocks() {
-               for(int i=0;i<origDataBlocks.length;i++)
-                       tracker.addBlock(origDataBlocks[i]);
-               tracker.callOnProgress();
-       }
-
-       /**
-        * Split blocks into segments for encoding.
-        * @throws IOException If there is a bucket error encoding the file.
-        */
-       private void splitIntoBlocks() throws IOException {
-               Bucket[] dataBuckets = BucketTools.split(origData, 
NodeCHK.BLOCK_SIZE, ctx.bf);
-               origDataBlocks = new SplitfileBlock[dataBuckets.length];
-               for(int i=0;i<origDataBlocks.length;i++) {
-                       origDataBlocks[i] = new BlockInserter(dataBuckets[i], 
i, tracker, ctx, getCHKOnly);
-                       if(origDataBlocks[i].getData() == null)
-                               throw new NullPointerException("Block "+i+" of 
"+dataBuckets.length+" is null");
-               }
-       }
-
-       /**
-        * Group the blocks into segments.
-        */
-       private void splitIntoSegments(int segmentSize) {
-               int dataBlocks = origDataBlocks.length;
-
-               Vector segs = new Vector();
-               
-               // First split the data up
-               if(dataBlocks < segmentSize || segmentSize == -1) {
-                       // Single segment
-                       InsertSegment onlySeg = new 
InsertSegment(splitfileAlgorithm, origDataBlocks, blockSize, ctx.bf, 
getCHKOnly, 0);
-                       countCheckBlocks = onlySeg.checkBlocks.length;
-                       segs.add(onlySeg);
-               } else {
-                       int j = 0;
-                       int segNo = 0;
-                       for(int i=segmentSize;;i+=segmentSize) {
-                               if(i > dataBlocks) i = dataBlocks;
-                               SplitfileBlock[] seg = new SplitfileBlock[i-j];
-                               System.arraycopy(origDataBlocks, j, seg, 0, 
i-j);
-                               j = i;
-                               for(int x=0;x<seg.length;x++)
-                                       if(seg[x].getData() == null) throw new 
NullPointerException("In splitIntoSegs: "+x+" is null of "+seg.length+" of 
"+segNo);
-                               InsertSegment s = new 
InsertSegment(splitfileAlgorithm, seg, blockSize, ctx.bf, getCHKOnly, segNo);
-                               countCheckBlocks += s.checkBlocks.length;
-                               segs.add(s);
-                               
-                               if(i == dataBlocks) break;
-                               segNo++;
-                       }
-               }
-               segments = (InsertSegment[]) segs.toArray(new 
InsertSegment[segs.size()]);
-       }
-
-       public void finished(SplitfileBlock[] succeeded, SplitfileBlock[] 
failed, SplitfileBlock[] fatalErrors) {
-               synchronized(this) {
-                       finishedInserting = true;
-                       this.succeeded = succeeded.length;
-                       this.failed = failed.length;
-                       this.fatalErrorBlocks = fatalErrors;
-                       this.fatalErrors = fatalErrorBlocks.length;
-                       notify();
-               }
-       }
-
-       public void onProgress() {
-               /* What info to report?
-                * - Total number of blocks to insert.
-                * - 
-                */
-               int totalBlocks = origDataBlocks.length + countCheckBlocks;
-               int fetchedBlocks = tracker.succeededBlocks().length;
-               int failedBlocks = tracker.countFailedBlocks();
-               int fatallyFailedBlocks = tracker.fatalErrorBlocks().length;
-               int runningBlocks = tracker.runningBlocks().length;
-               ctx.eventProducer.produceEvent(new 
SplitfileProgressEvent(totalBlocks, fetchedBlocks, failedBlocks, 
fatallyFailedBlocks, runningBlocks));
-       }
-
-}
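
The deleted inserter coordinated its worker callbacks with a plain wait/notify handshake: run() blocked in waitForCompletion() until the tracker called finished(), which set a flag and woke the waiter. A minimal self-contained sketch of that idiom follows; the class and method names are invented for illustration and are not Freenet API.

// Illustrative only: the wait/notify completion handshake used by the
// deleted SplitInserter.waitForCompletion()/finished() pair.
public class CompletionLatchSketch {

    private boolean finished = false;

    // Called from the waiting (inserting) thread.
    public synchronized void waitForCompletion() {
        while (!finished) {
            try {
                // Wake up periodically, like the original's wait(10*1000),
                // so a missed notify cannot hang the insert forever.
                wait(10 * 1000);
            } catch (InterruptedException e) {
                // Ignore, as the original did.
            }
        }
    }

    // Called from the tracker/callback thread when all blocks are done.
    public synchronized void finished() {
        finished = true;
        notifyAll();
    }

    public static void main(String[] args) {
        final CompletionLatchSketch latch = new CompletionLatchSketch();
        Thread worker = new Thread(new Runnable() {
            public void run() { latch.finished(); }
        });
        worker.start();
        latch.waitForCompletion();
        System.out.println("insert finished");
    }
}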

Modified: branches/freenet-freejvms/src/freenet/client/SplitfileBlock.java
===================================================================
--- branches/freenet-freejvms/src/freenet/client/SplitfileBlock.java    
2006-02-03 22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/freenet/client/SplitfileBlock.java    
2006-02-03 22:55:27 UTC (rev 7999)
@@ -1,11 +1,9 @@
 package freenet.client;

-import freenet.client.RetryTracker.Level;
 import freenet.keys.FreenetURI;
 import freenet.support.Bucket;

-/** Simple interface for a splitfile block */
-public abstract class SplitfileBlock {
+public interface SplitfileBlock {

        /** Get block number. [0,k[ = data blocks, [k, n[ = check blocks */
        abstract int getNumber();
@@ -19,20 +17,5 @@
        /** Set data */
        abstract void setData(Bucket data);

-       /** Start the fetch (or insert). Implementation is required to call 
relevant
-        * methods on RetryTracker when done. */
-       abstract void start();

-       /**
-        * Shut down the fetch as soon as reasonably possible.
-        */
-       abstract public void kill();
-
-       /**
-        * Get the URI of the file. For an insert, this is derived during 
insert.
-        * For a request, it is fixed in the constructor.
-        */
-       abstract public FreenetURI getURI();
-
-       abstract public int getRetryCount();
 }
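
The net effect of the change above is that SplitfileBlock becomes a pure data holder, while the lifecycle methods (start, kill, getURI, getRetryCount) move out of it, presumably into the StartableSplitfileBlock copied in this revision, whose contents are not reproduced here. A rough sketch of that kind of interface split, with Object as a placeholder for Bucket and FreenetURI, purely to show the shape of the refactor:

// Illustrative only: separating a data-holder interface from a startable
// sub-interface. The real StartableSplitfileBlock may look quite different.
interface BlockSketch {
    int getNumber();
    boolean hasData();
    Object getData();
    void setData(Object data);
}

interface StartableBlockSketch extends BlockSketch {
    void start();        // begin the fetch or insert
    void kill();         // abort as soon as reasonably possible
    Object getURI();     // fixed up front for a fetch, derived for an insert
    int getRetryCount();
}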

Modified: 
branches/freenet-freejvms/src/freenet/client/StandardOnionFECCodec.java
===================================================================
--- branches/freenet-freejvms/src/freenet/client/StandardOnionFECCodec.java     
2006-02-03 22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/freenet/client/StandardOnionFECCodec.java     
2006-02-03 22:55:27 UTC (rev 7999)
@@ -8,7 +8,6 @@

 import com.onionnetworks.fec.DefaultFECCodeFactory;
 import com.onionnetworks.fec.FECCode;
-import com.onionnetworks.fec.PureCode;
 import com.onionnetworks.util.Buffer;

 import freenet.support.Bucket;
@@ -24,7 +23,7 @@

        public class Encoder implements Runnable {

-               private final SplitfileBlock[] dataBlockStatus, 
checkBlockStatus;
+               private final Bucket[] dataBlockStatus, checkBlockStatus;
                private final int blockLength;
                private final BucketFactory bf;
                private IOException thrownIOE;
@@ -32,7 +31,7 @@
                private Error thrownError;
                private boolean finished;

-               public Encoder(SplitfileBlock[] dataBlockStatus, 
SplitfileBlock[] checkBlockStatus, int blockLength, BucketFactory bf) {
+               public Encoder(Bucket[] dataBlockStatus, Bucket[] 
checkBlockStatus, int blockLength, BucketFactory bf) {
                        this.dataBlockStatus = dataBlockStatus;
                        this.checkBlockStatus = checkBlockStatus;
                        this.blockLength = blockLength;
@@ -70,7 +69,7 @@
        }

        // REDFLAG: How big is one of these?
-       private static int MAX_CACHED_CODECS = 16;
+       private static int MAX_CACHED_CODECS = 8;
        // REDFLAG: Optimal stripe size? Smaller => less memory usage, but more 
JNI overhead
        private static int STRIPE_SIZE = 4096;
        // REDFLAG: Make this configurable, maybe make it depend on # CPUs
@@ -235,7 +234,7 @@

                        if (idx < k)
                                throw new IllegalArgumentException(
-                                               "Must have at least k packets");
+                                               "Must have at least k packets 
(k="+k+",idx="+idx+")");

                        for (int i = 0; i < packetIndexes.length; i++)
                                Logger.minor(this, "[" + i + "] = " + 
packetIndexes[i]);
@@ -288,7 +287,7 @@
                }
        }

-       public void encode(SplitfileBlock[] dataBlockStatus, SplitfileBlock[] 
checkBlockStatus, int blockLength, BucketFactory bf) throws IOException {
+       public void encode(Bucket[] dataBlockStatus, Bucket[] checkBlockStatus, 
int blockLength, BucketFactory bf) throws IOException {
                // Encodes count as decodes.
                synchronized(runningDecodesSync) {
                        while(runningDecodes >= PARALLEL_DECODES) {
@@ -324,12 +323,26 @@
                        }
                }
        }
+       
+       public void encode(SplitfileBlock[] dataBlockStatus, SplitfileBlock[] 
checkBlockStatus, int blockLength, BucketFactory bf) throws IOException {
+               Bucket[] dataBlocks = new Bucket[dataBlockStatus.length];
+               Bucket[] checkBlocks = new Bucket[checkBlockStatus.length];
+               for(int i=0;i<dataBlocks.length;i++)
+                       dataBlocks[i] = dataBlockStatus[i].getData();
+               for(int i=0;i<checkBlocks.length;i++)
+                       checkBlocks[i] = checkBlockStatus[i].getData();
+               encode(dataBlocks, checkBlocks, blockLength, bf);
+               for(int i=0;i<dataBlocks.length;i++)
+                       dataBlockStatus[i].setData(dataBlocks[i]);
+               for(int i=0;i<checkBlocks.length;i++)
+                       checkBlockStatus[i].setData(checkBlocks[i]);
+       }

        /**
         * Do the actual encode.
         */
-       private void realEncode(SplitfileBlock[] dataBlockStatus,
-                       SplitfileBlock[] checkBlockStatus, int blockLength, 
BucketFactory bf)
+       private void realEncode(Bucket[] dataBlockStatus,
+                       Bucket[] checkBlockStatus, int blockLength, 
BucketFactory bf)
                        throws IOException {
 //             Runtime.getRuntime().gc();
 //             Runtime.getRuntime().runFinalization();
@@ -341,10 +354,9 @@
                                + " data blocks, " + checkBlockStatus.length
                                + " check blocks, block length " + blockLength 
+ " with "
                                + this);
-               if (dataBlockStatus.length + checkBlockStatus.length != n)
-                       throw new IllegalArgumentException();
-               if (dataBlockStatus.length != k)
-                       throw new IllegalArgumentException();
+               if (dataBlockStatus.length + checkBlockStatus.length != n ||
+                               dataBlockStatus.length != k)
+                       throw new IllegalArgumentException("Data blocks: 
"+dataBlockStatus.length+", Check blocks: "+checkBlockStatus.length+", n: 
"+n+", k: "+k);
                Buffer[] dataPackets = new Buffer[k];
                Buffer[] checkPackets = new Buffer[n - k];
                Bucket[] buckets = new Bucket[n];
@@ -366,7 +378,7 @@
                                                STRIPE_SIZE);

                        for (int i = 0; i < dataBlockStatus.length; i++) {
-                               buckets[i] = dataBlockStatus[i].getData();
+                               buckets[i] = dataBlockStatus[i];
                                long sz = buckets[i].size();
                                if (sz < blockLength) {
                                        if (i != dataBlockStatus.length - 1)
@@ -382,7 +394,7 @@
                        }

                        for (int i = 0; i < checkBlockStatus.length; i++) {
-                               buckets[i + k] = checkBlockStatus[i].getData();
+                               buckets[i + k] = checkBlockStatus[i];
                                if (buckets[i + k] == null) {
                                        buckets[i + k] = 
bf.makeBucket(blockLength);
                                        writers[i] = buckets[i + 
k].getOutputStream();
@@ -392,11 +404,20 @@
                                }
                        }

+//                     Runtime.getRuntime().gc();
+//                     Runtime.getRuntime().runFinalization();
+//                     Runtime.getRuntime().gc();
+//                     Runtime.getRuntime().runFinalization();
+                       long memUsedBeforeEncodes = 
Runtime.getRuntime().totalMemory() - Runtime.getRuntime().freeMemory();
+                       Logger.minor(this, "Memory in use before encodes: 
"+memUsedBeforeEncodes);
+                       
                        if (numberToEncode > 0) {
                                System.err.println("************* Encoding " + 
dataBlockStatus.length
                                                + " -> " + numberToEncode + " 
*************");
                                // Do the (striped) encode
                                for (int offset = 0; offset < blockLength; 
offset += STRIPE_SIZE) {
+                                       long memUsedBeforeRead = 
Runtime.getRuntime().totalMemory() - Runtime.getRuntime().freeMemory();
+                                       Logger.minor(this, "Memory in use 
before read: "+memUsedBeforeRead);
                                        // Read the data in first
                                        for (int i = 0; i < k; i++) {
                                                
readers[i].readFully(realBuffer, i * STRIPE_SIZE,
@@ -455,7 +476,7 @@
                        Bucket data = buckets[i + k];
                        if (data == null)
                                throw new NullPointerException();
-                       checkBlockStatus[i].setData(data);
+                       checkBlockStatus[i] = data;
                }
        }
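
One subtlety in the compatibility overload added above: the Bucket[]-based encode() fills empty slots of the check array with freshly allocated buckets, so the SplitfileBlock[] wrapper has to copy the possibly replaced array entries back with setData() after the call. The stripped-down illustration below shows that out-parameter copy-back with plain Objects standing in for Buckets; none of these names are Freenet API.

// Illustrative only: why the SplitfileBlock[] overload copies results back.
// The array acts as an out-parameter: encode() may replace null entries.
public class CopyBackSketch {

    interface Holder { Object get(); void set(Object o); }

    // Stand-in for the Bucket[]-based encode(): fills in missing entries.
    static void encode(Object[] payloads) {
        for (int i = 0; i < payloads.length; i++)
            if (payloads[i] == null) payloads[i] = "generated-" + i;
    }

    // Stand-in for the wrapper overload: unwrap, encode, write back.
    static void encode(Holder[] holders) {
        Object[] payloads = new Object[holders.length];
        for (int i = 0; i < holders.length; i++) payloads[i] = holders[i].get();
        encode(payloads);
        for (int i = 0; i < holders.length; i++) holders[i].set(payloads[i]);
    }

    public static void main(String[] args) {
        final Object[] store = new Object[3];
        Holder[] holders = new Holder[3];
        for (int i = 0; i < holders.length; i++) {
            final int idx = i;
            holders[i] = new Holder() {
                public Object get() { return store[idx]; }
                public void set(Object o) { store[idx] = o; }
            };
        }
        encode(holders);
        System.out.println(store[0] + ", " + store[1] + ", " + store[2]);
    }
}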


Copied: 
branches/freenet-freejvms/src/freenet/client/StartableSplitfileBlock.java (from 
rev 7998, trunk/freenet/src/freenet/client/StartableSplitfileBlock.java)

Deleted: branches/freenet-freejvms/src/freenet/client/StdSplitfileBlock.java
===================================================================
--- branches/freenet-freejvms/src/freenet/client/StdSplitfileBlock.java 
2006-02-03 22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/freenet/client/StdSplitfileBlock.java 
2006-02-03 22:55:27 UTC (rev 7999)
@@ -1,54 +0,0 @@
-package freenet.client;
-
-import freenet.support.Bucket;
-import freenet.support.Logger;
-
-public abstract class StdSplitfileBlock extends SplitfileBlock implements 
Runnable {
-
-       Bucket fetchedData;
-       protected final RetryTracker tracker;
-       /** Splitfile index - [0,k[ is the data blocks, [k,n[ is the check 
blocks */
-       protected final int index;
-
-       public StdSplitfileBlock(RetryTracker tracker2, int index2, Bucket 
data) {
-               if(tracker2 == null) throw new NullPointerException();
-               this.tracker = tracker2;
-               this.index = index2;
-               this.fetchedData = data;
-       }
-
-       public int getNumber() {
-               return index;
-       }
-
-       public boolean hasData() {
-               return fetchedData != null;
-       }
-
-       public Bucket getData() {
-               return fetchedData;
-       }
-
-       public void setData(Bucket data) {
-               if(data == fetchedData) return;
-               fetchedData = data;
-               Logger.minor(this, "Set data: "+(data == null ? "(null)" : 
(""+data.size())+ " on "+this), new Exception("debug"));
-       }
-
-       public void start() {
-               checkStartable();
-               Logger.minor(this, "Starting "+this);
-               try {
-                       Thread t = new Thread(this, getName());
-                       t.setDaemon(true);
-                       t.start();
-               } catch (Throwable error) {
-                       tracker.fatalError(this, 
InserterException.INTERNAL_ERROR);
-                       Logger.error(this, "Caught "+error+" creating thread 
for "+this);
-               }
-       }
-
-       public abstract String getName();
-       
-       protected abstract void checkStartable();
-}
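
The deleted base class ran every block on its own daemon thread and reported a fatal error to the tracker if the thread could not even be created. A compact stand-alone sketch of that start-or-report pattern; the Tracker interface here is invented for illustration and is not the RetryTracker API.

// Illustrative only: start a daemon worker and report failure to a tracker,
// in the spirit of the deleted StdSplitfileBlock.start().
public class DaemonStartSketch implements Runnable {

    interface Tracker { void fatalError(Runnable block, String reason); }

    private final Tracker tracker;

    DaemonStartSketch(Tracker tracker) { this.tracker = tracker; }

    public void start() {
        try {
            Thread t = new Thread(this, "block-worker");
            t.setDaemon(true);   // do not keep the JVM alive for this block
            t.start();
        } catch (Throwable error) {
            // Could not even spawn the worker: count it as a fatal block error.
            tracker.fatalError(this, "INTERNAL_ERROR: " + error);
        }
    }

    public void run() { /* fetch or insert one block here */ }

    public static void main(String[] args) {
        Tracker tracker = new Tracker() {
            public void fatalError(Runnable block, String reason) {
                System.err.println("fatal: " + reason);
            }
        };
        new DaemonStartSketch(tracker).start();
    }
}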

Copied: branches/freenet-freejvms/src/freenet/client/async (from rev 7998, 
trunk/freenet/src/freenet/client/async)

Deleted: 
branches/freenet-freejvms/src/freenet/client/async/BaseClientPutter.java
===================================================================
--- trunk/freenet/src/freenet/client/async/BaseClientPutter.java        
2006-02-03 22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/freenet/client/async/BaseClientPutter.java    
2006-02-03 22:55:27 UTC (rev 7999)
@@ -1,9 +0,0 @@
-package freenet.client.async;
-
-public abstract class BaseClientPutter extends ClientRequest {
-
-       protected BaseClientPutter(short priorityClass, ClientRequestScheduler 
scheduler, Object context) {
-               super(priorityClass, scheduler, context);
-       }
-
-}

Copied: 
branches/freenet-freejvms/src/freenet/client/async/BaseClientPutter.java (from 
rev 7998, trunk/freenet/src/freenet/client/async/BaseClientPutter.java)

Deleted: branches/freenet-freejvms/src/freenet/client/async/ClientCallback.java
===================================================================
--- trunk/freenet/src/freenet/client/async/ClientCallback.java  2006-02-03 
22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/freenet/client/async/ClientCallback.java      
2006-02-03 22:55:27 UTC (rev 7999)
@@ -1,25 +0,0 @@
-package freenet.client.async;
-
-import freenet.client.FetchException;
-import freenet.client.FetchResult;
-import freenet.client.InserterException;
-import freenet.keys.FreenetURI;
-
-/**
- * A client process. Something that initiates requests, and can cancel
- * them. FCP, Fproxy, and the GlobalPersistentClient, implement this
- * somewhere.
- */
-public interface ClientCallback {
-
-       public void onSuccess(FetchResult result, ClientGetter state);
-       
-       public void onFailure(FetchException e, ClientGetter state);
-
-       public void onSuccess(BaseClientPutter state);
-       
-       public void onFailure(InserterException e, BaseClientPutter state);
-       
-       public void onGeneratedURI(FreenetURI uri, BaseClientPutter state);
-       
-}
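
ClientCallback is the seam between the asynchronous request classes and whatever drives them. As a purely illustrative sketch, not part of this revision, a caller that wants old-style blocking behaviour can implement the interface shown above and park a thread on it until a terminal callback fires. This assumes the rest of the Freenet tree on the classpath; the class name is invented.

import freenet.client.FetchException;
import freenet.client.FetchResult;
import freenet.client.InserterException;
import freenet.client.async.BaseClientPutter;
import freenet.client.async.ClientCallback;
import freenet.client.async.ClientGetter;
import freenet.keys.FreenetURI;

// Illustrative only: block a plain thread until an async request completes.
public class BlockingCallbackSketch implements ClientCallback {

    private boolean done = false;
    private FetchResult result;
    private FetchException fetchError;

    public synchronized void onSuccess(FetchResult result, ClientGetter state) {
        this.result = result;
        done = true;
        notifyAll();
    }

    public synchronized void onFailure(FetchException e, ClientGetter state) {
        fetchError = e;
        done = true;
        notifyAll();
    }

    public synchronized void onSuccess(BaseClientPutter state) {
        done = true;
        notifyAll();
    }

    public synchronized void onFailure(InserterException e, BaseClientPutter state) {
        // Insert failures would be recorded the same way; omitted for brevity.
        done = true;
        notifyAll();
    }

    public void onGeneratedURI(FreenetURI uri, BaseClientPutter state) {
        // A URI may be generated long before an insert completes; nothing to wake.
    }

    /** Block the calling thread until a terminal callback has fired. */
    public synchronized FetchResult waitForFetch() throws FetchException {
        while (!done) {
            try {
                wait();
            } catch (InterruptedException e) {
                // Ignore and keep waiting, matching the surrounding code's style.
            }
        }
        if (fetchError != null)
            throw fetchError;
        return result;
    }
}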

Copied: branches/freenet-freejvms/src/freenet/client/async/ClientCallback.java 
(from rev 7998, trunk/freenet/src/freenet/client/async/ClientCallback.java)

Deleted: branches/freenet-freejvms/src/freenet/client/async/ClientGetState.java
===================================================================
--- trunk/freenet/src/freenet/client/async/ClientGetState.java  2006-02-03 
22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/freenet/client/async/ClientGetState.java      
2006-02-03 22:55:27 UTC (rev 7999)
@@ -1,15 +0,0 @@
-package freenet.client.async;
-
-/**
- * A ClientGetState.
- * Represents a stage in the fetch process.
- */
-public abstract class ClientGetState {
-
-       public abstract ClientGetter getParent();
-
-       public abstract void schedule();
-
-       public abstract void cancel();
-
-}

Copied: branches/freenet-freejvms/src/freenet/client/async/ClientGetState.java 
(from rev 7998, trunk/freenet/src/freenet/client/async/ClientGetState.java)

Deleted: branches/freenet-freejvms/src/freenet/client/async/ClientGetter.java
===================================================================
--- trunk/freenet/src/freenet/client/async/ClientGetter.java    2006-02-03 
22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/freenet/client/async/ClientGetter.java        
2006-02-03 22:55:27 UTC (rev 7999)
@@ -1,99 +0,0 @@
-package freenet.client.async;
-
-import java.net.MalformedURLException;
-
-import freenet.client.ArchiveContext;
-import freenet.client.ClientMetadata;
-import freenet.client.FetchException;
-import freenet.client.FetchResult;
-import freenet.client.FetcherContext;
-import freenet.client.events.SplitfileProgressEvent;
-import freenet.keys.FreenetURI;
-import freenet.support.Logger;
-
-/**
- * A high level data request.
- */
-public class ClientGetter extends ClientRequest implements 
GetCompletionCallback {
-
-       final ClientCallback client;
-       final FreenetURI uri;
-       final FetcherContext ctx;
-       final ArchiveContext actx;
-       ClientGetState currentState;
-       private boolean finished;
-       private int archiveRestarts;
-       
-       public ClientGetter(ClientCallback client, ClientRequestScheduler 
sched, FreenetURI uri, FetcherContext ctx, short priorityClass, Object 
clientContext) {
-               super(priorityClass, sched, clientContext);
-               this.client = client;
-               this.uri = uri;
-               this.ctx = ctx;
-               this.finished = false;
-               this.actx = new ArchiveContext();
-               archiveRestarts = 0;
-       }
-       
-       public void start() throws FetchException {
-               try {
-                       currentState = new SingleFileFetcher(this, this, new 
ClientMetadata(), uri, ctx, actx, getPriorityClass(), 0, false, null, true);
-                       currentState.schedule();
-               } catch (MalformedURLException e) {
-                       throw new FetchException(FetchException.INVALID_URI, e);
-               }
-       }
-
-       public void onSuccess(FetchResult result, ClientGetState state) {
-               finished = true;
-               currentState = null;
-               client.onSuccess(result, this);
-       }
-
-       public void onFailure(FetchException e, ClientGetState state) {
-               while(true) {
-                       if(e.mode == FetchException.ARCHIVE_RESTART) {
-                               archiveRestarts++;
-                               if(archiveRestarts > ctx.maxArchiveRestarts)
-                                       e = new 
FetchException(FetchException.TOO_MANY_ARCHIVE_RESTARTS);
-                               else {
-                                       try {
-                                               start();
-                                       } catch (FetchException e1) {
-                                               e = e1;
-                                               continue;
-                                       }
-                                       return;
-                               }
-                       }
-                       finished = true;
-                       client.onFailure(e, this);
-                       return;
-               }
-       }
-       
-       public void cancel() {
-               synchronized(this) {
-                       super.cancel();
-                       if(currentState != null)
-                               currentState.cancel();
-               }
-       }
-
-       public boolean isFinished() {
-               return finished || cancelled;
-       }
-
-       public FreenetURI getURI() {
-               return uri;
-       }
-
-       public void notifyClients() {
-               ctx.eventProducer.produceEvent(new 
SplitfileProgressEvent(this.totalBlocks, this.successfulBlocks, 
this.failedBlocks, this.fatallyFailedBlocks, this.minSuccessBlocks, 
this.blockSetFinalized));
-       }
-
-       public void onBlockSetFinished(ClientGetState state) {
-               Logger.minor(this, "Set finished", new Exception("debug"));
-               blockSetFinalized();
-       }
-       
-}
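
The failure handler above restarts the whole fetch when an archive changed underneath it, but only up to ctx.maxArchiveRestarts times before converting the error into TOO_MANY_ARCHIVE_RESTARTS. The same bounded-restart shape, reduced to a self-contained sketch with invented names:

// Illustrative only: retry a whole operation on one specific, recoverable
// failure kind, with an upper bound, mirroring ClientGetter.onFailure() above.
public class BoundedRestartSketch {

    static final int MAX_RESTARTS = 3;

    static class RestartNeeded extends Exception {}

    interface Operation { void run(int attempt) throws RestartNeeded; }

    static void runWithRestarts(Operation op) throws Exception {
        int restarts = 0;
        while (true) {
            try {
                op.run(restarts);
                return;                        // success
            } catch (RestartNeeded e) {
                restarts++;
                if (restarts > MAX_RESTARTS)   // give up: too many restarts
                    throw new Exception("too many restarts (" + restarts + ")");
                // otherwise loop and start again from scratch
            }
        }
    }

    public static void main(String[] args) throws Exception {
        runWithRestarts(new Operation() {
            public void run(int attempt) throws RestartNeeded {
                if (attempt < 2) throw new RestartNeeded();
                System.out.println("succeeded on attempt " + attempt);
            }
        });
    }
}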

Copied: branches/freenet-freejvms/src/freenet/client/async/ClientGetter.java 
(from rev 7998, trunk/freenet/src/freenet/client/async/ClientGetter.java)

Deleted: branches/freenet-freejvms/src/freenet/client/async/ClientPutState.java
===================================================================
--- trunk/freenet/src/freenet/client/async/ClientPutState.java  2006-02-03 
22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/freenet/client/async/ClientPutState.java      
2006-02-03 22:55:27 UTC (rev 7999)
@@ -1,14 +0,0 @@
-package freenet.client.async;
-
-/**
- * ClientPutState
- * 
- * Represents a state in the insert process.
- */
-public interface ClientPutState {
-
-       public abstract BaseClientPutter getParent();
-
-       public abstract void cancel();
-
-}

Copied: branches/freenet-freejvms/src/freenet/client/async/ClientPutState.java 
(from rev 7998, trunk/freenet/src/freenet/client/async/ClientPutState.java)

Deleted: branches/freenet-freejvms/src/freenet/client/async/ClientPutter.java
===================================================================
--- trunk/freenet/src/freenet/client/async/ClientPutter.java    2006-02-03 
22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/freenet/client/async/ClientPutter.java        
2006-02-03 22:55:27 UTC (rev 7999)
@@ -1,105 +0,0 @@
-package freenet.client.async;
-
-import freenet.client.ClientMetadata;
-import freenet.client.InsertBlock;
-import freenet.client.InserterContext;
-import freenet.client.InserterException;
-import freenet.client.Metadata;
-import freenet.client.events.SplitfileProgressEvent;
-import freenet.keys.ClientKey;
-import freenet.keys.FreenetURI;
-import freenet.support.Bucket;
-import freenet.support.Logger;
-
-public class ClientPutter extends BaseClientPutter implements 
PutCompletionCallback {
-
-       final ClientCallback client;
-       final Bucket data;
-       final FreenetURI targetURI;
-       final ClientMetadata cm;
-       final InserterContext ctx;
-       private ClientPutState currentState;
-       private boolean finished;
-       private final boolean getCHKOnly;
-       private final boolean isMetadata;
-       private FreenetURI uri;
-
-       public ClientPutter(ClientCallback client, Bucket data, FreenetURI 
targetURI, ClientMetadata cm, InserterContext ctx,
-                       ClientRequestScheduler scheduler, short priorityClass, 
boolean getCHKOnly, boolean isMetadata, Object clientContext) {
-               super(priorityClass, scheduler, clientContext);
-               this.cm = cm;
-               this.isMetadata = isMetadata;
-               this.getCHKOnly = getCHKOnly;
-               this.client = client;
-               this.data = data;
-               this.targetURI = targetURI;
-               this.ctx = ctx;
-               this.finished = false;
-               this.cancelled = false;
-       }
-
-       public void start() throws InserterException {
-               try {
-                       currentState =
-                               new SingleFileInserter(this, this, new 
InsertBlock(data, cm, targetURI), isMetadata, ctx, false, getCHKOnly, false);
-                       ((SingleFileInserter)currentState).start();
-               } catch (InserterException e) {
-                       finished = true;
-                       currentState = null;
-               }
-       }
-
-       public void onSuccess(ClientPutState state) {
-               finished = true;
-               currentState = null;
-               client.onSuccess(this);
-       }
-
-       public void onFailure(InserterException e, ClientPutState state) {
-               finished = true;
-               currentState = null;
-               client.onFailure(e, this);
-       }
-
-       public void onEncode(ClientKey key, ClientPutState state) {
-               this.uri = key.getURI();
-               client.onGeneratedURI(uri, this);
-       }
-       
-       public void cancel() {
-               synchronized(this) {
-                       super.cancel();
-                       if(currentState != null)
-                               currentState.cancel();
-               }
-       }
-       
-       public boolean isFinished() {
-               return finished || cancelled;
-       }
-
-       public FreenetURI getURI() {
-               return uri;
-       }
-
-       public synchronized void onTransition(ClientPutState oldState, 
ClientPutState newState) {
-               if(currentState == oldState)
-                       currentState = newState;
-               else
-                       Logger.error(this, "onTransition: cur="+currentState+", 
old="+oldState+", new="+newState);
-       }
-
-       public void onMetadata(Metadata m, ClientPutState state) {
-               Logger.error(this, "Got metadata on "+this+" from "+state+" 
(this means the metadata won't be inserted)");
-       }
-       
-       public void notifyClients() {
-               ctx.eventProducer.produceEvent(new 
SplitfileProgressEvent(this.totalBlocks, this.successfulBlocks, 
this.failedBlocks, this.fatallyFailedBlocks, this.minSuccessBlocks, 
this.blockSetFinalized));
-       }
-       
-       public void onBlockSetFinished(ClientPutState state) {
-               Logger.minor(this, "Set finished", new Exception("debug"));
-               blockSetFinalized();
-       }
-       
-}
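
onTransition() above guards against stale hand-offs: the current state pointer only advances if the caller really was the current state, otherwise the attempt is logged and refused. A tiny sketch of that guarded hand-off with invented names:

// Illustrative only: guarded state hand-off as in ClientPutter.onTransition().
public class StateHandoffSketch {

    private Object currentState;

    public synchronized void setInitial(Object state) { currentState = state; }

    public synchronized void onTransition(Object oldState, Object newState) {
        if (currentState == oldState) {
            currentState = newState;   // expected hand-off
        } else {
            // A stale or foreign state tried to advance the machine: refuse it.
            System.err.println("onTransition: cur=" + currentState
                    + ", old=" + oldState + ", new=" + newState);
        }
    }

    public static void main(String[] args) {
        StateHandoffSketch s = new StateHandoffSketch();
        Object a = "stateA", b = "stateB";
        s.setInitial(a);
        s.onTransition(a, b);   // accepted
        s.onTransition(a, b);   // refused: a is no longer current
    }
}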

Copied: branches/freenet-freejvms/src/freenet/client/async/ClientPutter.java 
(from rev 7998, trunk/freenet/src/freenet/client/async/ClientPutter.java)

Deleted: branches/freenet-freejvms/src/freenet/client/async/ClientRequest.java
===================================================================
--- trunk/freenet/src/freenet/client/async/ClientRequest.java   2006-02-03 
22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/freenet/client/async/ClientRequest.java       
2006-02-03 22:55:27 UTC (rev 7999)
@@ -1,108 +0,0 @@
-package freenet.client.async;
-
-import freenet.keys.FreenetURI;
-import freenet.support.Logger;
-
-/** A high level client request. A request (either fetch or put) started
- * by a Client. Has a suitable context and a URI; is fulfilled only when
- * we have followed all the redirects etc, or have an error. Can be 
- * retried.
- */
-public abstract class ClientRequest {
-
-       // FIXME move the priority classes from RequestStarter here
-       protected short priorityClass;
-       protected boolean cancelled;
-       final ClientRequestScheduler scheduler;
-       protected final Object client;
-       
-       public short getPriorityClass() {
-               return priorityClass;
-       }
-       
-       protected ClientRequest(short priorityClass, ClientRequestScheduler 
scheduler, Object client) {
-               this.priorityClass = priorityClass;
-               this.scheduler = scheduler;
-               this.client = client;
-       }
-       
-       public void cancel() {
-               cancelled = true;
-       }
-       
-       public boolean isCancelled() {
-               return cancelled;
-       }
-       
-       public abstract FreenetURI getURI();
-       
-       public abstract boolean isFinished();
-       
-       /** Total number of blocks this request has tried to fetch/put. */
-       protected int totalBlocks;
-       /** Number of blocks we have successfully completed a fetch/put for. */
-       protected int successfulBlocks;
-       /** Number of blocks which have failed. */
-       protected int failedBlocks;
-       /** Number of blocks which have failed fatally. */
-       protected int fatallyFailedBlocks;
-       /** Minimum number of blocks required to succeed for success. */
-       protected int minSuccessBlocks;
-       /** Has totalBlocks stopped growing? */
-       protected boolean blockSetFinalized;
-       
-       public void blockSetFinalized() {
-               synchronized(this) {
-                       if(blockSetFinalized) return;
-                       blockSetFinalized = true;
-               }
-               notifyClients();
-       }
-       
-       public synchronized void addBlock() {
-               if(blockSetFinalized)
-                       Logger.error(this, "addBlock() but set finalized! on 
"+this, new Exception("error"));
-               totalBlocks++;
-       }
-       
-       public synchronized void addBlocks(int num) {
-               if(blockSetFinalized)
-                       Logger.error(this, "addBlock() but set finalized! on 
"+this, new Exception("error"));
-               totalBlocks+=num;
-       }
-       
-       public void completedBlock(boolean dontNotify) {
-               Logger.minor(this, "Completed block ("+dontNotify+")");
-               synchronized(this) {
-                       successfulBlocks++;
-                       if(dontNotify) return;
-               }
-               notifyClients();
-       }
-       
-       public void failedBlock() {
-               synchronized(this) {
-                       failedBlocks++;
-               }
-               notifyClients();
-       }
-       
-       public void fatallyFailedBlock() {
-               synchronized(this) {
-                       fatallyFailedBlocks++;
-               }
-               notifyClients();
-       }
-       
-       public synchronized void addMustSucceedBlocks(int blocks) {
-               minSuccessBlocks += blocks;
-       }
-       
-       public abstract void notifyClients();
-
-       /** Get client context object */
-       public Object getClient() {
-               return client;
-       }
-
-}
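
Note how the counters above are incremented inside the object lock, while notifyClients() is invoked after the synchronized block, so listeners are never called with the request monitor held. A condensed sketch of that increment-then-notify pattern; the Listener interface is invented for illustration.

// Illustrative only: update progress counters under a lock, then notify
// listeners outside it, as ClientRequest.completedBlock()/failedBlock() do.
public class ProgressCounterSketch {

    public interface Listener { void onProgress(int total, int succeeded, int failed); }

    private final Listener listener;
    private int total, succeeded, failed;

    public ProgressCounterSketch(Listener listener) { this.listener = listener; }

    public synchronized void addBlocks(int n) { total += n; }

    public void completedBlock() {
        synchronized (this) { succeeded++; }
        notifyListener();              // deliberately outside the lock
    }

    public void failedBlock() {
        synchronized (this) { failed++; }
        notifyListener();
    }

    private void notifyListener() {
        int t, s, f;
        synchronized (this) { t = total; s = succeeded; f = failed; }
        listener.onProgress(t, s, f);
    }

    public static void main(String[] args) {
        ProgressCounterSketch p = new ProgressCounterSketch(new Listener() {
            public void onProgress(int t, int s, int f) {
                System.out.println(s + " ok, " + f + " failed, of " + t);
            }
        });
        p.addBlocks(3);
        p.completedBlock();
        p.failedBlock();
    }
}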

Copied: branches/freenet-freejvms/src/freenet/client/async/ClientRequest.java 
(from rev 7998, trunk/freenet/src/freenet/client/async/ClientRequest.java)

Deleted: 
branches/freenet-freejvms/src/freenet/client/async/ClientRequestScheduler.java
===================================================================
--- trunk/freenet/src/freenet/client/async/ClientRequestScheduler.java  
2006-02-03 22:35:15 UTC (rev 7998)
+++ 
branches/freenet-freejvms/src/freenet/client/async/ClientRequestScheduler.java  
    2006-02-03 22:55:27 UTC (rev 7999)
@@ -1,126 +0,0 @@
-package freenet.client.async;
-
-import freenet.crypt.RandomSource;
-import freenet.keys.ClientKeyBlock;
-import freenet.keys.KeyVerifyException;
-import freenet.node.LowLevelGetException;
-import freenet.node.Node;
-import freenet.node.RequestStarter;
-import freenet.support.Logger;
-import freenet.support.SectoredRandomGrabArrayWithInt;
-import freenet.support.SortedVectorByNumber;
-
-/**
- * Every X seconds, the RequestSender calls the ClientRequestScheduler to
- * ask for a request to start. A request is then started, in its own 
- * thread. It is removed at that point.
- */
-public class ClientRequestScheduler implements RequestScheduler {
-
-       /**
-        * Structure:
-        * array (by priority) -> // one element per possible priority
-        * SortedVectorByNumber (by # retries) -> // contains each current 
#retries
-        * RandomGrabArray // contains each element, allows fast 
fetch-and-drop-a-random-element
-        * 
-        * To speed up fetching, a RGA or SVBN must only exist if it is 
non-empty.
-        */
-       final SortedVectorByNumber[] priorities;
-       // we have one for inserts and one for requests
-       final boolean isInsertScheduler;
-       final RandomSource random;
-       private final RequestStarter starter;
-       private final Node node;
-       
-       public ClientRequestScheduler(boolean forInserts, RandomSource random, 
RequestStarter starter, Node node) {
-               this.starter = starter;
-               this.random = random;
-               this.node = node;
-               this.isInsertScheduler = forInserts;
-               priorities = new 
SortedVectorByNumber[RequestStarter.NUMBER_OF_PRIORITY_CLASSES];
-       }
-       
-       public void register(SendableRequest req) {
-               Logger.minor(this, "Registering "+req, new Exception("debug"));
-               if((!isInsertScheduler) && req instanceof ClientPutter)
-                       throw new IllegalArgumentException("Expected a 
ClientPut: "+req);
-               if(req instanceof SendableGet) {
-                       SendableGet getter = (SendableGet)req;
-                       ClientKeyBlock block;
-                       try {
-                               block = node.fetchKey(getter.getKey());
-                       } catch (KeyVerifyException e) {
-                               // Verify exception, probably bogus at source;
-                               // verifies at low-level, but not at decode.
-                               getter.onFailure(new 
LowLevelGetException(LowLevelGetException.DECODE_FAILED));
-                               return;
-                       }
-                       if(block != null) {
-                               Logger.minor(this, "Can fulfill immediately 
from store");
-                               getter.onSuccess(block, true);
-                               return;
-                       }
-               }
-               synchronized(this) {
-                       SectoredRandomGrabArrayWithInt grabber = 
-                               makeGrabArray(req.getPriorityClass(), 
req.getRetryCount());
-                       grabber.add(req.getClient(), req);
-                       Logger.minor(this, "Registered "+req+" on 
prioclass="+req.getPriorityClass()+", retrycount="+req.getRetryCount());
-               }
-               synchronized(starter) {
-                       starter.notifyAll();
-               }
-       }
-       
-       private synchronized SectoredRandomGrabArrayWithInt makeGrabArray(short priorityClass, int retryCount) {
-               SortedVectorByNumber prio = priorities[priorityClass];
-               if(prio == null) {
-                       prio = new SortedVectorByNumber();
-                       priorities[priorityClass] = prio;
-               }
-               SectoredRandomGrabArrayWithInt grabber = (SectoredRandomGrabArrayWithInt) prio.get(retryCount);
-               if(grabber == null) {
-                       grabber = new SectoredRandomGrabArrayWithInt(random, retryCount);
-                       prio.add(grabber);
-                       Logger.minor(this, "Registering retry count "+retryCount+" with prioclass "+priorityClass);
-               }
-               return grabber;
-       }
-
-       public synchronized SendableRequest removeFirst() {
-               // Priorities start at 0
-               Logger.minor(this, "removeFirst()");
-               for(int i=0;i<RequestStarter.MINIMUM_PRIORITY_CLASS;i++) {
-                       SortedVectorByNumber s = priorities[i];
-                       if(s == null) {
-                               Logger.minor(this, "Priority "+i+" is null");
-                               continue;
-                       }
-                       while(true) {
-                               SectoredRandomGrabArrayWithInt rga = (SectoredRandomGrabArrayWithInt) s.getFirst(); // will discard finished items
-                               if(rga == null) {
-                                       Logger.minor(this, "No retrycount's in priority "+i);
-                                       priorities[i] = null;
-                                       break;
-                               }
-                               SendableRequest req = (SendableRequest) rga.removeRandom();
-                               if(rga.isEmpty()) {
-                                       Logger.minor(this, "Removing retrycount "+rga.getNumber());
-                                       s.remove(rga.getNumber());
-                                       if(s.isEmpty()) {
-                                               Logger.minor(this, "Removing priority "+i);
-                                               priorities[i] = null;
-                                       }
-                               }
-                               if(req == null) {
-                                       Logger.minor(this, "No requests in priority "+i+", retrycount "+rga.getNumber()+" ("+rga+")");
-                                       continue;
-                               }
-                               Logger.minor(this, "removeFirst() returning "+req+" ("+rga.getNumber()+")");
-                               return req;
-                       }
-               }
-               Logger.minor(this, "No requests to run");
-               return null;
-       }
-}

Copied: 
branches/freenet-freejvms/src/freenet/client/async/ClientRequestScheduler.java 
(from rev 7998, 
trunk/freenet/src/freenet/client/async/ClientRequestScheduler.java)
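
A minimal sketch of the consumer side implied by the scheduler's class comment: some
starter component (RequestStarter in the real code; its loop is not part of this diff)
repeatedly asks the scheduler for a request and runs each one on its own thread. The
StarterLoopSketch name and the sleep-based polling are assumptions of this sketch; the
actual starter is expected to wait() until register() calls notifyAll() on it.

package freenet.client.async;

import freenet.node.Node;

public class StarterLoopSketch implements Runnable {

    private final RequestScheduler scheduler; // e.g. a ClientRequestScheduler
    private final Node node;

    public StarterLoopSketch(RequestScheduler scheduler, Node node) {
        this.scheduler = scheduler;
        this.node = node;
    }

    public void run() {
        while(!Thread.currentThread().isInterrupted()) {
            final SendableRequest req = scheduler.removeFirst();
            if(req == null) {
                // Nothing runnable yet. Polling is only a stand-in here; the real
                // starter blocks until the scheduler notifies it of a new request.
                try { Thread.sleep(1000); } catch (InterruptedException e) { return; }
                continue;
            }
            // One thread per started request, as described in the class comment above.
            new Thread(new Runnable() {
                public void run() { req.send(node); }
            }).start();
        }
    }
}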

Deleted: 
branches/freenet-freejvms/src/freenet/client/async/GetCompletionCallback.java
===================================================================
--- trunk/freenet/src/freenet/client/async/GetCompletionCallback.java   
2006-02-03 22:35:15 UTC (rev 7998)
+++ 
branches/freenet-freejvms/src/freenet/client/async/GetCompletionCallback.java   
    2006-02-03 22:55:27 UTC (rev 7999)
@@ -1,21 +0,0 @@
-package freenet.client.async;
-
-import freenet.client.FetchException;
-import freenet.client.FetchResult;
-
-/**
- * Callback called when part of a get request completes - either with a 
- * Bucket full of data, or with an error.
- */
-public interface GetCompletionCallback {
-
-       public void onSuccess(FetchResult result, ClientGetState state);
-       
-       public void onFailure(FetchException e, ClientGetState state);
-       
-       /** Called when the ClientGetState knows that it knows about
-        * all the blocks it will need to fetch.
-        */
-       public void onBlockSetFinished(ClientGetState state);
-
-}

Copied: 
branches/freenet-freejvms/src/freenet/client/async/GetCompletionCallback.java 
(from rev 7998, 
trunk/freenet/src/freenet/client/async/GetCompletionCallback.java)
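
For illustration, a do-nothing-but-log implementation of the interface above (the
LoggingGetCallback name is made up for this sketch; ClientGetter, added in the same
commit, is the real top-level consumer of these events):

package freenet.client.async;

import freenet.client.FetchException;
import freenet.client.FetchResult;

public class LoggingGetCallback implements GetCompletionCallback {

    public void onSuccess(FetchResult result, ClientGetState state) {
        System.out.println("Fetched " + result.asBucket().size() + " bytes via " + state);
    }

    public void onFailure(FetchException e, ClientGetState state) {
        System.err.println("Fetch failed in " + state + ": " + e);
    }

    public void onBlockSetFinished(ClientGetState state) {
        // The fetcher now knows the complete set of blocks it will need.
        System.out.println("Block set finalised for " + state);
    }
}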

Deleted: 
branches/freenet-freejvms/src/freenet/client/async/MinimalSplitfileBlock.java
===================================================================
--- trunk/freenet/src/freenet/client/async/MinimalSplitfileBlock.java   
2006-02-03 22:35:15 UTC (rev 7998)
+++ 
branches/freenet-freejvms/src/freenet/client/async/MinimalSplitfileBlock.java   
    2006-02-03 22:55:27 UTC (rev 7999)
@@ -1,32 +0,0 @@
-package freenet.client.async;
-
-import freenet.client.SplitfileBlock;
-import freenet.keys.FreenetURI;
-import freenet.support.Bucket;
-
-public class MinimalSplitfileBlock implements SplitfileBlock {
-
-       public final int number;
-       Bucket data;
-       
-       public MinimalSplitfileBlock(int n) {
-               this.number = n;
-       }
-
-       public int getNumber() {
-               return number;
-       }
-
-       public boolean hasData() {
-               return data != null;
-       }
-
-       public Bucket getData() {
-               return data;
-       }
-
-       public void setData(Bucket data) {
-               this.data = data;
-       }
-
-}

Copied: 
branches/freenet-freejvms/src/freenet/client/async/MinimalSplitfileBlock.java 
(from rev 7998, 
trunk/freenet/src/freenet/client/async/MinimalSplitfileBlock.java)
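
A small usage sketch: a MinimalSplitfileBlock is just a numbered slot whose data Bucket
is filled in once the payload exists. The SplitfileBlockFillSketch class and the
BucketFactory parameter are assumptions of this sketch (BucketFactory is taken to be the
freenet.support type behind InserterContext.bf); makeImmutableBucket is used the same
way by SimpleManifestPutter later in this commit.

package freenet.client.async;

import java.io.IOException;

import freenet.client.SplitfileBlock;
import freenet.support.Bucket;
import freenet.support.BucketFactory; // assumed location of BucketFactory
import freenet.support.BucketTools;

public class SplitfileBlockFillSketch {

    public static SplitfileBlock fill(BucketFactory bf, int blockNumber, byte[] payload)
            throws IOException {
        MinimalSplitfileBlock block = new MinimalSplitfileBlock(blockNumber);
        // Wrap the raw bytes in an immutable Bucket, then hand it to the block slot.
        Bucket data = BucketTools.makeImmutableBucket(bf, payload);
        block.setData(data);
        return block;
    }
}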

Deleted: 
branches/freenet-freejvms/src/freenet/client/async/MultiPutCompletionCallback.java
===================================================================
--- trunk/freenet/src/freenet/client/async/MultiPutCompletionCallback.java      
2006-02-03 22:35:15 UTC (rev 7998)
+++ 
branches/freenet-freejvms/src/freenet/client/async/MultiPutCompletionCallback.java
  2006-02-03 22:55:27 UTC (rev 7999)
@@ -1,137 +0,0 @@
-package freenet.client.async;
-
-import java.util.LinkedList;
-import java.util.ListIterator;
-
-import freenet.client.InserterException;
-import freenet.client.Metadata;
-import freenet.keys.ClientKey;
-import freenet.support.Logger;
-
-public class MultiPutCompletionCallback implements PutCompletionCallback, ClientPutState {
-
-       // LinkedList's rather than HashSet's for memory reasons.
-       // This class will not be used with large sets, so O(n) is cheaper than O(1) -
-       // at least it is on memory!
-       private final LinkedList waitingFor;
-       private final LinkedList waitingForBlockSet;
-       private final PutCompletionCallback cb;
-       private ClientPutState generator;
-       private final BaseClientPutter parent;
-       private InserterException e;
-       private boolean finished;
-       private boolean started;
-       
-       public MultiPutCompletionCallback(PutCompletionCallback cb, BaseClientPutter parent) {
-               this.cb = cb;
-               this.waitingFor = new LinkedList();
-               this.waitingForBlockSet = new LinkedList();
-               this.parent = parent;
-               finished = false;
-       }
-
-       public void onSuccess(ClientPutState state) {
-               synchronized(this) {
-                       if(finished) return;
-                       waitingFor.remove(state);
-                       if(!(waitingFor.isEmpty() && started))
-                               return;
-               }
-               complete(null);
-       }
-
-       public void onFailure(InserterException e, ClientPutState state) {
-               synchronized(this) {
-                       if(finished) return;
-                       waitingFor.remove(state);
-                       if(!(waitingFor.isEmpty() && started)) {
-                               this.e = e;
-                               return;
-                       }
-               }
-               complete(e);
-       }
-
-       private synchronized void complete(InserterException e) {
-               finished = true;
-               if(e != null)
-                       cb.onFailure(e, this);
-               else
-                       cb.onSuccess(this);
-       }
-
-       public synchronized void addURIGenerator(ClientPutState ps) {
-               add(ps);
-               generator = ps;
-       }
-       
-       public synchronized void add(ClientPutState ps) {
-               if(finished) return;
-               waitingFor.add(ps);
-       }
-
-       public void arm() {
-               boolean allDone;
-               boolean allGotBlocks;
-               synchronized(this) {
-                       started = true;
-                       allDone = waitingFor.isEmpty();
-                       allGotBlocks = waitingForBlockSet.isEmpty();
-               }
-               if(allGotBlocks) {
-                       cb.onBlockSetFinished(this);
-               }
-               if(allDone) {
-                       complete(e);
-               }
-       }
-
-       public BaseClientPutter getParent() {
-               return parent;
-       }
-
-       public void onEncode(ClientKey key, ClientPutState state) {
-               synchronized(this) {
-                       if(state != generator) return;
-               }
-               cb.onEncode(key, this);
-       }
-
-       public void cancel() {
-               ClientPutState[] states = new ClientPutState[waitingFor.size()];
-               synchronized(this) {
-                       states = (ClientPutState[]) waitingFor.toArray(states);
-               }
-               for(int i=0;i<states.length;i++)
-                       states[i].cancel();
-       }
-
-       public synchronized void onTransition(ClientPutState oldState, ClientPutState newState) {
-               if(generator == oldState)
-                       generator = newState;
-               if(oldState == newState) return;
-               for(ListIterator i = waitingFor.listIterator(0);i.hasNext();) {
-                       if(i.next() == oldState) {
-                               i.remove();
-                               i.add(newState);
-                       }
-               }
-       }
-
-       public void onMetadata(Metadata m, ClientPutState state) {
-               if(generator == state) {
-                       cb.onMetadata(m, this);
-               } else {
-                       Logger.error(this, "Got metadata for "+state);
-               }
-       }
-
-       public void onBlockSetFinished(ClientPutState state) {
-               synchronized(this) {
-                       this.waitingForBlockSet.remove(state);
-                       if(!started) return;
-               }
-               cb.onBlockSetFinished(this);
-       }
-
-}

Copied: 
branches/freenet-freejvms/src/freenet/client/async/MultiPutCompletionCallback.java
 (from rev 7998, 
trunk/freenet/src/freenet/client/async/MultiPutCompletionCallback.java)
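
A sketch of how a caller might aggregate two sub-inserts behind a single callback. The
MultiPutWiringSketch class and its wire() helper are invented for illustration; the real
wiring is done by the inserter states elsewhere in this commit, but the add /
addURIGenerator / arm() sequence below is the intended usage of the class above.

package freenet.client.async;

public class MultiPutWiringSketch {

    public static MultiPutCompletionCallback wire(PutCompletionCallback cb, BaseClientPutter parent,
            ClientPutState dataInsert, ClientPutState metadataInsert) {
        MultiPutCompletionCallback mcb = new MultiPutCompletionCallback(cb, parent);
        mcb.add(dataInsert);                 // tracked until its onSuccess/onFailure arrives
        mcb.addURIGenerator(metadataInsert); // also tracked; only its onEncode() is forwarded to cb
        mcb.arm();                           // once armed, the last completion completes cb
        return mcb;
    }
}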

Deleted: 
branches/freenet-freejvms/src/freenet/client/async/PutCompletionCallback.java
===================================================================
--- trunk/freenet/src/freenet/client/async/PutCompletionCallback.java   
2006-02-03 22:35:15 UTC (rev 7998)
+++ 
branches/freenet-freejvms/src/freenet/client/async/PutCompletionCallback.java   
    2006-02-03 22:55:27 UTC (rev 7999)
@@ -1,31 +0,0 @@
-package freenet.client.async;
-
-import freenet.client.InserterException;
-import freenet.client.Metadata;
-import freenet.keys.ClientKey;
-
-/**
- * Callback called when part of a put request completes.
- */
-public interface PutCompletionCallback {
-
-       public void onSuccess(ClientPutState state);
-       
-       public void onFailure(InserterException e, ClientPutState state);
-
-       public void onEncode(ClientKey key, ClientPutState state);
-       
-       public void onTransition(ClientPutState oldState, ClientPutState newState);
-       
-       /** Only called if explicitly asked for, in which case, generally
-        * the metadata won't be inserted. Won't be called if there isn't
-        * any!
-        */
-       public void onMetadata(Metadata m, ClientPutState state);
-       
-       /** Called when the ClientPutState knows that it knows about
-        * all the blocks it will need to put.
-        */
-       public void onBlockSetFinished(ClientPutState state);
-
-}

Copied: 
branches/freenet-freejvms/src/freenet/client/async/PutCompletionCallback.java 
(from rev 7998, 
trunk/freenet/src/freenet/client/async/PutCompletionCallback.java)
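
Again for illustration, a log-only implementation of the interface (the
LoggingPutCallback name is made up for this sketch; MultiPutCompletionCallback and
SimpleManifestPutter in this commit are the real consumers, and they react to these
events by driving further inserts):

package freenet.client.async;

import freenet.client.InserterException;
import freenet.client.Metadata;
import freenet.keys.ClientKey;

public class LoggingPutCallback implements PutCompletionCallback {

    public void onSuccess(ClientPutState state) {
        System.out.println("Insert finished: " + state);
    }

    public void onFailure(InserterException e, ClientPutState state) {
        System.err.println("Insert failed in " + state + ": " + e);
    }

    public void onEncode(ClientKey key, ClientPutState state) {
        // The key is often known well before the insert itself completes.
        System.out.println("Key encoded: " + key.getURI());
    }

    public void onTransition(ClientPutState oldState, ClientPutState newState) {
        System.out.println("State handover: " + oldState + " -> " + newState);
    }

    public void onMetadata(Metadata m, ClientPutState state) {
        // Only called when the caller explicitly asked for metadata instead of inserting it.
        System.out.println("Received metadata from " + state);
    }

    public void onBlockSetFinished(ClientPutState state) {
        System.out.println("All blocks known for " + state);
    }
}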

Deleted: 
branches/freenet-freejvms/src/freenet/client/async/RequestScheduler.java
===================================================================
--- trunk/freenet/src/freenet/client/async/RequestScheduler.java        
2006-02-03 22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/freenet/client/async/RequestScheduler.java    
2006-02-03 22:55:27 UTC (rev 7999)
@@ -1,7 +0,0 @@
-package freenet.client.async;
-
-public interface RequestScheduler {
-
-       public SendableRequest removeFirst();
-       
-}

Copied: 
branches/freenet-freejvms/src/freenet/client/async/RequestScheduler.java (from 
rev 7998, trunk/freenet/src/freenet/client/async/RequestScheduler.java)
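
The interface is deliberately tiny, so a throwaway stub is easy to write for tests. The
FifoRequestScheduler below is purely illustrative (its name and its add() method are not
part of the interface): it hands requests back in FIFO order and ignores the
priority/retry-count structure that ClientRequestScheduler maintains.

package freenet.client.async;

import java.util.LinkedList;

public class FifoRequestScheduler implements RequestScheduler {

    private final LinkedList queue = new LinkedList();

    public synchronized void add(SendableRequest req) {
        queue.addLast(req);
    }

    public synchronized SendableRequest removeFirst() {
        if(queue.isEmpty()) return null;
        return (SendableRequest) queue.removeFirst();
    }
}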

Deleted: branches/freenet-freejvms/src/freenet/client/async/SendableGet.java
===================================================================
--- trunk/freenet/src/freenet/client/async/SendableGet.java     2006-02-03 
22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/freenet/client/async/SendableGet.java 
2006-02-03 22:55:27 UTC (rev 7999)
@@ -1,20 +0,0 @@
-package freenet.client.async;
-
-import freenet.keys.ClientKey;
-import freenet.keys.ClientKeyBlock;
-import freenet.node.LowLevelGetException;
-
-/**
- * A low-level key fetch which can be sent immediately. @see SendableRequest
- */
-public interface SendableGet extends SendableRequest {
-
-       public ClientKey getKey();
-       
-       /** Called when/if the low-level request succeeds. */
-       public void onSuccess(ClientKeyBlock block, boolean fromStore);
-       
-       /** Called when/if the low-level request fails. */
-       public void onFailure(LowLevelGetException e);
-       
-}

Copied: branches/freenet-freejvms/src/freenet/client/async/SendableGet.java 
(from rev 7998, trunk/freenet/src/freenet/client/async/SendableGet.java)

Deleted: branches/freenet-freejvms/src/freenet/client/async/SendableInsert.java
===================================================================
--- trunk/freenet/src/freenet/client/async/SendableInsert.java  2006-02-03 
22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/freenet/client/async/SendableInsert.java      
2006-02-03 22:55:27 UTC (rev 7999)
@@ -1,26 +0,0 @@
-package freenet.client.async;
-
-import freenet.keys.ClientKeyBlock;
-import freenet.node.LowLevelPutException;
-
-/**
- * Callback interface for a low level insert, which is immediately sendable. These
- * should be registered on the ClientRequestScheduler when we want to send them. It will
- * then, when it is time to send, create a thread, send the request, and call the
- * callback below.
- */
-public interface SendableInsert extends SendableRequest {
-
-       /** Get the ClientKeyBlock to insert. This may be created
-        * just-in-time, and may return null; ClientRequestScheduler
-        * will simply unregister the SendableInsert if this happens.
-        */
-       public ClientKeyBlock getBlock();
-       
-       /** Called when we successfully insert the data */
-       public void onSuccess();
-       
-       /** Called when we don't! */
-       public void onFailure(LowLevelPutException e);
-
-}

Copied: branches/freenet-freejvms/src/freenet/client/async/SendableInsert.java 
(from rev 7998, trunk/freenet/src/freenet/client/async/SendableInsert.java)

Deleted: branches/freenet-freejvms/src/freenet/client/async/SendableRequest.java
===================================================================
--- trunk/freenet/src/freenet/client/async/SendableRequest.java 2006-02-03 
22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/freenet/client/async/SendableRequest.java     
2006-02-03 22:55:27 UTC (rev 7999)
@@ -1,22 +0,0 @@
-package freenet.client.async;
-
-import freenet.node.Node;
-import freenet.support.RandomGrabArrayItem;
-
-/**
- * A low-level request which can be sent immediately. These are registered
- * on the ClientRequestScheduler.
- */
-public interface SendableRequest extends RandomGrabArrayItem {
-       
-       public short getPriorityClass();
-       
-       public int getRetryCount();
-       
-       /** ONLY called by RequestStarter */
-       public void send(Node node);
-       
-       /** Get client context object */
-       public Object getClient();
-       
-}

Copied: branches/freenet-freejvms/src/freenet/client/async/SendableRequest.java 
(from rev 7998, trunk/freenet/src/freenet/client/async/SendableRequest.java)

Deleted: 
branches/freenet-freejvms/src/freenet/client/async/SimpleManifestPutter.java
===================================================================
--- trunk/freenet/src/freenet/client/async/SimpleManifestPutter.java    
2006-02-03 22:35:15 UTC (rev 7998)
+++ 
branches/freenet-freejvms/src/freenet/client/async/SimpleManifestPutter.java    
    2006-02-03 22:55:27 UTC (rev 7999)
@@ -1,338 +0,0 @@
-package freenet.client.async;
-
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-
-import freenet.client.ClientMetadata;
-import freenet.client.DefaultMIMETypes;
-import freenet.client.InsertBlock;
-import freenet.client.InserterContext;
-import freenet.client.InserterException;
-import freenet.client.Metadata;
-import freenet.client.events.SplitfileProgressEvent;
-import freenet.keys.ClientKey;
-import freenet.keys.FreenetURI;
-import freenet.support.Bucket;
-import freenet.support.BucketTools;
-import freenet.support.Logger;
-
-public class SimpleManifestPutter extends BaseClientPutter implements PutCompletionCallback {
-       // Only implements PutCompletionCallback for the final metadata insert
-
-       private class PutHandler extends BaseClientPutter implements PutCompletionCallback {
-
-               protected PutHandler(String name, Bucket data, ClientMetadata cm, boolean getCHKOnly) throws InserterException {
-                       super(SimpleManifestPutter.this.getPriorityClass(), SimpleManifestPutter.this.scheduler, SimpleManifestPutter.this.client);
-                       this.name = name;
-                       this.cm = cm;
-                       InsertBlock block = 
-                               new InsertBlock(data, cm, FreenetURI.EMPTY_CHK_URI);
-                       this.origSFI =
-                               new SingleFileInserter(this, this, block, false, ctx, false, getCHKOnly, false);
-                       currentState = origSFI;
-                       metadata = null;
-               }
-
-               private SingleFileInserter origSFI;
-               private ClientPutState currentState;
-               private ClientMetadata cm;
-               private final String name;
-               private byte[] metadata;
-               private boolean finished;
-               
-               public void start() throws InserterException {
-                       origSFI.start();
-                       origSFI = null;
-               }
-               
-               public FreenetURI getURI() {
-                       return null;
-               }
-
-               public boolean isFinished() {
-                       return finished || cancelled || 
SimpleManifestPutter.this.cancelled;
-               }
-
-               public void onSuccess(ClientPutState state) {
-                       Logger.minor(this, "Completed "+this);
-                       synchronized(SimpleManifestPutter.this) {
-                               runningPutHandlers.remove(this);
-                               if(!runningPutHandlers.isEmpty()) {
-                                       return;
-                               }
-                       }
-                       insertedAllFiles();
-               }
-
-               public void onFailure(InserterException e, ClientPutState 
state) {
-                       Logger.minor(this, "Failed: "+this+" - "+e, e);
-                       fail(e);
-               }
-
-               public void onEncode(ClientKey key, ClientPutState state) {
-                       if(metadata == null) {
-                               // Don't have metadata yet
-                               // Do have key
-                               // So make a redirect to the key
-                               Metadata m =
-                                       new Metadata(Metadata.SIMPLE_REDIRECT, 
key.getURI(), cm);
-                               onMetadata(m, null);
-                       }
-               }
-
-               public void onTransition(ClientPutState oldState, 
ClientPutState newState) {
-                       if(oldState == this) {
-                               // We do not need to have a hashtable of state 
-> PutHandler.
-                               // Because we can just pull the parent off the 
state!
-                               this.currentState = newState;
-                       }
-               }
-
-               public void onMetadata(Metadata m, ClientPutState state) {
-                       if(metadata != null) {
-                               Logger.error(this, "Reassigning metadata", new 
Exception("debug"));
-                               return;
-                       }
-                       metadata = m.writeToByteArray();
-                       synchronized(SimpleManifestPutter.this) {
-                               putHandlersWaitingForMetadata.remove(this);
-                               if(!putHandlersWaitingForMetadata.isEmpty()) 
return;
-                               gotAllMetadata();
-                       }
-               }
-
-               public void addBlock() {
-                       SimpleManifestPutter.this.addBlock();
-               }
-               
-               public void addBlocks(int num) {
-                       SimpleManifestPutter.this.addBlocks(num);
-               }
-               
-               public void completedBlock(boolean dontNotify) {
-                       SimpleManifestPutter.this.completedBlock(dontNotify);
-               }
-               
-               public void failedBlock() {
-                       SimpleManifestPutter.this.failedBlock();
-               }
-               
-               public void fatallyFailedBlock() {
-                       SimpleManifestPutter.this.fatallyFailedBlock();
-               }
-               
-               public void addMustSucceedBlocks(int blocks) {
-                       SimpleManifestPutter.this.addMustSucceedBlocks(blocks);
-               }
-               
-               public void notifyClients() {
-                       // FIXME generate per-filename events???
-               }
-
-               public void onBlockSetFinished(ClientPutState state) {
-                       synchronized(SimpleManifestPutter.this) {
-                               waitingForBlockSets.remove(this);
-                               if(!waitingForBlockSets.isEmpty()) return;
-                       }
-                       SimpleManifestPutter.this.blockSetFinalized();
-               }
-       }
-
-       private final HashMap putHandlersByName;
-       private final HashSet runningPutHandlers;
-       private final HashSet putHandlersWaitingForMetadata;
-       private final HashSet waitingForBlockSets;
-       private FreenetURI finalURI;
-       private FreenetURI targetURI;
-       private boolean finished;
-       private final InserterContext ctx;
-       private final ClientCallback cb;
-       private final boolean getCHKOnly;
-       private boolean insertedAllFiles;
-       private boolean insertedManifest;
-       private ClientPutState currentMetadataInserterState;
-       private final String defaultName;
-       private final static String[] defaultDefaultNames =
-               new String[] { "index.html", "index.htm", "default.html", 
"default.htm" };
-       
-       public SimpleManifestPutter(ClientCallback cb, ClientRequestScheduler sched, 
-                       HashMap bucketsByName, short prioClass, FreenetURI target, 
-                       String defaultName, InserterContext ctx, boolean getCHKOnly, Object clientContext) throws InserterException {
-               super(prioClass, sched, clientContext);
-               this.defaultName = defaultName;
-               this.targetURI = target;
-               this.cb = cb;
-               this.ctx = ctx;
-               this.getCHKOnly = getCHKOnly;
-               putHandlersByName = new HashMap();
-               runningPutHandlers = new HashSet();
-               putHandlersWaitingForMetadata = new HashSet();
-               waitingForBlockSets = new HashSet();
-               Iterator it = bucketsByName.keySet().iterator();
-               while(it.hasNext()) {
-                       String name = (String) it.next();
-                       Bucket data = (Bucket) bucketsByName.get(name);
-                       String mimeType = DefaultMIMETypes.guessMIMEType(name);
-                       ClientMetadata cm;
-                       if(mimeType.equals(DefaultMIMETypes.DEFAULT_MIME_TYPE))
-                               cm = null;
-                       else
-                               cm = new ClientMetadata(mimeType);
-                       PutHandler ph;
-                       try {
-                               ph = new PutHandler(name, data, cm, getCHKOnly);
-                       } catch (InserterException e) {
-                               cancelAndFinish();
-                               throw e;
-                       }
-                       putHandlersByName.put(name, ph);
-                       runningPutHandlers.add(ph);
-                       putHandlersWaitingForMetadata.add(ph);
-               }
-               it = putHandlersByName.values().iterator();
-               while(it.hasNext()) {
-                       PutHandler ph = (PutHandler) it.next();
-                       try {
-                               ph.start();
-                       } catch (InserterException e) {
-                               cancelAndFinish();
-                               throw e;
-                       }
-               }               
-       }
-       
-       public FreenetURI getURI() {
-               return finalURI;
-       }
-
-       public boolean isFinished() {
-               return finished || cancelled;
-       }
-
-       private void gotAllMetadata() {
-               Logger.minor(this, "Got all metadata");
-               HashMap namesToByteArrays = new HashMap();
-               Iterator i = putHandlersByName.values().iterator();
-               while(i.hasNext()) {
-                       PutHandler ph = (PutHandler) i.next();
-                       String name = ph.name;
-                       byte[] meta = ph.metadata;
-                       namesToByteArrays.put(name, meta);
-               }
-               if(defaultName != null) {
-                       byte[] meta = (byte[]) 
namesToByteArrays.get(defaultName);
-                       if(meta == null) {
-                               fail(new 
InserterException(InserterException.INVALID_URI, "Default name "+defaultName+" 
does not exist", null));
-                               return;
-                       }
-                       namesToByteArrays.put("", meta);
-               } else {
-                       for(int j=0;j<defaultDefaultNames.length;j++) {
-                               String name = defaultDefaultNames[j];
-                               byte[] meta = (byte[]) 
namesToByteArrays.get(name);
-                               if(meta != null) {
-                                       namesToByteArrays.put("", meta);
-                                       break;
-                               }
-                       }
-               }
-               Metadata meta =
-                       
Metadata.mkRedirectionManifestWithMetadata(namesToByteArrays);
-               Bucket bucket;
-               try {
-                       bucket = BucketTools.makeImmutableBucket(ctx.bf, 
meta.writeToByteArray());
-               } catch (IOException e) {
-                       fail(new 
InserterException(InserterException.BUCKET_ERROR, e, null));
-                       return;
-               }
-               InsertBlock block =
-                       new InsertBlock(bucket, null, targetURI);
-               try {
-                       SingleFileInserter metadataInserter = 
-                               new SingleFileInserter(this, this, block, true, 
ctx, false, getCHKOnly, false);
-                       this.currentMetadataInserterState = metadataInserter;
-                       metadataInserter.start();
-               } catch (InserterException e) {
-                       fail(e);
-               }
-       }
-       
-       private void insertedAllFiles() {
-               synchronized(this) {
-                       insertedAllFiles = true;
-                       if(finished || cancelled) return;
-                       if(!insertedManifest) return;
-                       finished = true;
-               }
-               complete();
-       }
-       
-       private void complete() {
-               cb.onSuccess(this);
-       }
-
-       private void fail(InserterException e) {
-               // Cancel all, then call the callback
-               cancelAndFinish();
-               
-               cb.onFailure(e, this);
-       }
-
-       private void cancelAndFinish() {
-               PutHandler[] running;
-               synchronized(this) {
-                       if(finished) return;
-                       running = (PutHandler[]) runningPutHandlers.toArray(new 
PutHandler[runningPutHandlers.size()]);
-                       finished = true;
-               }
-               
-               for(int i=0;i<running.length;i++) {
-                       running[i].cancel();
-               }
-       }
-       
-       public void onSuccess(ClientPutState state) {
-               synchronized(this) {
-                       insertedManifest = true;
-                       if(finished) return;
-                       if(!insertedAllFiles) return;
-                       finished = true;
-               }
-               complete();
-       }
-       
-       public void onFailure(InserterException e, ClientPutState state) {
-               // FIXME check state == currentMetadataInserterState ??
-               fail(e);
-       }
-       
-       public void onEncode(ClientKey key, ClientPutState state) {
-               this.finalURI = key.getURI();
-               Logger.minor(this, "Got metadata key: "+finalURI);
-               cb.onGeneratedURI(finalURI, this);
-       }
-       
-       public void onTransition(ClientPutState oldState, ClientPutState 
newState) {
-               if(oldState == currentMetadataInserterState)
-                       currentMetadataInserterState = newState;
-               else
-                       Logger.error(this, "Current state = 
"+currentMetadataInserterState+" called onTransition(old="+oldState+", 
new="+newState+")", 
-                                       new Exception("debug"));
-       }
-       
-       public void onMetadata(Metadata m, ClientPutState state) {
-               Logger.error(this, "Got metadata from "+state+" on "+this+" 
(metadata inserter = "+currentMetadataInserterState);
-               fail(new InserterException(InserterException.INTERNAL_ERROR));
-       }
-
-       public void notifyClients() {
-               ctx.eventProducer.produceEvent(new 
SplitfileProgressEvent(this.totalBlocks, this.successfulBlocks, 
this.failedBlocks, this.fatallyFailedBlocks, this.minSuccessBlocks, 
this.blockSetFinalized));
-       }
-
-       public void onBlockSetFinished(ClientPutState state) {
-               this.blockSetFinalized();
-       }
-
-}

Copied: 
branches/freenet-freejvms/src/freenet/client/async/SimpleManifestPutter.java 
(from rev 7998, 
trunk/freenet/src/freenet/client/async/SimpleManifestPutter.java)
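
A sketch of kicking off a small site insert with the class above. The
ManifestInsertSketch class is invented for illustration; cb, sched, ctx, target and the
two Buckets are assumed to come from the surrounding client setup, the priority literal
is only a placeholder (0 appears to be the highest class, judging by
ClientRequestScheduler.removeFirst()), and a real caller would pass its own client
context object rather than null.

package freenet.client.async;

import java.util.HashMap;

import freenet.client.InserterContext;
import freenet.client.InserterException;
import freenet.keys.FreenetURI;
import freenet.support.Bucket;

public class ManifestInsertSketch {

    public static SimpleManifestPutter insertSite(ClientCallback cb, ClientRequestScheduler sched,
            InserterContext ctx, FreenetURI target, Bucket indexPage, Bucket stylesheet)
            throws InserterException {
        HashMap bucketsByName = new HashMap();
        bucketsByName.put("index.html", indexPage);
        bucketsByName.put("style.css", stylesheet);
        // The constructor builds and starts one PutHandler per file;
        // cb.onGeneratedURI() fires once the top-level metadata key is encoded.
        return new SimpleManifestPutter(cb, sched, bucketsByName,
                (short) 0, target, "index.html", ctx,
                false /* getCHKOnly */, null /* clientContext; real callers pass their own */);
    }
}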

Deleted: 
branches/freenet-freejvms/src/freenet/client/async/SingleBlockInserter.java
===================================================================
--- trunk/freenet/src/freenet/client/async/SingleBlockInserter.java     
2006-02-03 22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/freenet/client/async/SingleBlockInserter.java 
2006-02-03 22:55:27 UTC (rev 7999)
@@ -1,255 +0,0 @@
-package freenet.client.async;
-
-import java.io.IOException;
-import java.lang.ref.SoftReference;
-import java.net.MalformedURLException;
-
-import freenet.client.FailureCodeTracker;
-import freenet.client.InserterContext;
-import freenet.client.InserterException;
-import freenet.keys.CHKEncodeException;
-import freenet.keys.ClientCHKBlock;
-import freenet.keys.ClientKey;
-import freenet.keys.ClientKeyBlock;
-import freenet.keys.FreenetURI;
-import freenet.keys.InsertableClientSSK;
-import freenet.keys.SSKEncodeException;
-import freenet.node.LowLevelPutException;
-import freenet.node.Node;
-import freenet.support.Bucket;
-import freenet.support.Logger;
-
-/**
- * Insert *ONE KEY*.
- */
-public class SingleBlockInserter implements SendableInsert, ClientPutState {
-
-       final Bucket sourceData;
-       final short compressionCodec;
-       final FreenetURI uri; // uses essentially no RAM in the common case of a CHK because we use FreenetURI.EMPTY_CHK_URI
-       FreenetURI resultingURI;
-       final PutCompletionCallback cb;
-       final BaseClientPutter parent;
-       final InserterContext ctx;
-       private int retries;
-       private final FailureCodeTracker errors;
-       private boolean finished;
-       private ClientKey key;
-       private SoftReference refToClientKeyBlock;
-       final int token; // for e.g. splitfiles
-       final boolean isMetadata;
-       final boolean getCHKOnly;
-       final int sourceLength;
-       private int consecutiveRNFs;
-       
-       public SingleBlockInserter(BaseClientPutter parent, Bucket data, short compressionCodec, FreenetURI uri, InserterContext ctx, PutCompletionCallback cb, boolean isMetadata, int sourceLength, int token, boolean getCHKOnly, boolean addToParent) throws InserterException {
-               this.consecutiveRNFs = 0;
-               this.token = token;
-               this.parent = parent;
-               this.retries = 0;
-               this.finished = false;
-               this.ctx = ctx;
-               errors = new FailureCodeTracker(true);
-               this.cb = cb;
-               this.uri = uri;
-               this.compressionCodec = compressionCodec;
-               this.sourceData = data;
-               this.isMetadata = isMetadata;
-               this.sourceLength = sourceLength;
-               this.getCHKOnly = getCHKOnly;
-               if(addToParent) {
-                       parent.addBlock();
-                       parent.addMustSucceedBlocks(1);
-                       parent.notifyClients();
-               }
-       }
-
-       protected ClientKeyBlock innerEncode() throws InserterException {
-               String uriType = uri.getKeyType().toUpperCase();
-               if(uriType.equals("CHK")) {
-                       try {
-                               return ClientCHKBlock.encode(sourceData, 
isMetadata, compressionCodec == -1, compressionCodec, sourceLength);
-                       } catch (CHKEncodeException e) {
-                               Logger.error(this, "Caught "+e, e);
-                               throw new 
InserterException(InserterException.INTERNAL_ERROR, e, null);
-                       } catch (IOException e) {
-                               Logger.error(this, "Caught "+e, e);
-                               throw new 
InserterException(InserterException.BUCKET_ERROR, e, null);
-                       }
-               } else if(uriType.equals("SSK") || uriType.equals("KSK")) {
-                       try {
-                               InsertableClientSSK ik = 
InsertableClientSSK.create(uri);
-                               return ik.encode(sourceData, isMetadata, 
compressionCodec == -1, compressionCodec, sourceLength, ctx.random);
-                       } catch (MalformedURLException e) {
-                               throw new 
InserterException(InserterException.INVALID_URI, e, null);
-                       } catch (SSKEncodeException e) {
-                               Logger.error(this, "Caught "+e, e);
-                               throw new 
InserterException(InserterException.INTERNAL_ERROR, e, null);
-                       } catch (IOException e) {
-                               Logger.error(this, "Caught "+e, e);
-                               throw new 
InserterException(InserterException.BUCKET_ERROR, e, null);
-                       }
-               } else {
-                       throw new 
InserterException(InserterException.INVALID_URI, "Unknown keytype "+uriType, 
null);
-               }
-       }
-
-       protected synchronized ClientKeyBlock encode() throws InserterException 
{
-               if(refToClientKeyBlock != null) {
-                       ClientKeyBlock block = (ClientKeyBlock) 
refToClientKeyBlock.get();
-                       if(block != null) return block;
-               }
-               ClientKeyBlock block = innerEncode();
-               refToClientKeyBlock = 
-                       new SoftReference(block);
-               resultingURI = block.getClientKey().getURI();
-               cb.onEncode(block.getClientKey(), this);
-               return block;
-       }
-       
-       public boolean isInsert() {
-               return true;
-       }
-
-       public short getPriorityClass() {
-               return parent.getPriorityClass();
-       }
-
-       public int getRetryCount() {
-               return retries;
-       }
-
-       public void onFailure(LowLevelPutException e) {
-               if(parent.isCancelled())
-                       fail(new 
InserterException(InserterException.CANCELLED));
-               if(e.code == LowLevelPutException.COLLISION)
-                       fail(new 
InserterException(InserterException.COLLISION));
-               switch(e.code) {
-               case LowLevelPutException.INTERNAL_ERROR:
-                       errors.inc(InserterException.INTERNAL_ERROR);
-                       break;
-               case LowLevelPutException.REJECTED_OVERLOAD:
-                       errors.inc(InserterException.REJECTED_OVERLOAD);
-                       break;
-               case LowLevelPutException.ROUTE_NOT_FOUND:
-                       errors.inc(InserterException.ROUTE_NOT_FOUND);
-                       break;
-               case LowLevelPutException.ROUTE_REALLY_NOT_FOUND:
-                       errors.inc(InserterException.ROUTE_REALLY_NOT_FOUND);
-                       break;
-               default:
-                       Logger.error(this, "Unknown LowLevelPutException code: 
"+e.code);
-                       errors.inc(InserterException.INTERNAL_ERROR);
-               }
-               if(e.code == LowLevelPutException.ROUTE_NOT_FOUND) {
-                       consecutiveRNFs++;
-                       if(consecutiveRNFs == 
ctx.consecutiveRNFsCountAsSuccess) {
-                               Logger.minor(this, "Consecutive RNFs: 
"+consecutiveRNFs+" - counting as success");
-                               onSuccess();
-                               return;
-                       }
-               } else
-                       consecutiveRNFs = 0;
-               Logger.minor(this, "Failed: "+e);
-               if(retries > ctx.maxInsertRetries) {
-                       if(errors.isOneCodeOnly())
-                               fail(new 
InserterException(errors.getFirstCode()));
-                       else
-                               fail(new 
InserterException(InserterException.TOO_MANY_RETRIES_IN_BLOCKS, errors, 
getURI()));
-               }
-               retries++;
-               parent.scheduler.register(this);
-       }
-
-       private void fail(InserterException e) {
-               synchronized(this) {
-                       if(finished) return;
-                       finished = true;
-               }
-               if(e.isFatal())
-                       parent.fatallyFailedBlock();
-               else
-                       parent.failedBlock();
-               cb.onFailure(e, this);
-       }
-
-       public ClientKeyBlock getBlock() {
-               try {
-                       if(finished) return null;
-                       return encode();
-               } catch (InserterException e) {
-                       cb.onFailure(e, this);
-                       return null;
-               } catch (Throwable t) {
-                       Logger.error(this, "Caught "+t, t);
-                       cb.onFailure(new 
InserterException(InserterException.INTERNAL_ERROR, t, null), this);
-                       return null;
-               }
-       }
-
-       public void schedule() throws InserterException {
-               if(finished) return;
-               if(getCHKOnly) {
-                       ClientKeyBlock block = encode();
-                       cb.onEncode(block.getClientKey(), this);
-                       cb.onSuccess(this);
-                       parent.completedBlock(false);
-                       finished = true;
-               } else {
-                       parent.scheduler.register(this);
-               }
-       }
-
-       public FreenetURI getURI() {
-               if(resultingURI == null)
-                       getBlock();
-               return resultingURI;
-       }
-
-       public void onSuccess() {
-               Logger.minor(this, "Succeeded ("+this+"): "+token);
-               synchronized(this) {
-                       finished = true;
-               }
-               parent.completedBlock(false);
-               cb.onSuccess(this);
-       }
-
-       public BaseClientPutter getParent() {
-               return parent;
-       }
-
-       public void cancel() {
-               synchronized(this) {
-                       if(finished) return;
-                       finished = true;
-               }
-               cb.onFailure(new 
InserterException(InserterException.CANCELLED), this);
-       }
-
-       public boolean isFinished() {
-               return finished;
-       }
-
-       public void send(Node node) {
-               try {
-                       Logger.minor(this, "Starting request: "+this);
-                       ClientKeyBlock b = getBlock();
-                       if(b != null)
-                               node.realPut(b, ctx.cacheLocalRequests);
-                       else
-                               fail(new 
InserterException(InserterException.CANCELLED));
-               } catch (LowLevelPutException e) {
-                       onFailure(e);
-                       Logger.minor(this, "Request failed: "+this+" for "+e);
-                       return;
-               }
-               Logger.minor(this, "Request succeeded: "+this);
-               onSuccess();
-       }
-
-       public Object getClient() {
-               return parent.getClient();
-       }
-
-}

Copied: 
branches/freenet-freejvms/src/freenet/client/async/SingleBlockInserter.java 
(from rev 7998, trunk/freenet/src/freenet/client/async/SingleBlockInserter.java)
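
A sketch of inserting a single CHK block with the class above, mirroring how PutHandler
in SimpleManifestPutter uses FreenetURI.EMPTY_CHK_URI. The SingleBlockSketch class is
invented for illustration; parent, ctx, cb and data are assumed to exist in the caller,
and -1 is simply the "no compression codec" value tested by innerEncode().

package freenet.client.async;

import freenet.client.InserterContext;
import freenet.client.InserterException;
import freenet.keys.FreenetURI;
import freenet.support.Bucket;

public class SingleBlockSketch {

    public static SingleBlockInserter insertOneBlock(BaseClientPutter parent, InserterContext ctx,
            PutCompletionCallback cb, Bucket data) throws InserterException {
        SingleBlockInserter sbi = new SingleBlockInserter(
                parent, data,
                (short) -1,               // compressionCodec: -1, the value checked by innerEncode()
                FreenetURI.EMPTY_CHK_URI, // insert as a CHK
                ctx, cb,
                false,                    // isMetadata
                (int) data.size(),        // sourceLength
                0,                        // token, e.g. a splitfile block number
                false,                    // getCHKOnly
                true);                    // addToParent: count the block against parent's totals
        sbi.schedule();                   // registers on parent.scheduler (unless getCHKOnly)
        return sbi;
    }
}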

Deleted: 
branches/freenet-freejvms/src/freenet/client/async/SingleFileFetcher.java
===================================================================
--- trunk/freenet/src/freenet/client/async/SingleFileFetcher.java       
2006-02-03 22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/freenet/client/async/SingleFileFetcher.java   
2006-02-03 22:55:27 UTC (rev 7999)
@@ -1,542 +0,0 @@
-package freenet.client.async;
-
-import java.io.IOException;
-import java.net.MalformedURLException;
-import java.util.LinkedList;
-
-import freenet.client.ArchiveContext;
-import freenet.client.ArchiveFailureException;
-import freenet.client.ArchiveRestartException;
-import freenet.client.ArchiveStoreContext;
-import freenet.client.ClientMetadata;
-import freenet.client.FetchException;
-import freenet.client.FetchResult;
-import freenet.client.FetcherContext;
-import freenet.client.Metadata;
-import freenet.client.MetadataParseException;
-import freenet.keys.ClientCHK;
-import freenet.keys.ClientKey;
-import freenet.keys.ClientKeyBlock;
-import freenet.keys.FreenetURI;
-import freenet.keys.KeyDecodeException;
-import freenet.node.LowLevelGetException;
-import freenet.node.LowLevelPutException;
-import freenet.node.Node;
-import freenet.support.Bucket;
-import freenet.support.Logger;
-import freenet.support.compress.CompressionOutputSizeException;
-import freenet.support.compress.Compressor;
-
-public class SingleFileFetcher extends ClientGetState implements SendableGet {
-
-       final ClientGetter parent;
-       //final FreenetURI uri;
-       final ClientKey key;
-       final LinkedList metaStrings;
-       final FetcherContext ctx;
-       final GetCompletionCallback rcb;
-       final ClientMetadata clientMetadata;
-       private Metadata metadata;
-       final int maxRetries;
-       final ArchiveContext actx;
-       /** Archive handler. We can only have one archive handler at a time. */
-       private ArchiveStoreContext ah;
-       private int recursionLevel;
-       /** The URI of the currently-being-processed data, for archives etc. */
-       private FreenetURI thisKey;
-       private int retryCount;
-       private final LinkedList decompressors;
-       private final boolean dontTellClientGet;
-       private boolean cancelled;
-       private Object token;
-       
-       
-       /** Create a new SingleFileFetcher and register self.
-        * Called when following a redirect, or direct from ClientGet.
-        * @param token 
-        * @param dontTellClientGet 
-        */
-       public SingleFileFetcher(ClientGetter get, GetCompletionCallback cb, ClientMetadata metadata, ClientKey key, LinkedList metaStrings, FetcherContext ctx, ArchiveContext actx, int maxRetries, int recursionLevel, boolean dontTellClientGet, Object token, boolean isEssential) throws FetchException {
-               Logger.minor(this, "Creating SingleFileFetcher for "+key);
-               this.cancelled = false;
-               this.dontTellClientGet = dontTellClientGet;
-               this.token = token;
-               this.parent = get;
-               //this.uri = uri;
-               //this.key = ClientKey.getBaseKey(uri);
-               //metaStrings = uri.listMetaStrings();
-               this.key = key;
-               this.metaStrings = metaStrings;
-               this.ctx = ctx;
-               retryCount = 0;
-               this.rcb = cb;
-               this.clientMetadata = metadata;
-               this.maxRetries = maxRetries;
-               thisKey = key.getURI();
-               this.actx = actx;
-               this.recursionLevel = recursionLevel + 1;
-               if(recursionLevel > ctx.maxRecursionLevel)
-                       throw new FetchException(FetchException.TOO_MUCH_RECURSION, "Too much recursion: "+recursionLevel+" > "+ctx.maxRecursionLevel);
-               this.decompressors = new LinkedList();
-               parent.addBlock();
-               if(isEssential)
-                       parent.addMustSucceedBlocks(1);
-       }
-
-       /** Called by ClientGet. */ 
-       public SingleFileFetcher(ClientGetter get, GetCompletionCallback cb, ClientMetadata metadata, FreenetURI uri, FetcherContext ctx, ArchiveContext actx, int maxRetries, int recursionLevel, boolean dontTellClientGet, Object token, boolean isEssential) throws MalformedURLException, FetchException {
-               this(get, cb, metadata, ClientKey.getBaseKey(uri), uri.listMetaStrings(), ctx, actx, maxRetries, recursionLevel, dontTellClientGet, token, isEssential);
-       }
-       
-       /** Copy constructor, modifies a few given fields, don't call schedule() */
-       public SingleFileFetcher(SingleFileFetcher fetcher, Metadata newMeta, GetCompletionCallback callback, FetcherContext ctx2) throws FetchException {
-               Logger.minor(this, "Creating SingleFileFetcher for "+fetcher.key);
-               this.token = fetcher.token;
-               this.dontTellClientGet = fetcher.dontTellClientGet;
-               this.actx = fetcher.actx;
-               this.ah = fetcher.ah;
-               this.clientMetadata = fetcher.clientMetadata;
-               this.ctx = ctx2;
-               this.key = fetcher.key;
-               this.maxRetries = fetcher.maxRetries;
-               this.metadata = newMeta;
-               this.metaStrings = fetcher.metaStrings;
-               this.parent = fetcher.parent;
-               this.rcb = callback;
-               this.retryCount = 0;
-               this.recursionLevel = fetcher.recursionLevel + 1;
-               if(recursionLevel > ctx.maxRecursionLevel)
-                       throw new FetchException(FetchException.TOO_MUCH_RECURSION);
-               this.thisKey = fetcher.thisKey;
-               this.decompressors = fetcher.decompressors;
-       }
-
-       public void schedule() {
-               if(!dontTellClientGet)
-                       this.parent.currentState = this;
-               parent.scheduler.register(this);
-       }
-
-       public ClientGetter getParent() {
-               return parent;
-       }
-
-       public ClientKey getKey() {
-               return key;
-       }
-
-       public short getPriorityClass() {
-               return parent.getPriorityClass();
-       }
-
-       public int getRetryCount() {
-               return retryCount;
-       }
-
-       // Process the completed data. May result in us going to a
-       // splitfile, or another SingleFileFetcher, etc.
-       public void onSuccess(ClientKeyBlock block, boolean fromStore) {
-               parent.completedBlock(fromStore);
-               // Extract data
-               Bucket data;
-               try {
-                       data = block.decode(ctx.bucketFactory, 
(int)(Math.min(ctx.maxOutputLength, Integer.MAX_VALUE)));
-               } catch (KeyDecodeException e1) {
-                       onFailure(new 
FetchException(FetchException.BLOCK_DECODE_ERROR, e1.getMessage()));
-                       return;
-               } catch (IOException e) {
-                       Logger.error(this, "Could not capture data - disk 
full?: "+e, e);
-                       onFailure(new 
FetchException(FetchException.BUCKET_ERROR, e));
-                       return;
-               }
-               if(!block.isMetadata()) {
-                       onSuccess(new FetchResult(clientMetadata, data));
-               } else {
-                       if(!ctx.followRedirects) {
-                               onFailure(new 
FetchException(FetchException.INVALID_METADATA, "Told me not to follow 
redirects (splitfile block??)"));
-                               return;
-                       }
-                       if(parent.isCancelled()) {
-                               onFailure(new 
FetchException(FetchException.CANCELLED));
-                               return;
-                       }
-                       if(data.size() > ctx.maxMetadataSize) {
-                               onFailure(new 
FetchException(FetchException.TOO_BIG_METADATA));
-                               return;
-                       }
-                       // Parse metadata
-                       try {
-                               metadata = Metadata.construct(data);
-                       } catch (MetadataParseException e) {
-                               onFailure(new FetchException(e));
-                               return;
-                       } catch (IOException e) {
-                               // Bucket error?
-                               onFailure(new 
FetchException(FetchException.BUCKET_ERROR, e));
-                               return;
-                       }
-                       try {
-                               handleMetadata();
-                       } catch (MetadataParseException e) {
-                               onFailure(new FetchException(e));
-                               return;
-                       } catch (FetchException e) {
-                               onFailure(e);
-                               return;
-                       } catch (ArchiveFailureException e) {
-                               onFailure(new FetchException(e));
-                       } catch (ArchiveRestartException e) {
-                               onFailure(new FetchException(e));
-                       }
-               }
-       }
-
-       private void onSuccess(FetchResult result) {
-               if(!decompressors.isEmpty()) {
-                       Bucket data = result.asBucket();
-                       while(!decompressors.isEmpty()) {
-                               Compressor c = (Compressor) 
decompressors.removeLast();
-                               try {
-                                       data = c.decompress(data, 
ctx.bucketFactory, Math.max(ctx.maxTempLength, ctx.maxOutputLength));
-                               } catch (IOException e) {
-                                       onFailure(new 
FetchException(FetchException.BUCKET_ERROR, e));
-                                       return;
-                               } catch (CompressionOutputSizeException e) {
-                                       onFailure(new 
FetchException(FetchException.TOO_BIG, e));
-                                       return;
-                               }
-                       }
-                       result = new FetchResult(result, data);
-               }
-               rcb.onSuccess(result, this);
-       }
-
-       private void handleMetadata() throws FetchException, 
MetadataParseException, ArchiveFailureException, ArchiveRestartException {
-               while(true) {
-                       if(metadata.isSimpleManifest()) {
-                               String name;
-                               if(metaStrings.isEmpty())
-                                       throw new 
FetchException(FetchException.NOT_ENOUGH_METASTRINGS);
-                               else
-                                       name = (String) 
metaStrings.removeFirst();
-                               // Since metadata is a document, we just 
replace metadata here
-                               if(name == null) {
-                                       metadata = 
metadata.getDefaultDocument();
-                                       if(metadata == null)
-                                               throw new 
FetchException(FetchException.NOT_ENOUGH_METASTRINGS);
-                               } else {
-                                       metadata = metadata.getDocument(name);
-                                       thisKey = thisKey.pushMetaString(name);
-                                       if(metadata == null)
-                                               throw new 
FetchException(FetchException.NOT_IN_ARCHIVE);
-                               }
-                               continue; // loop
-                       } else if(metadata.isArchiveManifest()) {
-                               if(metaStrings.isEmpty() && 
ctx.returnZIPManifests) {
-                                       // Just return the archive, whole.
-                                       metadata.setSimpleRedirect();
-                                       continue;
-                               }
-                               // First we need the archive metadata.
-                               // Then parse it.
-                               // Then we may need to fetch something from 
inside the archive.
-                               ah = (ArchiveStoreContext) 
ctx.archiveManager.makeHandler(thisKey, metadata.getArchiveType(), false);
-                               // ah is set. This means we are currently 
handling an archive.
-                               Bucket metadataBucket;
-                               metadataBucket = ah.getMetadata(actx, null, 
recursionLevel+1, true);
-                               if(metadataBucket != null) {
-                                       try {
-                                               metadata = 
Metadata.construct(metadataBucket);
-                                       } catch (IOException e) {
-                                               // Bucket error?
-                                               throw new 
FetchException(FetchException.BUCKET_ERROR, e);
-                                       }
-                               } else {
-                                       fetchArchive(false); // will result in 
this function being called again
-                                       return;
-                               }
-                               continue;
-                       } else if(metadata.isArchiveInternalRedirect()) {
-                               
clientMetadata.mergeNoOverwrite(metadata.getClientMetadata()); // even 
splitfiles can have mime types!
-                               // Fetch it from the archive
-                               if(ah == null)
-                                       throw new 
FetchException(FetchException.UNKNOWN_METADATA, "Archive redirect not in an 
archive");
-                               if(metaStrings.isEmpty())
-                                       throw new 
FetchException(FetchException.NOT_ENOUGH_METASTRINGS);
-                               Bucket dataBucket = ah.get((String) 
metaStrings.removeFirst(), actx, null, recursionLevel+1, true);
-                               if(dataBucket != null) {
-                                       // Return the data
-                                       onSuccess(new 
FetchResult(this.clientMetadata, dataBucket));
-                                       return;
-                               } else {
-                                       // Metadata cannot contain pointers to 
files which don't exist.
-                                       // We enforce this in ArchiveHandler.
-                                       // Therefore, the archive needs to be 
fetched.
-                                       fetchArchive(true);
-                                       // Will call back into this function 
when it has been fetched.
-                                       return;
-                               }
-                       } else if(metadata.isMultiLevelMetadata()) {
-                               // Fetch on a second SingleFileFetcher, like 
with archives.
-                               Metadata newMeta = (Metadata) metadata.clone();
-                               newMeta.setSimpleRedirect();
-                               SingleFileFetcher f = new 
SingleFileFetcher(this, newMeta, new MultiLevelMetadataCallback(), ctx);
-                               f.handleMetadata();
-                               return;
-                       } else if(metadata.isSingleFileRedirect()) {
-                               
clientMetadata.mergeNoOverwrite(metadata.getClientMetadata()); // even 
splitfiles can have mime types!
-                               // FIXME implement implicit archive support
-                               
-                               // Simple redirect
-                               // Just create a new SingleFileFetcher
-                               // Which will then fetch the target URI, and call rcb.onSuccess()
-                               // Hopefully!
-                               FreenetURI uri = metadata.getSingleTarget();
-                               Logger.minor(this, "Redirecting to "+uri);
-                               ClientKey key;
-                               try {
-                                       key = ClientKey.getBaseKey(uri);
-                               } catch (MalformedURLException e) {
-                                       throw new 
FetchException(FetchException.INVALID_URI, e);
-                               }
-                               if(key instanceof ClientCHK && 
!((ClientCHK)key).isMetadata())
-                                       rcb.onBlockSetFinished(this);
-                               LinkedList newMetaStrings = 
uri.listMetaStrings();
-                               
-                               // Move any new meta strings to beginning of 
our list of remaining meta strings
-                               while(!newMetaStrings.isEmpty()) {
-                                       Object o = newMetaStrings.removeLast();
-                                       metaStrings.addFirst(o);
-                               }
-
-                               SingleFileFetcher f = new 
SingleFileFetcher(parent, rcb, clientMetadata, key, metaStrings, ctx, actx, 
maxRetries, recursionLevel, false, null, true);
-                               if(metadata.isCompressed()) {
-                                       Compressor codec = 
Compressor.getCompressionAlgorithmByMetadataID(metadata.getCompressionCodec());
-                                       f.addDecompressor(codec);
-                               }
-                               f.schedule();
-                               // All done! No longer our problem!
-                               return;
-                       } else if(metadata.isSplitfile()) {
-                               Logger.minor(this, "Fetching splitfile");
-                               // FIXME implicit archive support
-                               
-                               
clientMetadata.mergeNoOverwrite(metadata.getClientMetadata()); // even 
splitfiles can have mime types!
-                               
-                               // Splitfile (possibly compressed)
-                               
-                               if(metadata.isCompressed()) {
-                                       Compressor codec = 
Compressor.getCompressionAlgorithmByMetadataID(metadata.getCompressionCodec());
-                                       addDecompressor(codec);
-                               }
-                               
-                               SplitFileFetcher sf = new 
SplitFileFetcher(metadata, rcb, parent, ctx, 
-                                               decompressors, clientMetadata, 
actx, recursionLevel);
-                               sf.schedule();
-                               rcb.onBlockSetFinished(this);
-                               // SplitFile will now run.
-                               // Then it will return data to rcb.
-                               // We are now out of the loop. Yay!
-                               return;
-                       } else {
-                               Logger.error(this, "Don't know what to do with 
metadata: "+metadata);
-                               throw new 
FetchException(FetchException.UNKNOWN_METADATA);
-                       }
-               }
-       }
-
-       private void addDecompressor(Compressor codec) {
-               decompressors.addLast(codec);
-       }
-
-       private void fetchArchive(boolean forData) throws FetchException, 
MetadataParseException, ArchiveFailureException, ArchiveRestartException {
-               // Fetch the archive
-               // How?
-               // Spawn a separate SingleFileFetcher,
-               // which fetches the archive, then calls
-               // our Callback, which unpacks the archive, then
-               // reschedules us.
-               Metadata newMeta = (Metadata) metadata.clone();
-               newMeta.setSimpleRedirect();
-               SingleFileFetcher f;
-               f = new SingleFileFetcher(this, newMeta, new 
ArchiveFetcherCallback(forData), new FetcherContext(ctx, 
FetcherContext.SET_RETURN_ARCHIVES));
-               f.handleMetadata();
-               // When it is done (if successful), the ArchiveCallback will 
re-call this function.
-               // Which will then discover that the metadata *is* available.
-               // And will also discover that the data is available, and will 
complete.
-       }
-
-       class ArchiveFetcherCallback implements GetCompletionCallback {
-
-               private final boolean wasFetchingFinalData;
-               
-               ArchiveFetcherCallback(boolean wasFetchingFinalData) {
-                       this.wasFetchingFinalData = wasFetchingFinalData;
-               }
-               
-               public void onSuccess(FetchResult result, ClientGetState state) 
{
-                       parent.currentState = SingleFileFetcher.this;
-                       try {
-                               ctx.archiveManager.extractToCache(thisKey, 
ah.getArchiveType(), result.asBucket(), actx, ah);
-                       } catch (ArchiveFailureException e) {
-                               SingleFileFetcher.this.onFailure(new 
FetchException(e));
-                       } catch (ArchiveRestartException e) {
-                               SingleFileFetcher.this.onFailure(new 
FetchException(e));
-                       }
-                       try {
-                               handleMetadata();
-                       } catch (MetadataParseException e) {
-                               SingleFileFetcher.this.onFailure(new 
FetchException(e));
-                       } catch (FetchException e) {
-                               SingleFileFetcher.this.onFailure(e);
-                       } catch (ArchiveFailureException e) {
-                               SingleFileFetcher.this.onFailure(new 
FetchException(e));
-                       } catch (ArchiveRestartException e) {
-                               SingleFileFetcher.this.onFailure(new 
FetchException(e));
-                       }
-               }
-
-               public void onFailure(FetchException e, ClientGetState state) {
-                       // Force fatal as the fetcher is presumed to have made 
a reasonable effort.
-                       SingleFileFetcher.this.onFailure(e, true);
-               }
-
-               public void onBlockSetFinished(ClientGetState state) {
-                       if(wasFetchingFinalData) {
-                               rcb.onBlockSetFinished(SingleFileFetcher.this);
-                       }
-               }
-               
-       }
-
-       class MultiLevelMetadataCallback implements GetCompletionCallback {
-               
-               public void onSuccess(FetchResult result, ClientGetState state) 
{
-                       parent.currentState = SingleFileFetcher.this;
-                       try {
-                               metadata = 
Metadata.construct(result.asBucket());
-                               SingleFileFetcher f = new 
SingleFileFetcher(parent, rcb, clientMetadata, key, metaStrings, ctx, actx, 
maxRetries, recursionLevel, dontTellClientGet, null, true);
-                               f.metadata = metadata;
-                               f.handleMetadata();
-                       } catch (MetadataParseException e) {
-                               SingleFileFetcher.this.onFailure(new 
FetchException(e));
-                               return;
-                       } catch (IOException e) {
-                               // Bucket error?
-                               SingleFileFetcher.this.onFailure(new 
FetchException(FetchException.BUCKET_ERROR, e));
-                               return;
-                       } catch (FetchException e) {
-                               onFailure(e, SingleFileFetcher.this);
-                       } catch (ArchiveFailureException e) {
-                               onFailure(new 
FetchException(FetchException.ARCHIVE_FAILURE), SingleFileFetcher.this);
-                       } catch (ArchiveRestartException e) {
-                               onFailure(new 
FetchException(FetchException.ARCHIVE_RESTART), SingleFileFetcher.this);
-                       }
-               }
-               
-               public void onFailure(FetchException e, ClientGetState state) {
-                       // Pass it on; fetcher is assumed to have retried as 
appropriate already, so this is fatal.
-                       SingleFileFetcher.this.onFailure(e, true);
-               }
-
-               public void onBlockSetFinished(ClientGetState state) {
-                       // Ignore as we are fetching metadata here
-               }
-               
-       }
-       
-       private final void onFailure(FetchException e) {
-               onFailure(e, false);
-       }
-       
-       // Real onFailure
-       private void onFailure(FetchException e, boolean forceFatal) {
-               if(parent.isCancelled() || cancelled) {
-                       e = new FetchException(FetchException.CANCELLED);
-               }
-               if(!(e.isFatal() || forceFatal) ) {
-                       if(retryCount <= maxRetries) {
-                               if(parent.isCancelled()) {
-                                       onFailure(new 
FetchException(FetchException.CANCELLED));
-                                       return;
-                               }
-                               retryCount++;
-                               parent.scheduler.register(this);
-                               return;
-                       }
-               }
-               // :(
-               if(e.isFatal() || forceFatal)
-                       parent.fatallyFailedBlock();
-               else
-                       parent.failedBlock();
-               rcb.onFailure(e, this);
-       }
-
-       // Translate it, then call the real onFailure
-       public void onFailure(LowLevelGetException e) {
-               switch(e.code) {
-               case LowLevelGetException.DATA_NOT_FOUND:
-                       onFailure(new 
FetchException(FetchException.DATA_NOT_FOUND));
-                       return;
-               case LowLevelGetException.DATA_NOT_FOUND_IN_STORE:
-                       onFailure(new 
FetchException(FetchException.DATA_NOT_FOUND));
-                       return;
-               case LowLevelGetException.DECODE_FAILED:
-                       onFailure(new 
FetchException(FetchException.BLOCK_DECODE_ERROR));
-                       return;
-               case LowLevelGetException.INTERNAL_ERROR:
-                       onFailure(new 
FetchException(FetchException.INTERNAL_ERROR));
-                       return;
-               case LowLevelGetException.REJECTED_OVERLOAD:
-                       onFailure(new 
FetchException(FetchException.REJECTED_OVERLOAD));
-                       return;
-               case LowLevelGetException.ROUTE_NOT_FOUND:
-                       onFailure(new 
FetchException(FetchException.ROUTE_NOT_FOUND));
-                       return;
-               case LowLevelGetException.TRANSFER_FAILED:
-                       onFailure(new 
FetchException(FetchException.TRANSFER_FAILED));
-                       return;
-               case LowLevelGetException.VERIFY_FAILED:
-                       onFailure(new 
FetchException(FetchException.BLOCK_DECODE_ERROR));
-                       return;
-               default:
-                       Logger.error(this, "Unknown LowLevelGetException code: 
"+e.code);
-                       onFailure(new 
FetchException(FetchException.INTERNAL_ERROR));
-                       return;
-               }
-       }
-
-       public Object getToken() {
-               return token;
-       }
-
-       public synchronized void cancel() {
-               cancelled = true;
-       }
-
-       public boolean isFinished() {
-               return cancelled;
-       }
-
-       /** Do the request, blocking. Called by RequestStarter. */
-       public void send(Node node) {
-               // Do we need to support the last 3?
-               ClientKeyBlock block;
-               try {
-                       block = node.realGetKey(key, false, 
ctx.cacheLocalRequests, false);
-               } catch (LowLevelGetException e) {
-                       onFailure(e);
-                       return;
-               } catch (Throwable t) {
-                       onFailure(new 
LowLevelGetException(LowLevelGetException.INTERNAL_ERROR));
-                       return;
-               }
-               onSuccess(block, false);
-       }
-
-       public Object getClient() {
-               return parent.getClient();
-       }
-
-}
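
The onSuccess() path above unwinds the decompressors list last-in-first-out: each
redirect level that declared a compression codec pushed one Compressor, and the
most recently added codec is applied first on the way back out. Below is a minimal
standalone sketch of that unwind. The Bucket and Compressor types and the
decompress() call are taken from the code above; the helper name, its signature,
the assumption that ctx.bucketFactory is a freenet.support.BucketFactory, and the
single maxLength cap (standing in for Math.max(ctx.maxTempLength,
ctx.maxOutputLength)) are illustrative assumptions, not code from this revision.

import java.io.IOException;
import java.util.LinkedList;

import freenet.support.Bucket;
import freenet.support.BucketFactory;
import freenet.support.compress.CompressionOutputSizeException;
import freenet.support.compress.Compressor;

class DecompressChainSketch {

	/** Apply a LIFO chain of codecs, newest first, as onSuccess() does above.
	 * Hypothetical helper for illustration only. */
	static Bucket applyDecompressors(Bucket data, LinkedList decompressors,
			BucketFactory bf, long maxLength)
			throws IOException, CompressionOutputSizeException {
		while(!decompressors.isEmpty()) {
			Compressor c = (Compressor) decompressors.removeLast();
			// decompress() throws CompressionOutputSizeException if the output
			// would exceed maxLength; the caller above surfaces that as TOO_BIG.
			data = c.decompress(data, bf, maxLength);
		}
		return data;
	}
}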

Copied: 
branches/freenet-freejvms/src/freenet/client/async/SingleFileFetcher.java (from 
rev 7998, trunk/freenet/src/freenet/client/async/SingleFileFetcher.java)

Deleted: 
branches/freenet-freejvms/src/freenet/client/async/SingleFileInserter.java
===================================================================
--- trunk/freenet/src/freenet/client/async/SingleFileInserter.java      
2006-02-03 22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/freenet/client/async/SingleFileInserter.java  
2006-02-03 22:55:27 UTC (rev 7999)
@@ -1,350 +0,0 @@
-package freenet.client.async;
-
-import java.io.IOException;
-
-import freenet.client.InsertBlock;
-import freenet.client.InserterContext;
-import freenet.client.InserterException;
-import freenet.client.Metadata;
-import freenet.keys.CHKBlock;
-import freenet.keys.ClientCHKBlock;
-import freenet.keys.ClientKey;
-import freenet.keys.FreenetURI;
-import freenet.keys.SSKBlock;
-import freenet.support.Bucket;
-import freenet.support.BucketTools;
-import freenet.support.Logger;
-import freenet.support.compress.CompressionOutputSizeException;
-import freenet.support.compress.Compressor;
-
-/**
- * Attempt to insert a file. May include metadata.
- * 
- * This stage:
- * Attempt to compress the file. Off-thread if it will take a while.
- * Then hand it off to SingleBlockInserter or SplitFileInserter as appropriate.
- */
-class SingleFileInserter implements ClientPutState {
-
-       // Config option???
-       private static final long COMPRESS_OFF_THREAD_LIMIT = 65536;
-       
-       final BaseClientPutter parent;
-       final InsertBlock block;
-       final InserterContext ctx;
-       final boolean metadata;
-       final PutCompletionCallback cb;
-       final boolean getCHKOnly;
-       /** If true, we are not the top level request, and should not
-        * update our parent to point to us as current put-stage. */
-       private boolean cancelled = false;
-       private boolean reportMetadataOnly;
-
-       /**
-        * @param parent
-        * @param cb
-        * @param block
-        * @param metadata
-        * @param ctx
-        * @param dontCompress
-        * @param getCHKOnly
-        * @param reportMetadataOnly If true, don't insert the metadata, just 
report it.
-        * @throws InserterException
-        */
-       SingleFileInserter(BaseClientPutter parent, PutCompletionCallback cb, 
InsertBlock block, 
-                       boolean metadata, InserterContext ctx, boolean 
dontCompress, 
-                       boolean getCHKOnly, boolean reportMetadataOnly) throws 
InserterException {
-               this.reportMetadataOnly = reportMetadataOnly;
-               this.parent = parent;
-               this.block = block;
-               this.ctx = ctx;
-               this.metadata = metadata;
-               this.cb = cb;
-               this.getCHKOnly = getCHKOnly;
-       }
-       
-       public void start() throws InserterException {
-               if((!ctx.dontCompress) && block.getData().size() > 
COMPRESS_OFF_THREAD_LIMIT) {
-                       // Run off thread
-                       OffThreadCompressor otc = new OffThreadCompressor();
-                       Thread t = new Thread(otc, "Compressor for "+this);
-                       t.setDaemon(true);
-                       t.start();
-               } else {
-                       tryCompress();
-               }
-       }
-
-       private class OffThreadCompressor implements Runnable {
-               public void run() {
-                       try {
-                               tryCompress();
-                       } catch (InserterException e) {
-                               cb.onFailure(e, SingleFileInserter.this);
-                       }
-               }
-       }
-       
-       private void tryCompress() throws InserterException {
-               // First, determine how small it needs to be
-               Bucket origData = block.getData();
-               Bucket data = origData;
-               int blockSize;
-               boolean dontCompress = ctx.dontCompress;
-               
-               long origSize = data.size();
-               String type = block.desiredURI.getKeyType().toUpperCase();
-               if(type.equals("SSK") || type.equals("KSK")) {
-                       blockSize = SSKBlock.DATA_LENGTH;
-               } else if(type.equals("CHK")) {
-                       blockSize = CHKBlock.DATA_LENGTH;
-               } else {
-                       throw new 
InserterException(InserterException.INVALID_URI);
-               }
-               
-               Compressor bestCodec = null;
-               Bucket bestCompressedData = null;
-
-               if(origSize > blockSize && (!ctx.dontCompress) && 
(!dontCompress)) {
-                       // Try to compress the data.
-                       // Try each algorithm, starting with the fastest and 
weakest.
-                       // Stop when run out of algorithms, or the compressed 
data fits in a single block.
-                       int algos = Compressor.countCompressAlgorithms();
-                       try {
-                               for(int i=0;i<algos;i++) {
-                                       Compressor comp = 
Compressor.getCompressionAlgorithmByDifficulty(i);
-                                       Bucket result;
-                                       result = comp.compress(origData, 
ctx.bf, Long.MAX_VALUE);
-                                       if(result.size() < blockSize) {
-                                               bestCodec = comp;
-                                               data = result;
-                                               if(bestCompressedData != null)
-                                                       
ctx.bf.freeBucket(bestCompressedData);
-                                               bestCompressedData = data;
-                                               break;
-                                       }
-                                       if(bestCompressedData != null && 
result.size() <  bestCompressedData.size()) {
-                                               
ctx.bf.freeBucket(bestCompressedData);
-                                               bestCompressedData = result;
-                                               data = result;
-                                               bestCodec = comp;
-                                       } else if(bestCompressedData == null && 
result.size() < data.size()) {
-                                               bestCompressedData = result;
-                                               bestCodec = comp;
-                                               data = result;
-                                       }
-                               }
-                       } catch (IOException e) {
-                               throw new 
InserterException(InserterException.BUCKET_ERROR, e, null);
-                       } catch (CompressionOutputSizeException e) {
-                               // Impossible
-                               throw new Error(e);
-                       }
-               }
-               
-               // Compressed data
-               
-               // Insert it...
-               short codecNumber = bestCodec == null ? -1 : 
bestCodec.codecNumberForMetadata();
-
-               if(block.getData().size() > Integer.MAX_VALUE)
-                       throw new 
InserterException(InserterException.INTERNAL_ERROR, "2GB+ should not encode to 
one block!", null);
-               
-               if((block.clientMetadata == null || 
block.clientMetadata.isTrivial())) {
-                       if(data.size() < blockSize) {
-                               // Just insert it
-                               SingleBlockInserter bi = new 
SingleBlockInserter(parent, data, codecNumber, block.desiredURI, ctx, cb, 
metadata, (int)block.getData().size(), -1, getCHKOnly, true);
-                               cb.onTransition(this, bi);
-                               bi.schedule();
-                               cb.onBlockSetFinished(this);
-                               return;
-                       }
-               }
-               if (data.size() < ClientCHKBlock.MAX_COMPRESSED_DATA_LENGTH) {
-                       // Insert single block, then insert pointer to it
-                       if(reportMetadataOnly) {
-                               SingleBlockInserter dataPutter = new 
SingleBlockInserter(parent, data, codecNumber, FreenetURI.EMPTY_CHK_URI, ctx, 
cb, metadata, (int)origSize, -1, getCHKOnly, true);
-                               Metadata meta = new 
Metadata(Metadata.SIMPLE_REDIRECT, dataPutter.getURI(), block.clientMetadata);
-                               cb.onMetadata(meta, this);
-                               cb.onTransition(this, dataPutter);
-                               dataPutter.schedule();
-                               cb.onBlockSetFinished(this);
-                       } else {
-                               MultiPutCompletionCallback mcb = 
-                                       new MultiPutCompletionCallback(cb, 
parent);
-                               SingleBlockInserter dataPutter = new 
SingleBlockInserter(parent, data, codecNumber, FreenetURI.EMPTY_CHK_URI, ctx, 
mcb, metadata, (int)origSize, -1, getCHKOnly, true);
-                               Metadata meta = new 
Metadata(Metadata.SIMPLE_REDIRECT, dataPutter.getURI(), block.clientMetadata);
-                               Bucket metadataBucket;
-                               try {
-                                       metadataBucket = 
BucketTools.makeImmutableBucket(ctx.bf, meta.writeToByteArray());
-                               } catch (IOException e) {
-                                       throw new 
InserterException(InserterException.BUCKET_ERROR, e, null);
-                               }
-                               SingleBlockInserter metaPutter = new 
SingleBlockInserter(parent, metadataBucket, (short) -1, block.desiredURI, ctx, 
mcb, true, (int)origSize, -1, getCHKOnly, true);
-                               mcb.addURIGenerator(metaPutter);
-                               mcb.add(dataPutter);
-                               cb.onTransition(this, mcb);
-                               mcb.arm();
-                               dataPutter.schedule();
-                               metaPutter.schedule();
-                               cb.onBlockSetFinished(this);
-                       }
-                       return;
-               }
-               // Otherwise the file is too big to fit into one block
-               // We therefore must make a splitfile
-               // Job of SplitHandler: when the splitinserter has the metadata,
-               // insert it. Then when the splitinserter has finished, and the
-               // metadata insert has finished too, tell the master callback.
-               if(reportMetadataOnly) {
-                       SplitFileInserter sfi = new SplitFileInserter(parent, 
cb, data, bestCodec, block.clientMetadata, ctx, getCHKOnly, metadata);
-                       cb.onTransition(this, sfi);
-                       sfi.start();
-               } else {
-                       SplitHandler sh = new SplitHandler();
-                       SplitFileInserter sfi = new SplitFileInserter(parent, 
sh, data, bestCodec, block.clientMetadata, ctx, getCHKOnly, metadata);
-                       sh.sfi = sfi;
-                       cb.onTransition(this, sh);
-                       sfi.start();
-               }
-               return;
-       }
-       
-       /**
-        * When we get the metadata, start inserting it to our target key.
-        * When we have inserted both the metadata and the splitfile,
-        * call the master callback.
-        */
-       class SplitHandler implements PutCompletionCallback, ClientPutState {
-
-               ClientPutState sfi;
-               ClientPutState metadataPutter;
-               boolean finished = false;
-               boolean splitInsertSuccess = false;
-               boolean metaInsertSuccess = false;
-               boolean splitInsertSetBlocks = false;
-               boolean metaInsertSetBlocks = false;
-
-               public synchronized void onTransition(ClientPutState oldState, 
ClientPutState newState) {
-                       if(oldState == sfi)
-                               sfi = newState;
-                       if(oldState == metadataPutter)
-                               metadataPutter = newState;
-               }
-               
-               public void onSuccess(ClientPutState state) {
-                       Logger.minor(this, "onSuccess("+state+")");
-                       synchronized(this) {
-                               if(finished) return;
-                               if(state == sfi) {
-                                       Logger.minor(this, "Splitfile insert 
succeeded");
-                                       splitInsertSuccess = true;
-                               } else if(state == metadataPutter) {
-                                       Logger.minor(this, "Metadata insert 
succeeded");
-                                       metaInsertSuccess = true;
-                               } else {
-                                       Logger.error(this, "Unknown: "+state, 
new Exception("debug"));
-                               }
-                               if(splitInsertSuccess && metaInsertSuccess) {
-                                       Logger.minor(this, "Both succeeded");
-                                       finished = true;
-                               }
-                               else return;
-                       }
-                       cb.onSuccess(this);
-               }
-
-               public synchronized void onFailure(InserterException e, 
ClientPutState state) {
-                       if(finished) return;
-                       fail(e);
-               }
-
-               public void onMetadata(Metadata meta, ClientPutState state) {
-                       if(finished) return;
-                       if(state == metadataPutter) {
-                               Logger.error(this, "Got metadata for metadata");
-                               onFailure(new 
InserterException(InserterException.INTERNAL_ERROR, "Did not expect to get 
metadata for metadata inserter", null), state);
-                       } else if(state != sfi) {
-                               Logger.error(this, "Got unknown metadata");
-                               onFailure(new InserterException(InserterException.INTERNAL_ERROR, "Got metadata from an unknown ClientPutState", null), state);
-                       }
-                       if(reportMetadataOnly) {
-                               cb.onMetadata(meta, this);
-                               metaInsertSuccess = true;
-                       } else {
-                               synchronized(this) {
-                                       Bucket metadataBucket;
-                                       try {
-                                               metadataBucket = 
BucketTools.makeImmutableBucket(ctx.bf, meta.writeToByteArray());
-                                       } catch (IOException e) {
-                                               InserterException ex = new 
InserterException(InserterException.BUCKET_ERROR, e, null);
-                                               fail(ex);
-                                               return;
-                                       }
-                                       InsertBlock newBlock = new 
InsertBlock(metadataBucket, null, block.desiredURI);
-                                       try {
-                                               metadataPutter = new 
SingleFileInserter(parent, this, newBlock, true, ctx, false, getCHKOnly, false);
-                                               Logger.minor(this, "Putting 
metadata on "+metadataPutter);
-                                       } catch (InserterException e) {
-                                               cb.onFailure(e, this);
-                                               return;
-                                       }
-                               }
-                               try {
-                                       
((SingleFileInserter)metadataPutter).start();
-                               } catch (InserterException e) {
-                                       fail(e);
-                                       return;
-                               }
-                       }
-               }
-
-               private synchronized void fail(InserterException e) {
-                       Logger.minor(this, "Failing: "+e, e);
-                       if(finished) return;
-                       if(sfi != null)
-                               sfi.cancel();
-                       if(metadataPutter != null)
-                               metadataPutter.cancel();
-                       finished = true;
-                       cb.onFailure(e, this);
-               }
-
-               public BaseClientPutter getParent() {
-                       return parent;
-               }
-
-               public void onEncode(ClientKey key, ClientPutState state) {
-                       if(state == metadataPutter)
-                               cb.onEncode(key, this);
-               }
-
-               public void cancel() {
-                       if(sfi != null)
-                               sfi.cancel();
-                       if(metadataPutter != null)
-                               metadataPutter.cancel();
-               }
-
-               public void onBlockSetFinished(ClientPutState state) {
-                       synchronized(this) {
-                               if(state == sfi)
-                                       splitInsertSetBlocks = true;
-                               else if (state == metadataPutter)
-                                       metaInsertSetBlocks = true;
-                               if(!(splitInsertSetBlocks && 
metaInsertSetBlocks)) 
-                                       return;
-                       }
-                       cb.onBlockSetFinished(this);
-               }
-               
-       }
-
-       public BaseClientPutter getParent() {
-               return parent;
-       }
-
-       public void cancel() {
-               cancelled = true;
-       }
-}
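
tryCompress() above walks the registered codecs from fastest/weakest to strongest,
keeps the smallest output seen so far, and stops early as soon as one result fits
into a single block. The sketch below isolates that selection loop. The
countCompressAlgorithms(), getCompressionAlgorithmByDifficulty(), compress() and
freeBucket() calls are the ones used in the code above; the helper name, the
assumption that ctx.bf is a freenet.support.BucketFactory, and the return
convention (hand back the original bucket when nothing compressed well) are
assumptions for illustration.

import java.io.IOException;

import freenet.support.Bucket;
import freenet.support.BucketFactory;
import freenet.support.compress.CompressionOutputSizeException;
import freenet.support.compress.Compressor;

class CompressionChoiceSketch {

	/** Try codecs in order of difficulty; return the smallest result seen,
	 * stopping early once something fits into a single block. */
	static Bucket pickBestCompression(Bucket origData, BucketFactory bf, int blockSize)
			throws IOException, CompressionOutputSizeException {
		Bucket data = origData;
		int algos = Compressor.countCompressAlgorithms();
		for(int i=0;i<algos;i++) {
			Compressor comp = Compressor.getCompressionAlgorithmByDifficulty(i);
			Bucket result = comp.compress(origData, bf, Long.MAX_VALUE);
			if(result.size() < data.size()) {
				// Better than anything so far: keep it, free the old candidate.
				if(data != origData) bf.freeBucket(data);
				data = result;
			} else {
				bf.freeBucket(result);
			}
			if(data.size() < blockSize)
				return data; // fits into a single block, no need to try more
		}
		return data; // may still be origData if no codec helped
	}
}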

Copied: 
branches/freenet-freejvms/src/freenet/client/async/SingleFileInserter.java 
(from rev 7998, trunk/freenet/src/freenet/client/async/SingleFileInserter.java)

Deleted: 
branches/freenet-freejvms/src/freenet/client/async/SplitFileFetcher.java
===================================================================
--- trunk/freenet/src/freenet/client/async/SplitFileFetcher.java        
2006-02-03 22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/freenet/client/async/SplitFileFetcher.java    
2006-02-03 22:55:27 UTC (rev 7999)
@@ -1,232 +0,0 @@
-package freenet.client.async;
-
-import java.io.IOException;
-import java.io.OutputStream;
-import java.util.LinkedList;
-
-import freenet.client.ArchiveContext;
-import freenet.client.ClientMetadata;
-import freenet.client.FetchException;
-import freenet.client.FetchResult;
-import freenet.client.FetcherContext;
-import freenet.client.Metadata;
-import freenet.client.MetadataParseException;
-import freenet.client.events.SplitfileProgressEvent;
-import freenet.keys.FreenetURI;
-import freenet.keys.NodeCHK;
-import freenet.support.Bucket;
-import freenet.support.Fields;
-import freenet.support.Logger;
-import freenet.support.compress.CompressionOutputSizeException;
-import freenet.support.compress.Compressor;
-
-/**
- * Fetch a splitfile, decompress it if need be, and return it to the 
GetCompletionCallback.
- * Most of the work is done by the segments, and we do not need a thread.
- */
-public class SplitFileFetcher extends ClientGetState {
-
-       final FetcherContext fetchContext;
-       final ArchiveContext archiveContext;
-       final LinkedList decompressors;
-       final ClientMetadata clientMetadata;
-       final ClientGetter parent;
-       final GetCompletionCallback cb;
-       final int recursionLevel;
-       /** The splitfile type. See the SPLITFILE_ constants on Metadata. */
-       final short splitfileType;
-       /** The segment length. -1 means not segmented and must get everything 
to decode. */
-       final int blocksPerSegment;
-       /** The segment length in check blocks. */
-       final int checkBlocksPerSegment;
-       /** Total number of segments */
-       final int segmentCount;
-       /** The detailed information on each segment */
-       final SplitFileFetcherSegment[] segments;
-       /** The splitfile data blocks. */
-       final FreenetURI[] splitfileDataBlocks;
-       /** The splitfile check blocks. */
-       final FreenetURI[] splitfileCheckBlocks;
-       /** Maximum temporary length */
-       final long maxTempLength;
-       /** Have all segments finished? Access synchronized. */
-       private boolean allSegmentsFinished = false;
-       /** Override length. If this is positive, truncate the splitfile to 
this length. */
-       private final long overrideLength;
-       /** Accept non-full splitfile chunks? */
-       private final boolean splitUseLengths;
-       private boolean finished;
-       
-       public SplitFileFetcher(Metadata metadata, GetCompletionCallback rcb, 
ClientGetter parent,
-                       FetcherContext newCtx, LinkedList decompressors, 
ClientMetadata clientMetadata, 
-                       ArchiveContext actx, int recursionLevel) throws 
FetchException, MetadataParseException {
-               this.finished = false;
-               this.fetchContext = newCtx;
-               this.archiveContext = actx;
-               this.decompressors = decompressors;
-               this.clientMetadata = clientMetadata;
-               this.cb = rcb;
-               this.recursionLevel = recursionLevel + 1;
-               this.parent = parent;
-               if(parent.isCancelled())
-                       throw new FetchException(FetchException.CANCELLED);
-               overrideLength = metadata.dataLength();
-               this.splitfileType = metadata.getSplitfileType();
-               splitfileDataBlocks = metadata.getSplitfileDataKeys();
-               splitfileCheckBlocks = metadata.getSplitfileCheckKeys();
-               splitUseLengths = metadata.splitUseLengths();
-               int blockLength = splitUseLengths ? -1 : NodeCHK.BLOCK_SIZE;
-               if(splitfileType == Metadata.SPLITFILE_NONREDUNDANT) {
-                       // Don't need to do much - just fetch everything and 
piece it together.
-                       blocksPerSegment = -1;
-                       checkBlocksPerSegment = -1;
-                       segmentCount = 1;
-               } else if(splitfileType == Metadata.SPLITFILE_ONION_STANDARD) {
-                       byte[] params = metadata.splitfileParams();
-                       if(params == null || params.length < 8)
-                               throw new MetadataParseException("No splitfile 
params");
-                       blocksPerSegment = Fields.bytesToInt(params, 0);
-                       checkBlocksPerSegment = Fields.bytesToInt(params, 4);
-                       if(blocksPerSegment > 
fetchContext.maxDataBlocksPerSegment
-                                       || checkBlocksPerSegment > 
fetchContext.maxCheckBlocksPerSegment)
-                               throw new 
FetchException(FetchException.TOO_MANY_BLOCKS_PER_SEGMENT, "Too many blocks per 
segment: "+blocksPerSegment+" data, "+checkBlocksPerSegment+" check");
-                       segmentCount = (splitfileDataBlocks.length / 
blocksPerSegment) +
-                               (splitfileDataBlocks.length % blocksPerSegment 
== 0 ? 0 : 1);
-                       // Onion, 128/192.
-                       // Will be segmented.
-               } else throw new MetadataParseException("Unknown splitfile 
format: "+splitfileType);
-               this.maxTempLength = fetchContext.maxTempLength;
-               Logger.minor(this, "Algorithm: "+splitfileType+", blocks per 
segment: "+blocksPerSegment+", check blocks per segment: 
"+checkBlocksPerSegment+", segments: "+segmentCount);
-               segments = new SplitFileFetcherSegment[segmentCount]; // 
initially null on all entries
-               if(segmentCount == 1) {
-                       segments[0] = new 
SplitFileFetcherSegment(splitfileType, splitfileDataBlocks, 
splitfileCheckBlocks, this, archiveContext, fetchContext, maxTempLength, 
splitUseLengths, recursionLevel);
-               } else {
-                       int dataBlocksPtr = 0;
-                       int checkBlocksPtr = 0;
-                       for(int i=0;i<segments.length;i++) {
-                               // Create a segment. Give it its keys.
-                               int copyDataBlocks = 
Math.min(splitfileDataBlocks.length - dataBlocksPtr, blocksPerSegment);
-                               int copyCheckBlocks = 
Math.min(splitfileCheckBlocks.length - checkBlocksPtr, checkBlocksPerSegment);
-                               FreenetURI[] dataBlocks = new 
FreenetURI[copyDataBlocks];
-                               FreenetURI[] checkBlocks = new 
FreenetURI[copyCheckBlocks];
-                               if(copyDataBlocks > 0)
-                                       System.arraycopy(splitfileDataBlocks, 
dataBlocksPtr, dataBlocks, 0, copyDataBlocks);
-                               if(copyCheckBlocks > 0)
-                                       System.arraycopy(splitfileCheckBlocks, 
checkBlocksPtr, checkBlocks, 0, copyCheckBlocks);
-                               dataBlocksPtr += copyDataBlocks;
-                               checkBlocksPtr += copyCheckBlocks;
-                               segments[i] = new 
SplitFileFetcherSegment(splitfileType, dataBlocks, checkBlocks, this, 
archiveContext, fetchContext, maxTempLength, splitUseLengths, recursionLevel+1);
-                       }
-               }
-       }
-
-       /** Return the final status of the fetch. Throws an exception, or 
returns a 
-        * Bucket containing the fetched data.
-        * @throws FetchException If the fetch failed for some reason.
-        */
-       private Bucket finalStatus() throws FetchException {
-               long finalLength = 0;
-               for(int i=0;i<segments.length;i++) {
-                       SplitFileFetcherSegment s = segments[i];
-                       if(!s.isFinished()) throw new 
IllegalStateException("Not all finished");
-                       s.throwError();
-                       // If still here, it succeeded
-                       finalLength += s.decodedLength();
-                       // Healing is done by Segment
-               }
-               if(finalLength > overrideLength)
-                       finalLength = overrideLength;
-               
-               long bytesWritten = 0;
-               OutputStream os = null;
-               Bucket output;
-               try {
-                       output = 
fetchContext.bucketFactory.makeBucket(finalLength);
-                       os = output.getOutputStream();
-                       for(int i=0;i<segments.length;i++) {
-                               SplitFileFetcherSegment s = segments[i];
-                               long max = (finalLength < 0 ? 0 : (finalLength 
- bytesWritten));
-                               bytesWritten += s.writeDecodedDataTo(os, max);
-                       }
-               } catch (IOException e) {
-                       throw new FetchException(FetchException.BUCKET_ERROR, 
e);
-               } finally {
-                       if(os != null) {
-                               try {
-                                       os.close();
-                               } catch (IOException e) {
-                                       // If it fails to close it may return 
corrupt data.
-                                       throw new 
FetchException(FetchException.BUCKET_ERROR, e);
-                               }
-                       }
-               }
-               return output;
-       }
-
-       public void segmentFinished(SplitFileFetcherSegment segment) {
-               Logger.minor(this, "Finished segment: "+segment);
-               synchronized(this) {
-                       boolean allDone = true;
-                       for(int i=0;i<segments.length;i++)
-                               if(!segments[i].isFinished()) {
-                                       Logger.minor(this, "Segment 
"+segments[i]+" is not finished");
-                                       allDone = false;
-                               }
-                       if(allDone) {
-                               if(allSegmentsFinished)
-                                       Logger.error(this, "Was already finished! (segmentFinished("+segment+"))");
-                               else {
-                                       allSegmentsFinished = true;
-                                       finish();
-                               }
-                       }
-                       notifyAll();
-               }
-       }
-
-       private void finish() {
-               try {
-                       synchronized(this) {
-                               if(finished) {
-                                       Logger.error(this, "Was already 
finished");
-                                       return;
-                               }
-                               finished = true;
-                       }
-                       Bucket data = finalStatus();
-                       // Decompress
-                       while(!decompressors.isEmpty()) {
-                               Compressor c = (Compressor) 
decompressors.removeLast();
-                               try {
-                                       data = c.decompress(data, 
fetchContext.bucketFactory, Math.max(fetchContext.maxTempLength, 
fetchContext.maxOutputLength));
-                               } catch (IOException e) {
-                                       cb.onFailure(new 
FetchException(FetchException.BUCKET_ERROR, e), this);
-                                       return;
-                               } catch (CompressionOutputSizeException e) {
-                                       cb.onFailure(new 
FetchException(FetchException.TOO_BIG, e), this);
-                                       return;
-                               }
-                       }
-                       cb.onSuccess(new FetchResult(clientMetadata, data), 
this);
-               } catch (FetchException e) {
-                       cb.onFailure(e, this);
-               }
-       }
-
-       public ClientGetter getParent() {
-               return parent;
-       }
-
-       public void schedule() {
-               for(int i=0;i<segments.length;i++) {
-                       segments[i].schedule();
-               }
-               parent.notifyClients();
-       }
-
-       public void cancel() {
-               for(int i=0;i<segments.length;i++)
-                       segments[i].cancel();
-       }
-
-}

Copied: 
branches/freenet-freejvms/src/freenet/client/async/SplitFileFetcher.java (from 
rev 7998, trunk/freenet/src/freenet/client/async/SplitFileFetcher.java)
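
The SplitFileFetcher constructor above sizes segments with a ceiling division and
gives each segment up to blocksPerSegment keys, so only the last segment can run
short. As a hypothetical worked example, 300 data blocks at 128 per segment give
three segments of 128, 128 and 44 blocks. The self-contained sketch below repeats
that partitioning arithmetic; the helper and its main() are illustrative, not part
of this file.

class SegmentSizingSketch {

	/** Same arithmetic as the SplitFileFetcher constructor: ceiling division
	 * for the segment count, last segment takes whatever remains. */
	static int[] segmentSizes(int totalBlocks, int blocksPerSegment) {
		int segmentCount = (totalBlocks / blocksPerSegment)
			+ (totalBlocks % blocksPerSegment == 0 ? 0 : 1);
		int[] sizes = new int[segmentCount];
		int ptr = 0;
		for(int i=0;i<segmentCount;i++) {
			sizes[i] = Math.min(totalBlocks - ptr, blocksPerSegment);
			ptr += sizes[i];
		}
		return sizes;
	}

	public static void main(String[] args) {
		// 300 data blocks, 128 per segment -> segments of 128, 128, 44
		int[] sizes = segmentSizes(300, 128);
		for(int i=0;i<sizes.length;i++)
			System.out.println("segment "+i+": "+sizes[i]+" blocks");
	}
}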

Deleted: 
branches/freenet-freejvms/src/freenet/client/async/SplitFileFetcherSegment.java
===================================================================
--- trunk/freenet/src/freenet/client/async/SplitFileFetcherSegment.java 
2006-02-03 22:35:15 UTC (rev 7998)
+++ 
branches/freenet-freejvms/src/freenet/client/async/SplitFileFetcherSegment.java 
    2006-02-03 22:55:27 UTC (rev 7999)
@@ -1,349 +0,0 @@
-package freenet.client.async;
-
-import java.io.IOException;
-import java.io.OutputStream;
-import java.net.MalformedURLException;
-
-import freenet.client.ArchiveContext;
-import freenet.client.FECCodec;
-import freenet.client.FailureCodeTracker;
-import freenet.client.FetchException;
-import freenet.client.FetchResult;
-import freenet.client.FetcherContext;
-import freenet.client.Metadata;
-import freenet.client.MetadataParseException;
-import freenet.client.SplitfileBlock;
-import freenet.keys.FreenetURI;
-import freenet.support.Bucket;
-import freenet.support.BucketTools;
-import freenet.support.Logger;
-
-/**
- * A single segment within a SplitFileFetcher.
- * This in turn controls a large number of SingleFileFetcher's.
- */
-public class SplitFileFetcherSegment implements GetCompletionCallback {
-       
-       final short splitfileType;
-       final FreenetURI[] dataBlocks;
-       final FreenetURI[] checkBlocks;
-       final SingleFileFetcher[] dataBlockStatus;
-       final SingleFileFetcher[] checkBlockStatus;
-       final MinimalSplitfileBlock[] dataBuckets;
-       final MinimalSplitfileBlock[] checkBuckets;
-       final int minFetched;
-       final SplitFileFetcher parentFetcher;
-       final ArchiveContext archiveContext;
-       final FetcherContext fetcherContext;
-       final long maxBlockLength;
-       final boolean nonFullBlocksAllowed;
-       /** Has the segment finished processing? Irreversible. */
-       private boolean finished;
-       private boolean startedDecode;
-       /** Bucket to store the data retrieved, after it has been decoded */
-       private Bucket decodedData;
-       /** Fetch context for block fetches */
-       final FetcherContext blockFetchContext;
-       /** Recursion level */
-       final int recursionLevel;
-       private FetchException failureException;
-       private int fatallyFailedBlocks;
-       private int failedBlocks;
-       private int fetchedBlocks;
-       private final FailureCodeTracker errors;
-       
-       public SplitFileFetcherSegment(short splitfileType, FreenetURI[] 
splitfileDataBlocks, FreenetURI[] splitfileCheckBlocks, SplitFileFetcher 
fetcher, ArchiveContext archiveContext, FetcherContext fetchContext, long 
maxTempLength, boolean splitUseLengths, int recursionLevel) throws 
MetadataParseException, FetchException {
-               this.parentFetcher = fetcher;
-               this.errors = new FailureCodeTracker(false);
-               this.archiveContext = archiveContext;
-               this.splitfileType = splitfileType;
-               dataBlocks = splitfileDataBlocks;
-               checkBlocks = splitfileCheckBlocks;
-               if(splitfileType == Metadata.SPLITFILE_NONREDUNDANT) {
-                       minFetched = dataBlocks.length;
-               } else if(splitfileType == Metadata.SPLITFILE_ONION_STANDARD) {
-                       minFetched = dataBlocks.length;
-               } else throw new MetadataParseException("Unknown splitfile type: "+splitfileType);
-               finished = false;
-               decodedData = null;
-               dataBlockStatus = new SingleFileFetcher[dataBlocks.length];
-               checkBlockStatus = new SingleFileFetcher[checkBlocks.length];
-               dataBuckets = new MinimalSplitfileBlock[dataBlocks.length];
-               checkBuckets = new MinimalSplitfileBlock[checkBlocks.length];
-               for(int i=0;i<dataBuckets.length;i++) {
-                       dataBuckets[i] = new MinimalSplitfileBlock(i);
-               }
-               for(int i=0;i<checkBuckets.length;i++)
-                       checkBuckets[i] = new 
MinimalSplitfileBlock(i+dataBuckets.length);
-               nonFullBlocksAllowed = splitUseLengths;
-               this.fetcherContext = fetchContext;
-               maxBlockLength = maxTempLength;
-               if(splitUseLengths) {
-                       blockFetchContext = new FetcherContext(fetcherContext, 
FetcherContext.SPLITFILE_USE_LENGTHS_MASK);
-                       this.recursionLevel = recursionLevel + 1;
-               } else {
-                       blockFetchContext = new FetcherContext(fetcherContext, 
FetcherContext.SPLITFILE_DEFAULT_BLOCK_MASK);
-                       this.recursionLevel = 0;
-               }
-       }
-
-       public boolean isFinished() {
-               return finished;
-       }
-
-       /** Throw a FetchException, if we have one. Else do nothing. */
-       public synchronized void throwError() throws FetchException {
-               if(failureException != null)
-                       throw failureException;
-       }
-       
-       /** Length of the decoded data, in bytes. */
-       public long decodedLength() {
-               return decodedData.size();
-       }
-
-       /** Write the decoded segment's data to an OutputStream */
-       public long writeDecodedDataTo(OutputStream os, long truncateLength) 
throws IOException {
-               long len = decodedData.size();
-               if(truncateLength >= 0 && truncateLength < len)
-                       len = truncateLength;
-               BucketTools.copyTo(decodedData, os, Math.min(truncateLength, 
decodedData.size()));
-               return len;
-       }
-
-       /** How many blocks have failed due to running out of retries? */
-       public synchronized int failedBlocks() {
-               return failedBlocks;
-       }
-       
-       /** How many blocks have been successfully fetched? */
-       public synchronized int fetchedBlocks() {
-               return fetchedBlocks;
-       }
-
-       /** How many blocks have currently running requests? */ 
-       public int runningBlocks() {
-               // FIXME implement or throw out
-               return 0;
-       }
-
-       /** How many blocks failed permanently due to fatal errors? */
-       public int fatallyFailedBlocks() {
-               return fatallyFailedBlocks;
-       }
-
-       public synchronized void onSuccess(FetchResult result, ClientGetState 
state) {
-               if(finished) return;
-               Integer token = (Integer) ((SingleFileFetcher)state).getToken();
-               int blockNo = token.intValue();
-               if(blockNo < dataBlocks.length) {
-                       if(dataBlocks[blockNo] == null) {
-                               Logger.error(this, "Block already finished: 
"+blockNo);
-                               return;
-                       }
-                       dataBlocks[blockNo] = null;
-                       dataBuckets[blockNo].setData(result.asBucket());
-               } else if(blockNo < checkBlocks.length + dataBlocks.length) {
-                       blockNo -= dataBlocks.length;
-                       if(checkBlocks[blockNo] == null) {
-                               Logger.error(this, "Check block already 
finished: "+blockNo);
-                               return;
-                       }
-                       checkBlocks[blockNo] = null;
-                       checkBuckets[blockNo].setData(result.asBucket());
-               } else
-                       Logger.error(this, "Unrecognized block number: 
"+blockNo, new Exception("error"));
-               fetchedBlocks++;
-               if(fetchedBlocks >= minFetched)
-                       startDecode();
-       }
-
-       private void startDecode() {
-               synchronized(this) {
-                       if(startedDecode) return;
-                       startedDecode = true;
-               }
-               for(int i=0;i<dataBlockStatus.length;i++) {
-                       SingleFileFetcher f = dataBlockStatus[i];
-                       if(f != null) f.cancel();
-               }
-               for(int i=0;i<checkBlockStatus.length;i++) {
-                       SingleFileFetcher f = checkBlockStatus[i];
-                       if(f != null) f.cancel();
-               }
-               Runnable r = new Decoder();
-               Thread t = new Thread(r, "Decoder for "+this);
-               t.setDaemon(true);
-               t.start();
-       }
-       
-       class Decoder implements Runnable {
-
-               public void run() {
-                       
-                       // Now decode
-                       Logger.minor(this, "Decoding "+this);
-                       
-                       FECCodec codec = FECCodec.getCodec(splitfileType, 
dataBlocks.length, checkBlocks.length);
-                       try {
-                               if(splitfileType != 
Metadata.SPLITFILE_NONREDUNDANT) {
-                                       // FIXME hardcoded block size below.
-                                       codec.decode(dataBuckets, checkBuckets, 
32768, fetcherContext.bucketFactory);
-                                       // Now have all the data blocks (not 
necessarily all the check blocks)
-                               }
-                               
-                               decodedData = 
fetcherContext.bucketFactory.makeBucket(-1);
-                               Logger.minor(this, "Copying data from data 
blocks");
-                               OutputStream os = decodedData.getOutputStream();
-                               for(int i=0;i<dataBlockStatus.length;i++) {
-                                       SplitfileBlock status = dataBuckets[i];
-                                       Bucket data = status.getData();
-                                       BucketTools.copyTo(data, os, 
Long.MAX_VALUE);
-                               }
-                               Logger.minor(this, "Copied data");
-                               os.close();
-                               // Must set finished BEFORE calling 
parentFetcher.
-                               // Otherwise a race is possible that might 
result in it not seeing our finishing.
-                               finished = true;
-                               
parentFetcher.segmentFinished(SplitFileFetcherSegment.this);
-                       } catch (IOException e) {
-                               Logger.minor(this, "Caught bucket error?: "+e, 
e);
-                               finished = true;
-                               failureException = new 
FetchException(FetchException.BUCKET_ERROR);
-                               
parentFetcher.segmentFinished(SplitFileFetcherSegment.this);
-                               return;
-                       }
-                       
-                       // Now heal
-                       
-                       // Encode any check blocks we don't have
-                       if(codec != null) {
-                               try {
-                                       codec.encode(dataBuckets, checkBuckets, 
32768, fetcherContext.bucketFactory);
-                               } catch (IOException e) {
-                                       Logger.error(this, "Bucket error while 
healing: "+e, e);
-                               }
-                       }
-                       
-                       // Now insert *ALL* blocks on which we had at least one 
failure, and didn't eventually succeed
-                       for(int i=0;i<dataBlockStatus.length;i++) {
-                               if(dataBuckets[i].getData() != null) continue;
-                               SingleFileFetcher fetcher = dataBlockStatus[i];
-                               if(fetcher.getRetryCount() == 0) {
-                                       // 80% chance of not inserting, if we 
never tried it
-                                       if(fetcherContext.random.nextInt(5) == 
0) continue;
-                               }
-                               queueHeal(dataBuckets[i].getData());
-                       }
-                       for(int i=0;i<checkBlockStatus.length;i++) {
-                               if(checkBuckets[i].getData() != null) continue;
-                               SingleFileFetcher fetcher = checkBlockStatus[i];
-                               if(fetcher.getRetryCount() == 0) {
-                                       // 80% chance of not inserting, if we 
never tried it
-                                       if(fetcherContext.random.nextInt(5) == 
0) continue;
-                               }
-                               queueHeal(checkBuckets[i].getData());
-                       }
-                       
-                       for(int i=0;i<dataBlocks.length;i++) {
-                               dataBuckets[i] = null;
-                               dataBlockStatus[i] = null;
-                               dataBlocks[i] = null;
-                       }
-                       for(int i=0;i<checkBlocks.length;i++) {
-                               checkBuckets[i] = null;
-                               checkBlockStatus[i] = null;
-                               checkBlocks[i] = null;
-                       }
-               }
-
-       }
-
-       private void queueHeal(Bucket data) {
-               // TODO Auto-generated method stub
-               
-       }
-       
-       /** This is after any retries and therefore is either out-of-retries or 
fatal */
-       public synchronized void onFailure(FetchException e, ClientGetState 
state) {
-               Integer token = (Integer) ((SingleFileFetcher)state).getToken();
-               int blockNo = token.intValue();
-               if(blockNo < dataBlocks.length) {
-                       if(dataBlocks[blockNo] == null) {
-                               Logger.error(this, "Block already finished: 
"+blockNo);
-                               return;
-                       }
-                       dataBlocks[blockNo] = null;
-               } else if(blockNo < checkBlocks.length + dataBlocks.length) {
-                       blockNo -= dataBlocks.length;
-                       if(checkBlocks[blockNo] == null) {
-                               Logger.error(this, "Check block already 
finished: "+blockNo);
-                               return;
-                       }
-                       checkBlocks[blockNo] = null;
-               } else
-                       Logger.error(this, "Unrecognized block number: 
"+blockNo, new Exception("error"));
-               // :(
-               Logger.minor(this, "Permanently failed: "+state+" on "+this);
-               if(e.isFatal())
-                       fatallyFailedBlocks++;
-               else
-                       failedBlocks++;
-               // FIXME this may not be accurate across all the retries?
-               if(e.errorCodes != null)
-                       errors.merge(e.errorCodes);
-               else
-                       errors.inc(new Integer(e.mode), state == null ? 1 : 
((SingleFileFetcher)state).getRetryCount());
-               if(failedBlocks + fatallyFailedBlocks > (dataBlocks.length + 
checkBlocks.length - minFetched)) {
-                       fail(new FetchException(FetchException.SPLITFILE_ERROR, 
errors));
-               }
-       }
-
-       private void fail(FetchException e) {
-               synchronized(this) {
-                       if(finished) return;
-                       finished = true;
-                       this.failureException = e;
-               }
-               for(int i=0;i<dataBlockStatus.length;i++) {
-                       SingleFileFetcher f = dataBlockStatus[i];
-                       if(f != null)
-                               f.cancel();
-               }
-               for(int i=0;i<checkBlockStatus.length;i++) {
-                       SingleFileFetcher f = checkBlockStatus[i];
-                       if(f != null)
-                               f.cancel();
-               }
-               parentFetcher.segmentFinished(this);
-       }
-
-       public void schedule() {
-               try {
-                       for(int i=0;i<dataBlocks.length;i++) {
-                               dataBlockStatus[i] =
-                                       new 
SingleFileFetcher(parentFetcher.parent, this, null, dataBlocks[i], 
blockFetchContext, archiveContext, blockFetchContext.maxSplitfileBlockRetries, 
recursionLevel, true, new Integer(i), true);
-                               dataBlockStatus[i].schedule();
-                       }
-                       for(int i=0;i<checkBlocks.length;i++) {
-                               checkBlockStatus[i] =
-                                       new 
SingleFileFetcher(parentFetcher.parent, this, null, checkBlocks[i], 
blockFetchContext, archiveContext, blockFetchContext.maxSplitfileBlockRetries, 
recursionLevel, true, new Integer(dataBlocks.length+i), false);
-                               checkBlockStatus[i].schedule();
-                       }
-               } catch (MalformedURLException e) {
-                       // Invalidates the whole splitfile
-                       fail(new FetchException(FetchException.INVALID_URI, 
"Invalid URI in splitfile"));
-               } catch (Throwable t) {
-                       fail(new FetchException(FetchException.INVALID_URI, t));
-               }
-       }
-
-       public void cancel() {
-               fail(new FetchException(FetchException.CANCELLED));
-       }
-
-       public void onBlockSetFinished(ClientGetState state) {
-               // Ignore; irrelevant
-       }
-
-}

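For reference, the core of SplitFileFetcherSegment above is a k-of-n trigger: onSuccess() counts incoming blocks and, once minFetched of them have arrived, startDecode() cancels the remaining fetches and runs the FEC decode on a daemon thread. Below is a minimal self-contained sketch of that pattern; the class and method names are invented for illustration and it is not part of this commit.

    // Illustrative sketch: start decoding as soon as any k of the n
    // splitfile blocks have been fetched (k = minFetched above).
    class KofNTrigger {
        private final int minFetched;      // k: blocks needed to decode
        private int fetchedBlocks;         // blocks received so far
        private boolean startedDecode;

        KofNTrigger(int minFetched) {
            this.minFetched = minFetched;
        }

        // Called once per successfully fetched block, like onSuccess() above.
        synchronized void onBlockFetched() {
            fetchedBlocks++;
            if(fetchedBlocks >= minFetched && !startedDecode) {
                startedDecode = true;
                Thread t = new Thread(new Runnable() {
                    public void run() {
                        decode();
                    }
                }, "Decoder for segment");
                t.setDaemon(true);
                t.start();
            }
        }

        void decode() {
            // FEC-decode the missing blocks and concatenate the data,
            // as Decoder.run() does above; omitted in this sketch.
        }
    }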
Copied: 
branches/freenet-freejvms/src/freenet/client/async/SplitFileFetcherSegment.java 
(from rev 7998, 
trunk/freenet/src/freenet/client/async/SplitFileFetcherSegment.java)

Deleted: 
branches/freenet-freejvms/src/freenet/client/async/SplitFileInserter.java
===================================================================
--- trunk/freenet/src/freenet/client/async/SplitFileInserter.java       
2006-02-03 22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/freenet/client/async/SplitFileInserter.java   
2006-02-03 22:55:27 UTC (rev 7999)
@@ -1,271 +0,0 @@
-package freenet.client.async;
-
-import java.io.IOException;
-import java.util.Vector;
-
-import freenet.client.ClientMetadata;
-import freenet.client.FECCodec;
-import freenet.client.FailureCodeTracker;
-import freenet.client.InserterContext;
-import freenet.client.InserterException;
-import freenet.client.Metadata;
-import freenet.keys.ClientCHKBlock;
-import freenet.keys.FreenetURI;
-import freenet.support.Bucket;
-import freenet.support.BucketTools;
-import freenet.support.Logger;
-import freenet.support.compress.Compressor;
-
-public class SplitFileInserter implements ClientPutState {
-       
-       final BaseClientPutter parent;
-       final InserterContext ctx;
-       final PutCompletionCallback cb;
-       final long dataLength;
-       final short compressionCodec;
-       final short splitfileAlgorithm;
-       final int segmentSize;
-       final int checkSegmentSize;
-       final SplitFileInserterSegment[] segments;
-       final boolean getCHKOnly;
-       final int countCheckBlocks;
-       final int countDataBlocks;
-       private boolean haveSentMetadata;
-       final ClientMetadata cm;
-       final boolean isMetadata;
-       private boolean finished;
-
-       public SplitFileInserter(BaseClientPutter put, PutCompletionCallback 
cb, Bucket data, Compressor bestCodec, ClientMetadata clientMetadata, 
InserterContext ctx, boolean getCHKOnly, boolean isMetadata) throws 
InserterException {
-               this.parent = put;
-               this.finished = false;
-               this.isMetadata = isMetadata;
-               this.cm = clientMetadata;
-               this.getCHKOnly = getCHKOnly;
-               this.cb = cb;
-               this.ctx = ctx;
-               Bucket[] dataBuckets;
-               try {
-                       dataBuckets = BucketTools.split(data, 
ClientCHKBlock.DATA_LENGTH, ctx.bf);
-               } catch (IOException e) {
-                       throw new 
InserterException(InserterException.BUCKET_ERROR, e, null);
-               }
-               countDataBlocks = dataBuckets.length;
-               // Encoding is done by segments
-               if(bestCodec == null)
-                       compressionCodec = -1;
-               else
-                       compressionCodec = bestCodec.codecNumberForMetadata();
-               this.splitfileAlgorithm = ctx.splitfileAlgorithm;
-               this.dataLength = data.size();
-               segmentSize = ctx.splitfileSegmentDataBlocks;
-               checkSegmentSize = splitfileAlgorithm == 
Metadata.SPLITFILE_NONREDUNDANT ? 0 : ctx.splitfileSegmentCheckBlocks;
-               
-               // Create segments
-               segments = splitIntoSegments(segmentSize, dataBuckets);
-               int count = 0;
-               for(int i=0;i<segments.length;i++)
-                       count += segments[i].countCheckBlocks();
-               countCheckBlocks = count;
-       }
-
-       /**
-        * Group the blocks into segments.
-        */
-       private SplitFileInserterSegment[] splitIntoSegments(int segmentSize, 
Bucket[] origDataBlocks) {
-               int dataBlocks = origDataBlocks.length;
-
-               Vector segs = new Vector();
-               
-               // First split the data up
-               if(dataBlocks < segmentSize || segmentSize == -1) {
-                       // Single segment
-                       FECCodec codec = FECCodec.getCodec(splitfileAlgorithm, 
origDataBlocks.length);
-                       SplitFileInserterSegment onlySeg = new 
SplitFileInserterSegment(this, codec, origDataBlocks, ctx, getCHKOnly, 0);
-                       segs.add(onlySeg);
-               } else {
-                       int j = 0;
-                       int segNo = 0;
-                       for(int i=segmentSize;;i+=segmentSize) {
-                               if(i > dataBlocks) i = dataBlocks;
-                               Bucket[] seg = new Bucket[i-j];
-                               System.arraycopy(origDataBlocks, j, seg, 0, 
i-j);
-                               j = i;
-                               for(int x=0;x<seg.length;x++)
-                                       if(seg[x] == null) throw new 
NullPointerException("In splitIntoSegs: "+x+" is null of "+seg.length+" of 
"+segNo);
-                               FECCodec codec = 
FECCodec.getCodec(splitfileAlgorithm, seg.length);
-                               SplitFileInserterSegment s = new 
SplitFileInserterSegment(this, codec, seg, ctx, getCHKOnly, segNo);
-                               segs.add(s);
-                               
-                               if(i == dataBlocks) break;
-                               segNo++;
-                       }
-               }
-               parent.notifyClients();
-               return (SplitFileInserterSegment[]) segs.toArray(new 
SplitFileInserterSegment[segs.size()]);
-       }
-       
-       public void start() throws InserterException {
-               for(int i=0;i<segments.length;i++)
-                       segments[i].start();
-       }
-
-       public void encodedSegment(SplitFileInserterSegment segment) {
-               Logger.minor(this, "Encoded segment "+segment.segNo+" of 
"+this);
-               synchronized(this) {
-                       for(int i=0;i<segments.length;i++) {
-                               if(segments[i] == null || 
!segments[i].isEncoded())
-                                       return;
-                       }
-               }
-               cb.onBlockSetFinished(this);
-       }
-       
-       public void segmentHasURIs(SplitFileInserterSegment segment) {
-               if(haveSentMetadata) {
-                       Logger.error(this, "WTF? Already sent metadata");
-                       return;
-               }
-               
-               synchronized(this) {
-                       for(int i=0;i<segments.length;i++) {
-                               if(!segments[i].hasURIs())
-                                       return;
-                       }
-               }
-               
-               Logger.minor(this, "Have URIs from all segments");
-               boolean missingURIs;
-               Metadata m = null;
-               synchronized(this) {
-                       // Create metadata
-                       FreenetURI[] dataURIs = getDataURIs();
-                       FreenetURI[] checkURIs = getCheckURIs();
-                       
-                       Logger.minor(this, "Data URIs: "+dataURIs.length+", 
check URIs: "+checkURIs.length);
-                       
-                       missingURIs = anyNulls(dataURIs) || anyNulls(checkURIs);
-                       
-                       if(!missingURIs) {
-                               // Create Metadata
-                               m = new Metadata(splitfileAlgorithm, dataURIs, 
checkURIs, segmentSize, checkSegmentSize, cm, dataLength, compressionCodec, 
isMetadata);
-                       }
-                       haveSentMetadata = true;
-               }
-               if(missingURIs) {
-                       Logger.minor(this, "Missing URIs");
-                       // Error
-                       fail(new 
InserterException(InserterException.INTERNAL_ERROR, "Missing URIs after 
encoding", null));
-                       return;
-               } else
-                       cb.onMetadata(m, this);
-       }
-       
-       private void fail(InserterException e) {
-               synchronized(this) {
-                       if(finished) return;
-                       finished = true;
-               }
-               cb.onFailure(e, this);
-       }
-
-       // FIXME move this to somewhere
-       private static boolean anyNulls(Object[] array) {
-               for(int i=0;i<array.length;i++)
-                       if(array[i] == null) return true;
-               return false;
-       }
-
-       private FreenetURI[] getCheckURIs() {
-               // Copy the check block URIs from each segment into a single FreenetURI[].
-               FreenetURI[] uris = new FreenetURI[countCheckBlocks];
-               int x = 0;
-               for(int i=0;i<segments.length;i++) {
-                       FreenetURI[] segURIs = segments[i].getCheckURIs();
-                       if(x + segURIs.length > countCheckBlocks) 
-                               throw new IllegalStateException("x="+x+", 
segURIs="+segURIs.length+", countCheckBlocks="+countCheckBlocks);
-                       System.arraycopy(segURIs, 0, uris, x, segURIs.length);
-                       x += segURIs.length;
-               }
-
-               if(uris.length != x)
-                       throw new IllegalStateException("Total is wrong");
-               
-               return uris;
-       }
-
-       private FreenetURI[] getDataURIs() {
-               // Copy the data block URIs from each segment into a single FreenetURI[].
-               FreenetURI[] uris = new FreenetURI[countDataBlocks];
-               int x = 0;
-               for(int i=0;i<segments.length;i++) {
-                       FreenetURI[] segURIs = segments[i].getDataURIs();
-                       if(x + segURIs.length > countDataBlocks) 
-                               throw new IllegalStateException("x="+x+", 
segURIs="+segURIs.length+", countDataBlocks="+countDataBlocks);
-                       System.arraycopy(segURIs, 0, uris, x, segURIs.length);
-                       x += segURIs.length;
-               }
-
-               if(uris.length != x)
-                       throw new IllegalStateException("Total is wrong");
-               
-               return uris;
-       }
-
-       public BaseClientPutter getParent() {
-               return parent;
-       }
-
-       public void segmentFinished(SplitFileInserterSegment segment) {
-               Logger.minor(this, "Segment finished: "+segment);
-               boolean allGone = true;
-               synchronized(this) {
-                       if(finished) return;
-                       for(int i=0;i<segments.length;i++)
-                               if(!segments[i].isFinished()) allGone = false;
-                       
-                       InserterException e = segment.getException();
-                       if(e != null && e.isFatal()) {
-                               cancel();
-                       } else {
-                               if(!allGone) return;
-                       }
-                       finished = true;
-               }
-               try {
-               // Finished !!
-               FailureCodeTracker tracker = new FailureCodeTracker(true);
-               boolean allSucceeded = true;
-               for(int i=0;i<segments.length;i++) {
-                       InserterException e = segments[i].getException();
-                       if(e == null) continue;
-                       allSucceeded = false;
-                       if(e.errorCodes != null)
-                               tracker.merge(e.errorCodes);
-                       tracker.inc(e.getMode());
-               }
-               if(allSucceeded)
-                       cb.onSuccess(this);
-               else {
-                       InserterException e;
-                       if(tracker.isFatal(true))
-                               cb.onFailure(new 
InserterException(InserterException.FATAL_ERRORS_IN_BLOCKS, tracker, null), 
this);
-                       else
-                               cb.onFailure(new 
InserterException(InserterException.TOO_MANY_RETRIES_IN_BLOCKS, tracker, null), 
this);
-               }
-               } catch (Throwable t) {
-                       // We MUST tell the parent *something*!
-                       Logger.error(this, "Caught "+t, t);
-                       cb.onFailure(new 
InserterException(InserterException.INTERNAL_ERROR), this);
-               }
-       }
-
-       public void cancel() {
-               synchronized(this) {
-                       if(finished) return;
-                       finished = true;
-               }
-               for(int i=0;i<segments.length;i++)
-                       segments[i].cancel();
-       }
-
-}

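The segment grouping done by splitIntoSegments() above is plain fixed-size chunking, with the final segment holding the remainder (and a single segment when segmentSize is -1 or the file fits into one segment). A standalone sketch of just that arithmetic, with the FEC and insert wiring omitted; the helper name is invented for illustration:

    // Split blocks into chunks of at most segmentSize; the last chunk holds
    // the remainder. E.g. 300 blocks with segmentSize 128 give segments of
    // 128, 128 and 44 blocks.
    static Object[][] chunk(Object[] blocks, int segmentSize) {
        // Single segment if segmentSize is -1 (or otherwise non-positive)
        // or the whole file fits into one segment.
        if(segmentSize <= 0 || blocks.length <= segmentSize)
            return new Object[][] { blocks };
        int count = (blocks.length + segmentSize - 1) / segmentSize;
        Object[][] segs = new Object[count][];
        int j = 0;
        for(int s = 0; s < count; s++) {
            int len = Math.min(segmentSize, blocks.length - j);
            segs[s] = new Object[len];
            System.arraycopy(blocks, j, segs[s], 0, len);
            j += len;
        }
        return segs;
    }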
Copied: 
branches/freenet-freejvms/src/freenet/client/async/SplitFileInserter.java (from 
rev 7998, trunk/freenet/src/freenet/client/async/SplitFileInserter.java)

Deleted: 
branches/freenet-freejvms/src/freenet/client/async/SplitFileInserterSegment.java
===================================================================
--- trunk/freenet/src/freenet/client/async/SplitFileInserterSegment.java        
2006-02-03 22:35:15 UTC (rev 7998)
+++ 
branches/freenet-freejvms/src/freenet/client/async/SplitFileInserterSegment.java
    2006-02-03 22:55:27 UTC (rev 7999)
@@ -1,256 +0,0 @@
-package freenet.client.async;
-
-import java.io.IOException;
-
-import freenet.client.FECCodec;
-import freenet.client.FailureCodeTracker;
-import freenet.client.InserterContext;
-import freenet.client.InserterException;
-import freenet.client.Metadata;
-import freenet.keys.ClientCHKBlock;
-import freenet.keys.ClientKey;
-import freenet.keys.FreenetURI;
-import freenet.support.Bucket;
-import freenet.support.Logger;
-
-public class SplitFileInserterSegment implements PutCompletionCallback {
-
-       final SplitFileInserter parent;
-       final FECCodec splitfileAlgo;
-       final Bucket[] dataBlocks;
-       final Bucket[] checkBlocks;
-       final FreenetURI[] dataURIs;
-       final FreenetURI[] checkURIs;
-       final SingleBlockInserter[] dataBlockInserters;
-       final SingleBlockInserter[] checkBlockInserters;
-       final InserterContext blockInsertContext;
-       final int segNo;
-       private boolean encoded;
-       private boolean finished;
-       private final boolean getCHKOnly;
-       private boolean hasURIs;
-       private InserterException toThrow;
-       private final FailureCodeTracker errors;
-       private int blocksGotURI;
-       private int blocksCompleted;
-       
-       public SplitFileInserterSegment(SplitFileInserter parent, FECCodec 
splitfileAlgo, Bucket[] origDataBlocks, InserterContext blockInsertContext, 
boolean getCHKOnly, int segNo) {
-               this.parent = parent;
-               this.getCHKOnly = getCHKOnly;
-               this.errors = new FailureCodeTracker(true);
-               this.blockInsertContext = blockInsertContext;
-               this.splitfileAlgo = splitfileAlgo;
-               this.dataBlocks = origDataBlocks;
-               int checkBlockCount = splitfileAlgo == null ? 0 : 
splitfileAlgo.countCheckBlocks();
-               checkBlocks = new Bucket[checkBlockCount];
-               checkURIs = new FreenetURI[checkBlockCount];
-               dataURIs = new FreenetURI[origDataBlocks.length];
-               dataBlockInserters = new SingleBlockInserter[dataBlocks.length];
-               checkBlockInserters = new 
SingleBlockInserter[checkBlocks.length];
-               parent.parent.addBlocks(dataURIs.length+checkURIs.length);
-               
parent.parent.addMustSucceedBlocks(dataURIs.length+checkURIs.length);
-               this.segNo = segNo;
-       }
-       
-       public void start() throws InserterException {
-               for(int i=0;i<dataBlockInserters.length;i++) {
-                       dataBlockInserters[i] = 
-                               new SingleBlockInserter(parent.parent, 
dataBlocks[i], (short)-1, FreenetURI.EMPTY_CHK_URI, blockInsertContext, this, 
false, ClientCHKBlock.DATA_LENGTH, i, getCHKOnly, false);
-                       dataBlockInserters[i].schedule();
-               }
-               if(splitfileAlgo == null) {
-                       // Don't need to encode blocks
-               } else {
-                       // Encode blocks
-                       Thread t = new Thread(new EncodeBlocksRunnable(), 
"Blocks encoder");
-                       t.setDaemon(true);
-                       t.start();
-               }
-       }
-       
-       private class EncodeBlocksRunnable implements Runnable {
-               
-               public void run() {
-                       encode();
-               }
-       }
-
-       void encode() {
-               try {
-                       splitfileAlgo.encode(dataBlocks, checkBlocks, 
ClientCHKBlock.DATA_LENGTH, blockInsertContext.bf);
-                       // Start the inserts
-                       for(int i=0;i<checkBlockInserters.length;i++) {
-                               checkBlockInserters[i] = 
-                                       new SingleBlockInserter(parent.parent, 
checkBlocks[i], (short)-1, FreenetURI.EMPTY_CHK_URI, blockInsertContext, this, 
false, ClientCHKBlock.DATA_LENGTH, i + dataBlocks.length, getCHKOnly, false);
-                               checkBlockInserters[i].schedule();
-                       }
-                       // Tell the parent only after we have started the inserts,
-                       // because of the counting.
-                       encoded = true;
-                       parent.encodedSegment(this);
-               } catch (IOException e) {
-                       InserterException ex = 
-                               new 
InserterException(InserterException.BUCKET_ERROR, e, null);
-                       finish(ex);
-               } catch (Throwable t) {
-                       InserterException ex = 
-                               new 
InserterException(InserterException.INTERNAL_ERROR, t, null);
-                       finish(ex);
-               }
-       }
-
-       private void finish(InserterException ex) {
-               synchronized(this) {
-                       if(finished) return;
-                       finished = true;
-                       toThrow = ex;
-               }
-               parent.segmentFinished(this);
-       }
-
-       private void finish() {
-               synchronized(this) {
-                       if(finished) return;
-                       finished = true;
-               }
-               toThrow = InserterException.construct(errors);
-               parent.segmentFinished(this);
-       }
-       
-       public void onEncode(ClientKey key, ClientPutState state) {
-               SingleBlockInserter sbi = (SingleBlockInserter)state;
-               int x = sbi.token;
-               FreenetURI uri = key.getURI();
-               synchronized(this) {
-                       if(finished) return;
-                       if(x >= dataBlocks.length) {
-                               if(checkURIs[x-dataBlocks.length] != null) {
-                                       Logger.normal(this, "Got uri twice for 
check block "+x+" on "+this);
-                                       return;
-                               }
-                               checkURIs[x-dataBlocks.length] = uri;
-                       } else {
-                               if(dataURIs[x] != null) {
-                                       Logger.normal(this, "Got uri twice for 
data block "+x+" on "+this);
-                                       return;
-                               }
-                               dataURIs[x] = uri;
-                       }
-                       blocksGotURI++;
-                       if(blocksGotURI != dataBlocks.length + 
checkBlocks.length) return;
-                       // Double check
-                       for(int i=0;i<checkURIs.length;i++) {
-                               if(checkURIs[i] == null) {
-                                       Logger.error(this, "Check URI "+i+" is 
null");
-                                       return;
-                               }
-                       }
-                       for(int i=0;i<dataURIs.length;i++) {
-                               if(dataURIs[i] == null) {
-                                       Logger.error(this, "Data URI "+i+" is 
null");
-                                       return;
-                               }
-                       }
-                       hasURIs = true;
-               }
-               parent.segmentHasURIs(this);
-       }
-
-       public void onSuccess(ClientPutState state) {
-               SingleBlockInserter sbi = (SingleBlockInserter)state;
-               int x = sbi.token;
-               if(completed(x)) return;
-               finish();
-       }
-
-       public void onFailure(InserterException e, ClientPutState state) {
-               SingleBlockInserter sbi = (SingleBlockInserter)state;
-               int x = sbi.token;
-               errors.merge(e);
-               if(completed(x)) return;
-               finish();
-       }
-
-       private boolean completed(int x) {
-               synchronized(this) {
-                       if(finished) return true;
-                       if(x >= dataBlocks.length) {
-                               if(checkBlockInserters[x-dataBlocks.length] == 
null) {
-                                       Logger.error(this, "Completed twice: 
check block "+x+" on "+this);
-                                       return true;
-                               }
-                               checkBlockInserters[x-dataBlocks.length] = null;
-                       } else {
-                               if(dataBlockInserters[x] == null) {
-                                       Logger.error(this, "Completed twice: 
data block "+x+" on "+this);
-                                       return true;
-                               }
-                               dataBlockInserters[x] = null;
-                       }
-                       blocksCompleted++;
-                       if(blocksCompleted != dataBlockInserters.length + 
checkBlockInserters.length) return true;
-                       return false;
-               }
-       }
-
-       public boolean isFinished() {
-               return finished;
-       }
-       
-       public boolean isEncoded() {
-               return encoded;
-       }
-
-       public int countCheckBlocks() {
-               return checkBlocks.length;
-       }
-
-       public FreenetURI[] getCheckURIs() {
-               return checkURIs;
-       }
-
-       public FreenetURI[] getDataURIs() {
-               return dataURIs;
-       }
-       
-       InserterException getException() {
-               return toThrow;
-       }
-
-       public void cancel() {
-               synchronized(this) {
-                       if(finished) return;
-                       finished = true;
-               }
-               if(toThrow == null)
-                       toThrow = new 
InserterException(InserterException.CANCELLED);
-               for(int i=0;i<dataBlockInserters.length;i++) {
-                       SingleBlockInserter sbi = dataBlockInserters[i];
-                       if(sbi != null)
-                               sbi.cancel();
-               }
-               for(int i=0;i<checkBlockInserters.length;i++) {
-                       SingleBlockInserter sbi = checkBlockInserters[i];
-                       if(sbi != null)
-                               sbi.cancel();
-               }
-               parent.segmentFinished(this);
-       }
-
-       public void onTransition(ClientPutState oldState, ClientPutState 
newState) {
-               Logger.error(this, "Illegal transition in 
SplitFileInserterSegment: "+oldState+" -> "+newState);
-       }
-
-       public void onMetadata(Metadata m, ClientPutState state) {
-               Logger.error(this, "Got onMetadata from "+state);
-       }
-
-       public void onBlockSetFinished(ClientPutState state) {
-               // Ignore
-               Logger.error(this, "Should not happen: 
onBlockSetFinished("+state+") on "+this);
-       }
-
-       public boolean hasURIs() {
-               return hasURIs;
-       }
-}

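Both the fetcher and inserter segments identify blocks by a single integer token: values below dataBlocks.length index data blocks, and the rest index check blocks after subtracting dataBlocks.length (see onEncode() and completed() above, and onSuccess()/onFailure() in SplitFileFetcherSegment). A tiny sketch of that mapping, with an invented helper name, purely for illustration:

    // Map a block token to its kind and array index, as the callbacks above
    // do. With 128 data and 64 check blocks, token 130 is "check block 2".
    static String describeToken(int token, int dataBlocks, int checkBlocks) {
        if(token < 0 || token >= dataBlocks + checkBlocks)
            return "unrecognized block number "+token;
        if(token < dataBlocks)
            return "data block "+token;
        return "check block "+(token - dataBlocks);
    }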
Copied: 
branches/freenet-freejvms/src/freenet/client/async/SplitFileInserterSegment.java
 (from rev 7998, 
trunk/freenet/src/freenet/client/async/SplitFileInserterSegment.java)

Modified: 
branches/freenet-freejvms/src/freenet/client/events/SimpleBlockPutEvent.java
===================================================================
--- 
branches/freenet-freejvms/src/freenet/client/events/SimpleBlockPutEvent.java    
    2006-02-03 22:35:15 UTC (rev 7998)
+++ 
branches/freenet-freejvms/src/freenet/client/events/SimpleBlockPutEvent.java    
    2006-02-03 22:55:27 UTC (rev 7999)
@@ -14,7 +14,7 @@
        }

        public String getDescription() {
-               return "Inserting simple CHK: "+key.getURI();
+               return "Inserting simple key: "+key.getURI();
        }

        public int getCode() {

Modified: 
branches/freenet-freejvms/src/freenet/client/events/SplitfileProgressEvent.java
===================================================================
--- 
branches/freenet-freejvms/src/freenet/client/events/SplitfileProgressEvent.java 
    2006-02-03 22:35:15 UTC (rev 7998)
+++ 
branches/freenet-freejvms/src/freenet/client/events/SplitfileProgressEvent.java 
    2006-02-03 22:55:27 UTC (rev 7999)
@@ -8,19 +8,22 @@
        public final int fetchedBlocks;
        public final int failedBlocks;
        public final int fatallyFailedBlocks;
-       public final int runningBlocks;
+       public final int minSuccessfulBlocks;
+       public final boolean finalizedTotal;

        public SplitfileProgressEvent(int totalBlocks, int fetchedBlocks, int 
failedBlocks, 
-                       int fatallyFailedBlocks, int runningBlocks) {
+                       int fatallyFailedBlocks, int minSuccessfulBlocks, 
boolean finalizedTotal) {
                this.totalBlocks = totalBlocks;
                this.fetchedBlocks = fetchedBlocks;
                this.failedBlocks = failedBlocks;
                this.fatallyFailedBlocks = fatallyFailedBlocks;
-               this.runningBlocks = runningBlocks;
+               this.minSuccessfulBlocks = minSuccessfulBlocks;
+               this.finalizedTotal = finalizedTotal;
        }

        public String getDescription() {
-               return "Completed 
"+(100*(fetchedBlocks+failedBlocks+fatallyFailedBlocks)/totalBlocks)+"% 
"+fetchedBlocks+"/"+totalBlocks+" (failed "+failedBlocks+", fatally 
"+fatallyFailedBlocks+", running "+runningBlocks+")";
+               return "Completed 
"+(100*(fetchedBlocks)/minSuccessfulBlocks)+"% 
"+fetchedBlocks+"/"+minSuccessfulBlocks+" (failed "+failedBlocks+", fatally 
"+fatallyFailedBlocks+", total "+totalBlocks+")" +
+                       (finalizedTotal ? " (finalized total)" : "");
        }

        public int getCode() {

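A quick usage sketch of the revised event (the values are made up for illustration): the percentage is now integer division against minSuccessfulBlocks rather than totalBlocks.

    SplitfileProgressEvent e =
        new SplitfileProgressEvent(192, 90, 3, 0, 128, true);
    // 100*90/128 = 70 by integer division, so getDescription() returns:
    // "Completed 70% 90/128 (failed 3, fatally 0, total 192) (finalized total)"
    System.out.println(e.getDescription());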
Copied: branches/freenet-freejvms/src/freenet/clients (from rev 7998, 
trunk/freenet/src/freenet/clients)

Copied: branches/freenet-freejvms/src/freenet/clients/http (from rev 7998, 
trunk/freenet/src/freenet/clients/http)

Deleted: branches/freenet-freejvms/src/freenet/clients/http/FproxyToadlet.java
===================================================================
--- trunk/freenet/src/freenet/clients/http/FproxyToadlet.java   2006-02-03 
22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/freenet/clients/http/FproxyToadlet.java       
2006-02-03 22:55:27 UTC (rev 7999)
@@ -1,66 +0,0 @@
-package freenet.clients.http;
-
-import java.io.IOException;
-import java.io.PrintWriter;
-import java.io.StringWriter;
-import java.net.MalformedURLException;
-import java.net.URI;
-
-import freenet.client.FetchException;
-import freenet.client.FetchResult;
-import freenet.client.HighLevelSimpleClient;
-import freenet.keys.FreenetURI;
-import freenet.support.Bucket;
-import freenet.support.HTMLEncoder;
-import freenet.support.Logger;
-
-public class FproxyToadlet extends Toadlet {
-
-       public FproxyToadlet(HighLevelSimpleClient client) {
-               super(client);
-       }
-
-       void handleGet(URI uri, ToadletContext ctx)
-                       throws ToadletContextClosedException, IOException {
-               String ks = uri.toString();
-               if(ks.startsWith("/"))
-                       ks = ks.substring(1);
-               FreenetURI key;
-               try {
-                       key = new FreenetURI(ks);
-               } catch (MalformedURLException e) {
-                       this.writeReply(ctx, 400, "text/html", "Invalid key", 
"<html><head><title>Invalid key</title></head><body>Expected a freenet key, but 
got "+HTMLEncoder.encode(ks)+"</body></html>");
-                       return;
-               }
-               try {
-                       Logger.minor(this, "Fproxy fetching "+key);
-                       FetchResult result = fetch(key);
-                       writeReply(ctx, 200, result.getMimeType(), "OK", 
result.asBucket());
-               } catch (FetchException e) {
-                       String msg = e.getMessage();
-                       String extra = "";
-                       if(e.errorCodes != null)
-                               extra = 
"<pre>"+e.errorCodes.toVerboseString()+"</pre>";
-                       this.writeReply(ctx, 500 /* close enough - FIXME 
probably should depend on status code */,
-                                       "text/html", msg, 
"<html><head><title>"+msg+"</title></head><body>Error: 
"+HTMLEncoder.encode(msg)+extra+"</body></html>");
-               } catch (Throwable t) {
-                       Logger.error(this, "Caught "+t, t);
-                       String msg = "<html><head><title>Internal 
Error</title></head><body><h1>Internal Error: please report</h1><pre>";
-                       StringWriter sw = new StringWriter();
-                       PrintWriter pw = new PrintWriter(sw);
-                       t.printStackTrace(pw);
-                       pw.flush();
-                       msg = msg + sw.toString() + "</pre></body></html>";
-                       this.writeReply(ctx, 500, "text/html", "Internal 
Error", msg);
-               }
-       }
-
-       void handlePut(URI uri, Bucket data, ToadletContext ctx)
-                       throws ToadletContextClosedException, IOException {
-               String notSupported = "<html><head><title>Not 
supported</title></head><body>"+
-               "Operation not supported</body></html>";
-               // FIXME should be 405? Need to let toadlets indicate what is 
allowed maybe in a callback?
-               this.writeReply(ctx, 200, "text/html", "OK", notSupported);
-       }
-
-}

Copied: branches/freenet-freejvms/src/freenet/clients/http/FproxyToadlet.java 
(from rev 7998, trunk/freenet/src/freenet/clients/http/FproxyToadlet.java)

Deleted: 
branches/freenet-freejvms/src/freenet/clients/http/SimpleToadletServer.java
===================================================================
--- trunk/freenet/src/freenet/clients/http/SimpleToadletServer.java     
2006-02-03 22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/freenet/clients/http/SimpleToadletServer.java 
2006-02-03 22:55:27 UTC (rev 7999)
@@ -1,113 +0,0 @@
-package freenet.clients.http;
-
-import java.io.File;
-import java.io.IOException;
-import java.net.InetAddress;
-import java.net.ServerSocket;
-import java.net.Socket;
-import java.net.URI;
-import java.util.Iterator;
-import java.util.LinkedList;
-
-import freenet.support.FileLoggerHook;
-import freenet.support.Logger;
-
-public class SimpleToadletServer implements ToadletContainer, Runnable {
-
-       public class ToadletElement {
-               public ToadletElement(Toadlet t2, String urlPrefix) {
-                       t = t2;
-                       prefix = urlPrefix;
-               }
-               Toadlet t;
-               String prefix;
-       }
-
-       final int port;
-       private final ServerSocket sock;
-       private final LinkedList toadlets;
-       
-       public SimpleToadletServer(int i) throws IOException {
-               this.port = i;
-               this.sock = new ServerSocket(port, 0, 
InetAddress.getByName("127.0.0.1"));
-               toadlets = new LinkedList();
-               Thread t = new Thread(this, "SimpleToadletServer");
-               t.setDaemon(true);
-               t.start();
-       }
-
-       public void register(Toadlet t, String urlPrefix, boolean atFront) {
-               ToadletElement te = new ToadletElement(t, urlPrefix);
-               if(atFront) toadlets.addFirst(te);
-               else toadlets.addLast(te);
-               t.container = this;
-       }
-
-       public Toadlet findToadlet(URI uri) {
-               Iterator i = toadlets.iterator();
-               while(i.hasNext()) {
-                       ToadletElement te = (ToadletElement) i.next();
-                       
-                       if(uri.getPath().startsWith(te.prefix))
-                               return te.t;
-               }
-               return null;
-       }
-       
-       public static void main(String[] args) throws IOException {
-        File logDir = new File("logs-toadlettest");
-        logDir.mkdir();
-        FileLoggerHook logger = new FileLoggerHook(true, new File(logDir, 
"test-1111").getAbsolutePath(), 
-                       "d (c, t, p): m", "MMM dd, yyyy HH:mm:ss:SSS", 
Logger.MINOR, false, true, 
-                       1024*1024*1024 /* 1GB of old compressed logfiles */);
-        logger.setInterval("5MINUTES");
-        Logger.setupChain();
-        Logger.globalSetThreshold(Logger.MINOR);
-        Logger.globalAddHook(logger);
-        logger.start();
-               SimpleToadletServer server = new SimpleToadletServer(1111);
-               server.register(new TrivialToadlet(null), "", true);
-               System.out.println("Bound to port 1111.");
-               while(true) {
-                       try {
-                               Thread.sleep(100000);
-                       } catch (InterruptedException e) {
-                               // Ignore
-                       }
-               }
-       }
-
-       public void run() {
-               while(true) {
-                       try {
-                               Socket conn = sock.accept();
-                               Logger.minor(this, "Accepted connection");
-                               SocketHandler sh = new SocketHandler(conn);
-                       } catch (IOException e) {
-                               Logger.minor(this, "Got IOException accepting 
conn: "+e, e);
-                               // Ignore
-                               continue;
-                       }
-               }
-       }
-       
-       public class SocketHandler implements Runnable {
-
-               Socket sock;
-               
-               public SocketHandler(Socket conn) {
-                       this.sock = conn;
-                       Thread t = new Thread(this);
-                       t.setDaemon(true);
-                       t.start();
-               }
-
-               public void run() {
-                       Logger.minor(this, "Handling connection");
-                       ToadletContextImpl.handle(sock, 
SimpleToadletServer.this);
-                       Logger.minor(this, "Handled connection");
-               }
-
-       }
-
-}

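Since findToadlet() above returns the first registered prefix that matches the request path, registration order matters: specific prefixes should go in front of any catch-all "" prefix. A hypothetical wiring sketch, not part of this commit; the port is arbitrary and the null client is a placeholder for whatever HighLevelSimpleClient the node would supply.

    public static void main(String[] args) throws IOException {
        // The server binds to 127.0.0.1 (see the constructor above).
        HighLevelSimpleClient client = null; // placeholder, illustration only
        SimpleToadletServer server = new SimpleToadletServer(8888);
        server.register(new FproxyToadlet(client), "/", true);
        server.register(new TrivialToadlet(null), "", false);
        // findToadlet() returns the first matching prefix, so every request
        // path (all start with "/") is routed to FproxyToadlet; the
        // catch-all "" prefix at the back is only a fallback.
    }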
Copied: 
branches/freenet-freejvms/src/freenet/clients/http/SimpleToadletServer.java 
(from rev 7998, trunk/freenet/src/freenet/clients/http/SimpleToadletServer.java)

Deleted: branches/freenet-freejvms/src/freenet/clients/http/Toadlet.java
===================================================================
--- trunk/freenet/src/freenet/clients/http/Toadlet.java 2006-02-03 22:35:15 UTC 
(rev 7998)
+++ branches/freenet-freejvms/src/freenet/clients/http/Toadlet.java     
2006-02-03 22:55:27 UTC (rev 7999)
@@ -1,98 +0,0 @@
-package freenet.clients.http;
-
-import java.io.IOException;
-import java.net.URI;
-
-import freenet.client.FetchException;
-import freenet.client.FetchResult;
-import freenet.client.HighLevelSimpleClient;
-import freenet.client.InsertBlock;
-import freenet.client.InserterException;
-import freenet.keys.FreenetURI;
-import freenet.support.Bucket;
-
-/**
- * Replacement for servlets. Just an easy to use HTTP interface, which is
- * compatible with continuations (eventually). You must extend this class
- * and provide the abstract methods. Apologies but we can't do it as an
- * interface and still have continuation compatibility; we can only 
- * suspend in a member function at this level in most implementations.
- * 
- * When we eventually implement continuations, these will require very
- * little thread overhead: We can suspend while running a freenet
- * request, and only grab a thread when we are either doing I/O or doing
- * computation in the derived class. We can suspend while doing I/O too;
- * on systems with NIO, we use that, on systems without it, we just run
- * the fetch on another (or this) thread. With no need to change any
- * APIs, and no danger of exploding memory use (unlike the traditional
- * NIO servlets approach).
- */
-public abstract class Toadlet {
-
-       protected Toadlet(HighLevelSimpleClient client) {
-               this.client = client;
-       }
-       
-       private final HighLevelSimpleClient client;
-       ToadletContainer container;
-       
-       /**
-        * Handle a GET request.
-        * Must be implemented by the client.
-        * @param uri The URI (relative to this client's document root) to
-        * be fetched.
-        * @throws IOException 
-        * @throws ToadletContextClosedException 
-        */
-       abstract void handleGet(URI uri, ToadletContext ctx) throws 
ToadletContextClosedException, IOException;
-
-       /**
-        * Likewise for a PUT request.
-        */
-       abstract void handlePut(URI uri, Bucket data, ToadletContext ctx) 
throws ToadletContextClosedException, IOException;
-       
-       /**
-        * Client calls from the above messages to run a freenet request.
-        * This method may block (or suspend).
-        */
-       FetchResult fetch(FreenetURI uri) throws FetchException {
-               // For now, just run it blocking.
-               return client.fetch(uri);
-       }
-
-       FreenetURI insert(InsertBlock insert, boolean getCHKOnly) throws 
InserterException {
-               // For now, just run it blocking.
-               return client.insert(insert, getCHKOnly);
-       }
-
-       /**
-        * Client calls to write a reply to the HTTP requestor.
-        */
-       void writeReply(ToadletContext ctx, int code, String mimeType, String 
desc, byte[] data, int offset, int length) throws 
ToadletContextClosedException, IOException {
-               ctx.sendReplyHeaders(code, desc, null, mimeType, length);
-               ctx.writeData(data, offset, length);
-       }
-
-       /**
-        * Client calls to write a reply to the HTTP requestor.
-        */
-       void writeReply(ToadletContext ctx, int code, String mimeType, String 
desc, Bucket data) throws ToadletContextClosedException, IOException {
-               ctx.sendReplyHeaders(code, desc, null, mimeType, data.size());
-               ctx.writeData(data);
-       }
-
-       void writeReply(ToadletContext ctx, int code, String mimeType, String 
desc, String reply) throws ToadletContextClosedException, IOException {
-               byte[] buf = reply.getBytes("ISO-8859-1");
-               ctx.sendReplyHeaders(code, desc, null, mimeType, buf.length);
-               ctx.writeData(buf, 0, buf.length);
-       }
-       
-       /**
-        * Get the client impl. DO NOT call the blocking methods on it!!
-        * Just use it for configuration etc.
-        */
-       protected HighLevelSimpleClient getClientImpl() {
-               return client;
-       }
-       
-}

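The class comment above says toadlets are written by subclassing and implementing the two abstract handlers. A minimal illustrative subclass follows; it is hypothetical and not part of this commit, and it must live in the freenet.clients.http package because handleGet()/handlePut() are package-private.

    package freenet.clients.http;

    import java.io.IOException;
    import java.net.URI;

    import freenet.client.HighLevelSimpleClient;
    import freenet.support.Bucket;

    public class HelloToadlet extends Toadlet {

        public HelloToadlet(HighLevelSimpleClient client) {
            super(client);
        }

        // Answer every GET with a fixed page via the writeReply() helper above.
        void handleGet(URI uri, ToadletContext ctx)
                throws ToadletContextClosedException, IOException {
            writeReply(ctx, 200, "text/html", "OK",
                    "<html><head><title>Hello</title></head><body>Hello from "
                    + uri.getPath() + "</body></html>");
        }

        // PUT is not supported by this toadlet.
        void handlePut(URI uri, Bucket data, ToadletContext ctx)
                throws ToadletContextClosedException, IOException {
            writeReply(ctx, 200, "text/html", "OK",
                    "<html><body>PUT not supported</body></html>");
        }
    }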
Copied: branches/freenet-freejvms/src/freenet/clients/http/Toadlet.java (from 
rev 7998, trunk/freenet/src/freenet/clients/http/Toadlet.java)

Deleted: 
branches/freenet-freejvms/src/freenet/clients/http/ToadletContainer.java
===================================================================
--- trunk/freenet/src/freenet/clients/http/ToadletContainer.java        
2006-02-03 22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/freenet/clients/http/ToadletContainer.java    
2006-02-03 22:55:27 UTC (rev 7999)
@@ -1,19 +0,0 @@
-package freenet.clients.http;
-
-import java.net.URI;
-
-/** Interface for toadlet containers. Toadlets should register here. */
-public interface ToadletContainer {
-       
-       /** Register a Toadlet. All requests whose URL starts with the given
-        * prefix will be passed to this toadlet.
-        * @param atFront If true, add to the front of the list (so it is
-        * checked first); otherwise add to the back of the list (checked last).
-        */
-       public void register(Toadlet t, String urlPrefix, boolean atFront);
-
-       /**
-        * Find a Toadlet by URI.
-        */
-       public Toadlet findToadlet(URI uri);
-}

Copied: 
branches/freenet-freejvms/src/freenet/clients/http/ToadletContainer.java (from 
rev 7998, trunk/freenet/src/freenet/clients/http/ToadletContainer.java)

Deleted: branches/freenet-freejvms/src/freenet/clients/http/ToadletContext.java
===================================================================
--- trunk/freenet/src/freenet/clients/http/ToadletContext.java  2006-02-03 
22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/freenet/clients/http/ToadletContext.java      
2006-02-03 22:55:27 UTC (rev 7999)
@@ -1,34 +0,0 @@
-package freenet.clients.http;
-
-import java.io.IOException;
-
-import freenet.support.Bucket;
-import freenet.support.MultiValueTable;
-
-/**
- * Represents the context for a single request. It is used as a token
- * when the Toadlet wants to, for example, write a reply.
- */
-public interface ToadletContext {
-
-       /**
-        * Write reply headers.
-        * @param code HTTP code.
-        * @param desc HTTP code description.
-        * @param mvt Any extra headers.
-        * @param mimeType The MIME type of the reply.
-        * @param length The length of the reply.
-        */
-       void sendReplyHeaders(int code, String desc, MultiValueTable mvt, 
String mimeType, long length) throws ToadletContextClosedException, IOException;
-
-       /**
-        * Write data. Note you must send reply headers first.
-        */
-       void writeData(byte[] data, int offset, int length) throws 
ToadletContextClosedException, IOException;
-
-       /**
-        * Write data from a bucket. You must send reply headers first.
-        */
-       void writeData(Bucket data) throws ToadletContextClosedException, 
IOException;
-
-}

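The contract documented above (send the reply headers first, then exactly contentLength bytes of body) is what Toadlet.writeReply() already implements. Spelled out directly against the interface, assuming ctx is the ToadletContext passed into a toadlet handler (which already declares IOException):

    byte[] body = "<html><body>OK</body></html>".getBytes("ISO-8859-1");
    // Headers first: code, description, extra headers, MIME type, length...
    ctx.sendReplyHeaders(200, "OK", null, "text/html", body.length);
    // ...then exactly the body bytes the headers promised.
    ctx.writeData(body, 0, body.length);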
Copied: branches/freenet-freejvms/src/freenet/clients/http/ToadletContext.java 
(from rev 7998, trunk/freenet/src/freenet/clients/http/ToadletContext.java)

Deleted: 
branches/freenet-freejvms/src/freenet/clients/http/ToadletContextClosedException.java
===================================================================
--- trunk/freenet/src/freenet/clients/http/ToadletContextClosedException.java   
2006-02-03 22:35:15 UTC (rev 7998)
+++ 
branches/freenet-freejvms/src/freenet/clients/http/ToadletContextClosedException.java
       2006-02-03 22:55:27 UTC (rev 7999)
@@ -1,5 +0,0 @@
-package freenet.clients.http;
-
-public class ToadletContextClosedException extends Exception {
-
-}

Copied: 
branches/freenet-freejvms/src/freenet/clients/http/ToadletContextClosedException.java
 (from rev 7998, 
trunk/freenet/src/freenet/clients/http/ToadletContextClosedException.java)

Deleted: 
branches/freenet-freejvms/src/freenet/clients/http/ToadletContextImpl.java
===================================================================
--- trunk/freenet/src/freenet/clients/http/ToadletContextImpl.java      
2006-02-03 22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/freenet/clients/http/ToadletContextImpl.java  
2006-02-03 22:55:27 UTC (rev 7999)
@@ -1,250 +0,0 @@
-package freenet.clients.http;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-import java.net.Socket;
-import java.net.URI;
-import java.net.URISyntaxException;
-import java.util.Enumeration;
-
-import freenet.support.Bucket;
-import freenet.support.BucketTools;
-import freenet.support.Logger;
-import freenet.support.MultiValueTable;
-import freenet.support.URLDecoder;
-import freenet.support.URLEncodedFormatException;
-import freenet.support.io.LineReadingInputStream;
-import freenet.support.io.TooLongException;
-
-/**
- * ToadletContext implementation, including all the icky HTTP parsing etc.
- * An actual ToadletContext object represents a request, after we have parsed 
the 
- * headers. It provides methods to send replies.
- * @author root
- *
- */
-public class ToadletContextImpl implements ToadletContext {
-
-       private final Socket sock;
-       private final MultiValueTable headers;
-       private final OutputStream sockOutputStream;
-       /** Is the context closed? If so, don't allow any more writes. This is 
because there
-        * may be later requests.
-        */
-       private boolean closed;
-       
-       public ToadletContextImpl(Socket sock, MultiValueTable headers) throws 
IOException {
-               this.sock = sock;
-               this.headers = headers;
-               this.closed = false;
-               sockOutputStream = sock.getOutputStream();
-       }
-
-       private void close() {
-               closed = true;
-       }
-
-       private void sendMethodNotAllowed(String method, boolean 
shouldDisconnect) throws ToadletContextClosedException, IOException {
-               if(closed) throw new ToadletContextClosedException();
-               MultiValueTable mvt = new MultiValueTable();
-               mvt.put("Allow", "GET, PUT");
-               sendError(sockOutputStream, 405, "Method not allowed", 
shouldDisconnect, mvt);
-       }
-
-       private static void sendError(OutputStream os, int code, String 
message, boolean shouldDisconnect, MultiValueTable mvt) throws IOException {
-               sendError(os, code, message,
"<html><head><title>"+message+"</title></head><body><h1>"+message+"</h1></body></html>",
 shouldDisconnect, mvt);
-       }
-       
-       private static void sendError(OutputStream os, int code, String 
message, String htmlMessage, boolean disconnect, MultiValueTable mvt) throws 
IOException {
-               if(mvt == null) mvt = new MultiValueTable();
-               if(disconnect)
-                       mvt.put("Connection", "close");
-               byte[] messageBytes = htmlMessage.getBytes("ISO-8859-1");
-               sendReplyHeaders(os, code, message, mvt, "text/html", 
messageBytes.length);
-               os.write(messageBytes);
-       }
-       
-       private void sendNoToadletError(boolean shouldDisconnect) throws 
ToadletContextClosedException, IOException {
-               if(closed) throw new ToadletContextClosedException();
-               sendError(sockOutputStream, 404, "Service not found", 
shouldDisconnect, null);
-       }
-       
-       private static void sendURIParseError(OutputStream os, boolean 
shouldDisconnect) throws IOException {
-               sendError(os, 400, "URI parse error", shouldDisconnect, null);
-       }
-
-       public void sendReplyHeaders(int replyCode, String replyDescription, 
MultiValueTable mvt, String mimeType, long contentLength) throws 
ToadletContextClosedException, IOException {
-               if(closed) throw new ToadletContextClosedException();
-               sendReplyHeaders(sockOutputStream, replyCode, replyDescription, 
mvt, mimeType, contentLength);
-       }
-
-       static void sendReplyHeaders(OutputStream sockOutputStream, int 
replyCode, String replyDescription, MultiValueTable mvt, String mimeType, long 
contentLength) throws IOException {
-               // Construct headers
-               if(mvt == null)
-                       mvt = new MultiValueTable();
-               if(mimeType != null)
-                       mvt.put("content-type", mimeType);
-               if(contentLength >= 0)
-                       mvt.put("content-length", Long.toString(contentLength));
-               StringBuffer buf = new StringBuffer(1024);
-               buf.append("HTTP/1.1 ");
-               buf.append(replyCode);
-               buf.append(' ');
-               buf.append(replyDescription);
-               buf.append("\r\n");
-               for(Enumeration e = mvt.keys();e.hasMoreElements();) {
-                       String key = (String) e.nextElement();
-                       Object[] list = mvt.getArray(key);
-                       for(int i=0;i<list.length;i++) {
-                               String val = (String) list[i];
-                               buf.append(key);
-                               buf.append(": ");
-                               buf.append(val);
-                               buf.append("\r\n");
-                       }
-               }
-               buf.append("\r\n");
-               sockOutputStream.write(buf.toString().getBytes("US-ASCII"));
-       }
-       
-       /**
-        * Handle an incoming connection. Blocking, obviously.
-        */
-       public static void handle(Socket sock, ToadletContainer container) {
-               try {
-                       InputStream is = sock.getInputStream();
-                       
-                       LineReadingInputStream lis = new 
LineReadingInputStream(is);
-
-                       while(true) {
-                               
-                               String firstLine = lis.readLine(32768, 128);
-                               
-                               Logger.minor(ToadletContextImpl.class, "first 
line: "+firstLine);
-                               
-                               String[] split = firstLine.split(" ");
-                               
-                               if(split.length != 3)
-                                       throw new ParseException("Could not 
parse request line (split.length="+split.length+"): "+firstLine);
-                               
-                               if(!split[2].startsWith("HTTP/1."))
-                                       throw new ParseException("Unrecognized 
protocol "+split[2]);
-                               
-                               URI uri;
-                               try {
-                                       uri = new 
URI(URLDecoder.decode(split[1]));
-                               } catch (URISyntaxException e) {
-                                       
sendURIParseError(sock.getOutputStream(), true);
-                                       return;
-                               } catch (URLEncodedFormatException e) {
-                                       
sendURIParseError(sock.getOutputStream(), true);
-                                       return;
-                               }
-                               
-                               String method = split[0];
-                               
-                               MultiValueTable headers = new MultiValueTable();
-                               
-                               while(true) {
-                                       String line = lis.readLine(32768, 128);
-                                       
//System.out.println("Length="+line.length()+": "+line);
-                                       if(line.length() == 0) break;
-                                       int index = line.indexOf(':');
-                                       String before = line.substring(0, 
index);
-                                       String after = line.substring(index+1);
-                                       after = after.trim();
-                                       headers.put(before, after);
-                               }
-                               
-                               // Handle it.
-                               
-                               Toadlet t = container.findToadlet(uri);
-                               
-                               ToadletContextImpl ctx = new 
ToadletContextImpl(sock, headers);
-                               
-                               boolean shouldDisconnect = 
shouldDisconnectAfterHandled(split[2].equals("HTTP/1.0"), headers);
-                               
-                               if(t == null)
-                                       
ctx.sendNoToadletError(shouldDisconnect);
-                               
-                               if(method.equals("GET")) {
-                                       
-                                       t.handleGet(uri, ctx);
-                                       ctx.close();
-                                       
-                               } else if(method.equals("PUT")) {
-                       
-                                       t.handlePut(uri, null, ctx);
-                                       ctx.close();
-
-                               } else if(method.equals("POST")) {
-                                       
-                                       Logger.error(ToadletContextImpl.class, 
"POST not supported");
-                                       ctx.sendMethodNotAllowed(method, 
shouldDisconnect);
-                                       ctx.close();
-                                       
-                               } else {
-                                       ctx.sendMethodNotAllowed(method, 
shouldDisconnect);
-                                       ctx.close();
-                               }
-                               
-                               if(shouldDisconnect) {
-                                       sock.close();
-                                       return;
-                               }
-                       }
-                       
-               } catch (ParseException e) {
-                       try {
-                               sendError(sock.getOutputStream(), 400, "Parse 
error: "+e.getMessage(), true, null);
-                       } catch (IOException e1) {
-                               // Ignore
-                       }
-               } catch (TooLongException e) {
-                       try {
-                               sendError(sock.getOutputStream(), 400, "Line 
too long parsing headers", true, null);
-                       } catch (IOException e1) {
-                               // Ignore
-                       }
-               } catch (IOException e) {
-                       return;
-               } catch (ToadletContextClosedException e) {
-                       Logger.error(ToadletContextImpl.class, 
"ToadletContextClosedException while handling connection!");
-                       return;
-               }
-       }
-
-       private static boolean shouldDisconnectAfterHandled(boolean isHTTP10, 
MultiValueTable headers) {
-               String connection = (String) headers.get("connection");
-               if(connection != null) {
-                       if(connection.equalsIgnoreCase("close"))
-                               return true;
-                       
-                       if(connection.equalsIgnoreCase("keep-alive"))
-                               return false;
-               }
-               if(!isHTTP10) return true;
-               // WTF?
-               return true;
-       }
-       
-       static class ParseException extends Exception {
-
-               ParseException(String string) {
-                       super(string);
-               }
-
-       }
-
-       public void writeData(byte[] data, int offset, int length) throws 
ToadletContextClosedException, IOException {
-               if(closed) throw new ToadletContextClosedException();
-               sockOutputStream.write(data, offset, length);
-       }
-
-       public void writeData(Bucket data) throws 
ToadletContextClosedException, IOException {
-               if(closed) throw new ToadletContextClosedException();
-               BucketTools.copyTo(data, sockOutputStream, Long.MAX_VALUE);
-       }
-
-}
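
For illustration only (not part of this revision): the deleted sendReplyHeaders() above assembles the HTTP/1.1 status line and header block by hand and terminates the block with a blank line before any body bytes are written. A minimal standalone sketch of that pattern, using a plain java.util.Map in place of MultiValueTable and a hypothetical class name:

    import java.io.IOException;
    import java.io.OutputStream;
    import java.util.Map;

    // Hypothetical standalone class, not the Freenet implementation.
    class ReplyHeaderSketch {
        static void sendReplyHeaders(OutputStream os, int code, String desc,
                Map<String, String> headers, String mimeType, long contentLength)
                throws IOException {
            // Inject the standard headers, as the deleted code did.
            if (mimeType != null)
                headers.put("content-type", mimeType);
            if (contentLength >= 0)
                headers.put("content-length", Long.toString(contentLength));
            StringBuffer buf = new StringBuffer(1024);
            buf.append("HTTP/1.1 ").append(code).append(' ').append(desc).append("\r\n");
            for (Map.Entry<String, String> e : headers.entrySet())
                buf.append(e.getKey()).append(": ").append(e.getValue()).append("\r\n");
            buf.append("\r\n"); // blank line ends the header block
            os.write(buf.toString().getBytes("US-ASCII"));
        }
    }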

Copied: 
branches/freenet-freejvms/src/freenet/clients/http/ToadletContextImpl.java 
(from rev 7998, trunk/freenet/src/freenet/clients/http/ToadletContextImpl.java)

Deleted: branches/freenet-freejvms/src/freenet/clients/http/TrivialToadlet.java
===================================================================
--- trunk/freenet/src/freenet/clients/http/TrivialToadlet.java  2006-02-03 
22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/freenet/clients/http/TrivialToadlet.java      
2006-02-03 22:55:27 UTC (rev 7999)
@@ -1,32 +0,0 @@
-package freenet.clients.http;
-
-import java.io.IOException;
-import java.net.URI;
-
-import freenet.client.HighLevelSimpleClient;
-import freenet.support.Bucket;
-import freenet.support.HTMLEncoder;
-
-public class TrivialToadlet extends Toadlet {
-
-       TrivialToadlet(HighLevelSimpleClient client) {
-               super(client);
-       }
-
-       void handleGet(URI uri, ToadletContext ctx) throws 
ToadletContextClosedException, IOException {
-               String fetched = uri.toString();
-               String encFetched = HTMLEncoder.encode(fetched);
-               String reply = "<html><head><title>You requested "+encFetched+
-                       "</title></head><body>You fetched <a 
href=\""+encFetched+"\">"+
-                       encFetched+"</a>.</body></html>";
-               this.writeReply(ctx, 200, "text/html", "OK", reply);
-       }
-
-       void handlePut(URI uri, Bucket data, ToadletContext ctx) throws 
ToadletContextClosedException, IOException {
-               String notSupported = "<html><head><title>Not 
supported</title></head><body>"+
-                       "Operation not supported</body>";
-               // This really should be 405, but then we'd have to put an 
Allow header in.
-               this.writeReply(ctx, 200, "text/html", "OK", notSupported);
-       }
-
-}

Copied: branches/freenet-freejvms/src/freenet/clients/http/TrivialToadlet.java 
(from rev 7998, trunk/freenet/src/freenet/clients/http/TrivialToadlet.java)

Modified: branches/freenet-freejvms/src/freenet/crypt/DSA.java
===================================================================
--- branches/freenet-freejvms/src/freenet/crypt/DSA.java        2006-02-03 
22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/freenet/crypt/DSA.java        2006-02-03 
22:55:27 UTC (rev 7999)
@@ -28,7 +28,7 @@
                                    Random r) {
        BigInteger k;
        do {
-           k=new NativeBigInteger(160, r);
+           k=new NativeBigInteger(256, r);
        } while (k.compareTo(g.getQ())>-1 || k.compareTo(BigInteger.ZERO)==0);
        return sign(g, x, k, m);
     }
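
The loop above draws the DSA signing nonce k by rejection sampling: a fresh random candidate of the stated bit length is discarded until it lands in the open interval (0, q); this revision widens the candidates from 160 to 256 bits (likewise in DSAPrivateKey below). A standalone sketch of the same loop, assuming plain java.math.BigInteger in place of NativeBigInteger:

    import java.math.BigInteger;
    import java.security.SecureRandom;

    // Hypothetical helper mirroring the rejection loop above.
    class NonceSketch {
        static BigInteger randomK(BigInteger q, SecureRandom r, int bits) {
            BigInteger k;
            do {
                k = new BigInteger(bits, r);              // uniform in [0, 2^bits)
            } while (k.compareTo(q) >= 0 || k.signum() == 0); // reject k >= q and k == 0
            return k;
        }
    }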

Modified: branches/freenet-freejvms/src/freenet/crypt/DSAPrivateKey.java
===================================================================
--- branches/freenet-freejvms/src/freenet/crypt/DSAPrivateKey.java      
2006-02-03 22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/freenet/crypt/DSAPrivateKey.java      
2006-02-03 22:55:27 UTC (rev 7999)
@@ -25,7 +25,7 @@
     public DSAPrivateKey(DSAGroup g, Random r) {
         BigInteger x;
         do {
-            x = new NativeBigInteger(160, r);
+            x = new NativeBigInteger(256, r);
         } while (x.compareTo(g.getQ()) > -1);
         this.x = x;
     }

Modified: branches/freenet-freejvms/src/freenet/crypt/DSAPublicKey.java
===================================================================
--- branches/freenet-freejvms/src/freenet/crypt/DSAPublicKey.java       
2006-02-03 22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/freenet/crypt/DSAPublicKey.java       
2006-02-03 22:55:27 UTC (rev 7999)
@@ -2,6 +2,8 @@
 package freenet.crypt;

 import java.math.BigInteger;
+import java.security.MessageDigest;
+import java.security.NoSuchAlgorithmException;
 import java.io.*;

 import freenet.support.HexUtil;
@@ -13,6 +15,8 @@
     private final BigInteger y;
        /** A cache of the hexadecimal string representation of y */
     private final String yAsHexString; 
+    
+    public static final int PADDED_SIZE = 1024;

     private final DSAGroup group;

@@ -38,7 +42,17 @@
                this(g,g.getG().modPow(p.getX(), g.getP()));
     }

-    public BigInteger getY() {
+    public DSAPublicKey(InputStream is) throws IOException {
+               group=(DSAGroup) DSAGroup.read(is);
+               y=Util.readMPI(is);
+               this.yAsHexString = HexUtil.biToHex(y);
+    }
+    
+    public DSAPublicKey(byte[] pubkeyAsBytes) throws IOException {
+       this(new ByteArrayInputStream(pubkeyAsBytes));
+       }
+
+       public BigInteger getY() {
                return y;
     }

@@ -95,9 +109,7 @@
 //    }
 //
     public static CryptoKey read(InputStream i) throws IOException {
-               BigInteger y=Util.readMPI(i);
-               DSAGroup g=(DSAGroup)CryptoKey.read(i);
-               return new DSAPublicKey(g, y);
+               return new DSAPublicKey(i);
     }

     public int keyId() {
@@ -123,6 +135,26 @@
                return bytes;
     }

+    public byte[] asBytesHash() {
+       try {
+                       MessageDigest md256 = 
MessageDigest.getInstance("SHA-256");
+                       return md256.digest(asBytes());
+               } catch (NoSuchAlgorithmException e) {
+                       throw new Error(e);
+               }
+    }
+    
+    public byte[] asPaddedBytes() {
+       byte[] asBytes = asBytes();
+       if(asBytes.length == PADDED_SIZE)
+               return asBytes;
+       if(asBytes.length > PADDED_SIZE)
+               throw new Error("Cannot fit key in "+PADDED_SIZE+" - real size 
is "+asBytes.length);
+       byte[] padded = new byte[PADDED_SIZE];
+       System.arraycopy(asBytes, 0, padded, 0, asBytes.length);
+       return padded;
+    }
+    
     public byte[] fingerprint() {
                synchronized(this) {
                        if(fingerprint == null)
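
asBytesHash() above hashes the asBytes() serialisation with SHA-256, and asPaddedBytes() zero-pads that serialisation to PADDED_SIZE so it fits a fixed-size field; the 32-byte digest is presumably what the new PUBKEY_HASH field in DMT (below) carries. A minimal sketch of the matching check a receiver could perform, assuming the hash is taken over the unpadded serialisation as in asBytesHash():

    import java.security.MessageDigest;
    import java.security.NoSuchAlgorithmException;
    import java.util.Arrays;

    // Hypothetical helper, not part of the Freenet sources.
    class PubkeyHashCheckSketch {
        static boolean matches(byte[] pubkeyAsBytes, byte[] expectedHash)
                throws NoSuchAlgorithmException {
            MessageDigest md256 = MessageDigest.getInstance("SHA-256");
            return Arrays.equals(md256.digest(pubkeyAsBytes), expectedHash);
        }
    }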

Modified: branches/freenet-freejvms/src/freenet/io/comm/DMT.java
===================================================================
--- branches/freenet-freejvms/src/freenet/io/comm/DMT.java      2006-02-03 
22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/freenet/io/comm/DMT.java      2006-02-03 
22:55:27 UTC (rev 7999)
@@ -23,6 +23,8 @@
 import java.util.List;

 import freenet.keys.Key;
+import freenet.keys.NodeCHK;
+import freenet.keys.NodeSSK;
 import freenet.support.BitArray;
 import freenet.support.Buffer;
 import freenet.support.ShortBuffer;
@@ -80,6 +82,9 @@
     public static final String STREAM_SEQNO = "streamSequenceNumber";
     public static final String IS_LOCAL = "isLocal";
     public static final String ANY_TIMED_OUT = "anyTimedOut";
+    public static final String PUBKEY_HASH = "pubkeyHash";
+    public static final String NEED_PUB_KEY = "needPubKey";
+    public static final String PUBKEY_AS_BYTES = "pubkeyAsBytes";

        //Diagnostic
        public static final MessageType ping = new MessageType("ping") {{
@@ -499,15 +504,15 @@
     }

     // FNP messages
-    public static final MessageType FNPDataRequest = new 
MessageType("FNPDataRequest") {{
+    public static final MessageType FNPCHKDataRequest = new 
MessageType("FNPCHKDataRequest") {{
         addField(UID, Long.class);
         addField(HTL, Short.class);
         addField(NEAREST_LOCATION, Double.class);
-        addField(FREENET_ROUTING_KEY, Key.class);
+        addField(FREENET_ROUTING_KEY, NodeCHK.class);
     }};

-    public static final Message createFNPDataRequest(long id, short htl, Key 
key, double nearestLocation) {
-        Message msg = new Message(FNPDataRequest);
+    public static final Message createFNPCHKDataRequest(long id, short htl, 
NodeCHK key, double nearestLocation) {
+        Message msg = new Message(FNPCHKDataRequest);
         msg.set(UID, id);
         msg.set(HTL, htl);
         msg.set(FREENET_ROUTING_KEY, key);
@@ -515,6 +520,24 @@
         return msg;
     }

+    public static final MessageType FNPSSKDataRequest = new 
MessageType("FNPSSKDataRequest") {{
+        addField(UID, Long.class);
+        addField(HTL, Short.class);
+        addField(NEAREST_LOCATION, Double.class);
+        addField(FREENET_ROUTING_KEY, NodeSSK.class);
+       addField(NEED_PUB_KEY, Boolean.class);
+    }};
+    
+    public static final Message createFNPSSKDataRequest(long id, short htl, 
NodeSSK key, double nearestLocation, boolean needPubKey) {
+        Message msg = new Message(FNPSSKDataRequest);
+        msg.set(UID, id);
+        msg.set(HTL, htl);
+        msg.set(FREENET_ROUTING_KEY, key);
+        msg.set(NEAREST_LOCATION, nearestLocation);
+        msg.set(NEED_PUB_KEY, needPubKey);
+        return msg;
+    }
+    
     // Hit our tail, try a different node.
     public static final MessageType FNPRejectedLoop = new 
MessageType("FNPRejectLoop") {{
         addField(UID, Long.class);
@@ -560,13 +583,13 @@
         return msg;
     }

-    public static final MessageType FNPDataFound = new 
MessageType("FNPDataFound") {{
+    public static final MessageType FNPCHKDataFound = new 
MessageType("FNPCHKDataFound") {{
         addField(UID, Long.class);
         addField(BLOCK_HEADERS, ShortBuffer.class);
     }};

-    public static final Message createFNPDataFound(long id, byte[] buf) {
-        Message msg = new Message(FNPDataFound);
+    public static final Message createFNPCHKDataFound(long id, byte[] buf) {
+        Message msg = new Message(FNPCHKDataFound);
         msg.set(UID, id);
         msg.set(BLOCK_HEADERS, new ShortBuffer(buf));
         return msg;
@@ -658,15 +681,88 @@

     public static final short DATA_INSERT_REJECTED_VERIFY_FAILED = 1;
     public static final short DATA_INSERT_REJECTED_RECEIVE_FAILED = 2;
+    public static final short DATA_INSERT_REJECTED_SSK_ERROR = 3;

     public static final String getDataInsertRejectedReason(short reason) {
         if(reason == DATA_INSERT_REJECTED_VERIFY_FAILED)
             return "Verify failed";
         else if(reason == DATA_INSERT_REJECTED_RECEIVE_FAILED)
             return "Receive failed";
+        else if(reason == DATA_INSERT_REJECTED_SSK_ERROR)
+               return "SSK error";
         return "Unknown reason code: "+reason;
     }
+
+    public static final MessageType FNPSSKInsertRequest = new 
MessageType("FNPSSKInsertRequest") {{
+       addField(UID, Long.class);
+       addField(HTL, Short.class);
+       addField(FREENET_ROUTING_KEY, NodeSSK.class);
+        addField(NEAREST_LOCATION, Double.class);
+        addField(BLOCK_HEADERS, ShortBuffer.class);
+        addField(PUBKEY_HASH, ShortBuffer.class);
+        addField(DATA, ShortBuffer.class);
+    }};

+       public static Message createFNPSSKInsertRequest(long uid, short htl, 
NodeSSK myKey, double closestLocation, byte[] headers, byte[] data, byte[] 
pubKeyHash) {
+               Message msg = new Message(FNPSSKInsertRequest);
+               msg.set(UID, uid);
+               msg.set(HTL, htl);
+               msg.set(FREENET_ROUTING_KEY, myKey);
+               msg.set(NEAREST_LOCATION, closestLocation);
+               msg.set(BLOCK_HEADERS, new ShortBuffer(headers));
+               msg.set(PUBKEY_HASH, new ShortBuffer(pubKeyHash));
+               msg.set(DATA, new ShortBuffer(data));
+               return msg;
+       }
+
+       public static final MessageType FNPSSKDataFound = new 
MessageType("FNPSSKDataFound") {{
+       addField(UID, Long.class);
+        addField(BLOCK_HEADERS, ShortBuffer.class);
+        addField(DATA, ShortBuffer.class);
+       }};
+       
+       public static Message createFNPSSKDataFound(long uid, byte[] headers, 
byte[] data) {
+               Message msg = new Message(FNPSSKDataFound);
+               msg.set(UID, uid);
+               msg.set(BLOCK_HEADERS, new ShortBuffer(headers));
+               msg.set(DATA, new ShortBuffer(data));
+               return msg;
+       }
+       
+       public static MessageType FNPSSKAccepted = new 
MessageType("FNPSSKAccepted") {{
+               addField(UID, Long.class);
+               addField(NEED_PUB_KEY, Boolean.class);
+       }};
+       
+       public static final Message createFNPSSKAccepted(long uid, boolean 
needPubKey) {
+               Message msg = new Message(FNPSSKAccepted);
+               msg.set(UID, uid);
+               msg.set(NEED_PUB_KEY, needPubKey);
+               return msg;
+       }
+       
+       public static MessageType FNPSSKPubKey = new 
MessageType("FNPSSKPubKey") {{
+               addField(UID, Long.class);
+               addField(PUBKEY_AS_BYTES, ShortBuffer.class);
+       }};
+       
+       public static Message createFNPSSKPubKey(long uid, byte[] pubkey) {
+               Message msg = new Message(FNPSSKPubKey);
+               msg.set(UID, uid);
+               msg.set(PUBKEY_AS_BYTES, new ShortBuffer(pubkey));
+               return msg;
+       }
+       
+       public static MessageType FNPSSKPubKeyAccepted = new 
MessageType("FNPSSKPubKeyAccepted") {{
+               addField(UID, Long.class);
+       }};
+       
+       public static Message createFNPSSKPubKeyAccepted(long uid) {
+               Message msg = new Message(FNPSSKPubKeyAccepted);
+               msg.set(UID, uid);
+               return msg;
+       }
+       
     public static final MessageType FNPPing = new MessageType("FNPPing") {{
         addField(PING_SEQNO, Integer.class);
     }};
@@ -812,7 +908,7 @@
         msg.set(HTL, htl);
         return msg;
     }
-    
+
        public static void init() { }

 }
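
The DMT additions above follow the file's existing pattern: each MessageType registers its typed fields in an instance initialiser, and a static create...() factory fills the values in. The NEED_PUB_KEY flag apparently lets a requester that only holds the pubkey hash ask the data holder to return the full key in an FNPSSKPubKey message. A self-contained sketch of the registration pattern (hypothetical MessageType/Message classes, not Freenet's own):

    import java.util.LinkedHashMap;
    import java.util.Map;

    class MessageTypeSketch {
        final String name;
        final Map<String, Class<?>> fields = new LinkedHashMap<String, Class<?>>();
        MessageTypeSketch(String name) { this.name = name; }
        void addField(String key, Class<?> type) { fields.put(key, type); }
    }

    class MessageSketch {
        final MessageTypeSketch type;
        final Map<String, Object> values = new LinkedHashMap<String, Object>();
        MessageSketch(MessageTypeSketch type) { this.type = type; }
        void set(String key, Object value) {
            Class<?> expected = type.fields.get(key);
            if (expected == null || !expected.isInstance(value))
                throw new IllegalArgumentException("bad field: " + key);
            values.put(key, value);
        }
    }

    class DMTSketch {
        static final String UID = "uid";
        static final String NEED_PUB_KEY = "needPubKey";

        // Field registration in an instance initialiser, as DMT does above.
        static final MessageTypeSketch SSK_DATA_REQUEST =
            new MessageTypeSketch("FNPSSKDataRequest") {{
                addField(UID, Long.class);
                addField(NEED_PUB_KEY, Boolean.class);
            }};

        static MessageSketch createSSKDataRequest(long uid, boolean needPubKey) {
            MessageSketch msg = new MessageSketch(SSK_DATA_REQUEST);
            msg.set(UID, Long.valueOf(uid));
            msg.set(NEED_PUB_KEY, Boolean.valueOf(needPubKey));
            return msg;
        }
    }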

Copied: branches/freenet-freejvms/src/freenet/io/comm/IOStatisticCollector.java 
(from rev 7998, trunk/freenet/src/freenet/io/comm/IOStatisticCollector.java)

Modified: branches/freenet-freejvms/src/freenet/io/comm/Peer.java
===================================================================
--- branches/freenet-freejvms/src/freenet/io/comm/Peer.java     2006-02-03 
22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/freenet/io/comm/Peer.java     2006-02-03 
22:55:27 UTC (rev 7999)
@@ -73,7 +73,7 @@
             throw new PeerParseException(e);
         }
     }
-
+    
     public boolean isNull() {
                return _port == 0;
        }
@@ -110,11 +110,25 @@
        }

        public String toString() {
-               return (_address != null ? _address.getHostAddress() : "null") 
+ ":" + _port;
+               return (_address != null ? getHostName(_address) : "null") + 
":" + _port;
        }

        public void writeToDataOutputStream(DataOutputStream dos) throws 
IOException {
                dos.write(_address.getAddress());
                dos.writeInt(_port);
        }
+
+       /**
+        * Return the hostname or the IP address of the given InetAddress.
+        * Does not attempt to do a reverse lookup; if the hostname is
+        * known, return it, otherwise return the textual IP address.
+        */
+       public static String getHostName(InetAddress primaryIPAddress) {
+               String s = primaryIPAddress.toString();
+               String addr = s.substring(0, s.indexOf('/')).trim();
+               if(addr.length() == 0)
+                       return primaryIPAddress.getHostAddress();
+               else
+                       return addr;
+       }
 }
\ No newline at end of file
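
A usage sketch for the new getHostName() (assuming the Freenet sources on the classpath): InetAddress.toString() has the form "hostname/literal-IP", or "/literal-IP" when no hostname is recorded, so taking the part before the slash avoids a reverse DNS lookup.

    import java.net.InetAddress;
    import java.net.UnknownHostException;

    import freenet.io.comm.Peer;

    class GetHostNameDemo {
        public static void main(String[] args) throws UnknownHostException {
            InetAddress named = InetAddress.getByName("localhost");
            InetAddress bare = InetAddress.getByAddress(new byte[] { 127, 0, 0, 1 });
            System.out.println(Peer.getHostName(named)); // "localhost": name already known
            System.out.println(Peer.getHostName(bare));  // "127.0.0.1": falls back to the literal
        }
    }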

Modified: branches/freenet-freejvms/src/freenet/io/comm/UdpSocketManager.java
===================================================================
--- branches/freenet-freejvms/src/freenet/io/comm/UdpSocketManager.java 
2006-02-03 22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/freenet/io/comm/UdpSocketManager.java 
2006-02-03 22:55:27 UTC (rev 7999)
@@ -94,40 +94,36 @@

        public void run() { // Listen for packets
                try {
-                       while (/*_active*/true) {
-                               try {
-                                       DatagramPacket packet = getPacket();
-                                       // Check for timedout _filters
-                                       removeTimedOutFilters();
-                                       // Check for matched _filters
-                                       if (packet != null) {
-                                               Peer peer = new 
Peer(packet.getAddress(), packet.getPort());
-                                               byte[] data = packet.getData();
-                                               int offset = packet.getOffset();
-                                               int length = packet.getLength();
-                                               if (lowLevelFilter != null) {
-                                                       try {
-                                                               
lowLevelFilter.process(data, offset, length, peer);
-                                                               
Logger.minor(this,
-                                                                               
"Successfully handled packet length " + length);
-                                                       } catch (Throwable t) {
-                                                               
Logger.error(this, "Caught " + t + " from "
-                                                                               
+ lowLevelFilter, t);
-                                                       }
-                                               } else {
-                                                       // Create a bogus 
context since no filter
-                                                       Message m = 
decodePacket(data, offset, length,
-                                                                       new 
DummyPeerContext(peer));
-                                                       if (m != null)
-                                                               checkFilters(m);
-                                               }
-                                       } else
-                                               Logger.minor(this, "Null 
packet");
-                               } catch (Throwable t) {
-                                       Logger.error(this, "Caught " + t, t);
-                               }
-                       }
+                       runLoop();
+               } catch (Throwable t) {
+                       // Impossible? It keeps on exiting. We get the below,
+                       // but not this...
+                       try {
+                               System.err.print(t.getClass().getName());
+                               System.err.println();
+                       } catch (Throwable tt) {};
+                       try {
+                               System.err.print(t.getMessage());
+                               System.err.println();
+                       } catch (Throwable tt) {};
+                       try {
+                               System.gc();
+                               System.runFinalization();
+                               System.gc();
+                               System.runFinalization();
+                       } catch (Throwable tt) {}
+                       try {
+                               Runtime r = Runtime.getRuntime();
+                               System.err.print(r.freeMemory());
+                               System.err.println();
+                               System.err.print(r.totalMemory());
+                               System.err.println();
+                       } catch (Throwable tt) {};
+                       try {
+                               t.printStackTrace();
+                       } catch (Throwable tt) {};
                } finally {
+                       System.err.println("run() exiting");
                        Logger.error(this, "run() exiting");
                        synchronized (this) {
                                _isDone = true;
@@ -136,6 +132,49 @@
                }
        }

+       private void runLoop() {
+               while (/*_active*/true) {
+                       try {
+                               realRun();
+                       } catch (Throwable t) {
+                               Logger.error(this, "Caught " + t, t);
+                               System.err.println("Caught "+t);
+                               t.printStackTrace(System.err);
+                       }
+               }
+       }
+       
+       private void realRun() {
+               DatagramPacket packet = getPacket();
+               // Check for timedout _filters
+               removeTimedOutFilters();
+               // Check for matched _filters
+               if (packet != null) {
+                       Peer peer = new Peer(packet.getAddress(), 
packet.getPort());
+                       byte[] data = packet.getData();
+                       int offset = packet.getOffset();
+                       int length = packet.getLength();
+                       if (lowLevelFilter != null) {
+                               try {
+                                       Logger.minor(this, "Processing packet 
of length "+length+" from "+peer);
+                                       lowLevelFilter.process(data, offset, 
length, peer);
+                                       Logger.minor(this,
+                                                       "Successfully handled 
packet length " + length);
+                               } catch (Throwable t) {
+                                       Logger.error(this, "Caught " + t + " 
from "
+                                                       + lowLevelFilter, t);
+                               }
+                       } else {
+                               // Create a bogus context since no filter
+                               Message m = decodePacket(data, offset, length,
+                                               new DummyPeerContext(peer));
+                               if (m != null)
+                                       checkFilters(m);
+                       }
+               } else
+                       Logger.minor(this, "Null packet");
+       }
+       
        /**
         * Decode a packet from data and a peer.
         * Can be called by LowLevelFilter's.
@@ -169,11 +208,15 @@
                DatagramPacket packet = new DatagramPacket(new byte[1500], 
1500);
                try {
                        _sock.receive(packet);
+                       // TODO: keep?
+                       IOStatisticCollector.addInfo(packet.getAddress() + ":" 
+ packet.getPort(),
+                                       packet.getLength(), 0);
                } catch (SocketTimeoutException e1) {
                        packet = null;
                } catch (IOException e2) {
                        throw new RuntimeException(e2);
                }
+               Logger.minor(this, "Received packet");
                return packet;
        }

@@ -322,10 +365,11 @@
                                        i.remove();
                                        ret = m;
                                        Logger.debug(this, "Matching from 
_unclaimed");
+                                       break;
                                }
                        }
-                       Logger.minor(this, "Not in _unclaimed");
                        if (ret == null) {
+                               Logger.minor(this, "Not in _unclaimed");
                            // Insert filter into filter list in order of 
timeout
                                ListIterator i = _filters.listIterator();
                                while (true) {
@@ -354,7 +398,7 @@
                                        // Precaution against filter getting 
matched between being added to _filters and
                                        // here - bug discovered by Mason
                                    boolean fmatched = false;
-                                   while(!(fmatched = filter.matched() || 
filter.droppedConnection() != null)) {
+                                   while(!(fmatched = (filter.matched() || 
filter.droppedConnection() != null))) {
                                        long wait = 
filter.getTimeout()-System.currentTimeMillis();
                                        if(wait > 0)
                                            filter.wait(wait);
@@ -429,6 +473,10 @@
                DatagramPacket packet = new DatagramPacket(blockToSend, 
blockToSend.length);
                packet.setAddress(destination.getAddress());
                packet.setPort(destination.getPort());
+               
+               // TODO: keep?
+               IOStatisticCollector.addInfo(packet.getAddress() + ":" + 
packet.getPort(),
+                               0, packet.getLength());
                try {
                        _sock.send(packet);
                } catch (IOException e) {
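
The listener restructuring above splits the old monolithic run() into three layers: realRun() performs one receive/dispatch iteration, runLoop() retries it forever, and run() only catches whatever somehow escapes, printing diagnostics before the finally block marks the thread done. A generic sketch of that shape (hypothetical class):

    // Hypothetical worker illustrating the run()/runLoop()/realRun() layering.
    class WorkerSketch implements Runnable {
        public void run() {
            try {
                runLoop();
            } catch (Throwable t) {
                // Last-ditch diagnostics; the loop itself should never let this out.
                t.printStackTrace();
            } finally {
                System.err.println("run() exiting");
            }
        }
        private void runLoop() {
            while (true) {
                try {
                    realRun();
                } catch (Throwable t) {
                    t.printStackTrace(); // log and keep listening
                }
            }
        }
        private void realRun() {
            // one receive/dispatch iteration goes here
        }
    }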

Modified: branches/freenet-freejvms/src/freenet/io/xfer/BlockTransmitter.java
===================================================================
--- branches/freenet-freejvms/src/freenet/io/xfer/BlockTransmitter.java 
2006-02-03 22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/freenet/io/xfer/BlockTransmitter.java 
2006-02-03 22:55:27 UTC (rev 7999)
@@ -294,10 +294,12 @@
                                MessageFilter mfAllReceived = 
MessageFilter.create().setType(DMT.allReceived).setField(DMT.UID, 
_uid).setTimeout(SEND_TIMEOUT).setSource(_destination);
                                MessageFilter mfSendAborted = 
MessageFilter.create().setType(DMT.sendAborted).setField(DMT.UID, 
_uid).setTimeout(SEND_TIMEOUT).setSource(_destination);
                 msg = 
_usm.waitFor(mfMissingPacketNotification.or(mfAllReceived.or(mfSendAborted)));
+                Logger.minor(this, "Got "+msg);
             } catch (DisconnectedException e) {
                // Ignore, see below
                msg = null;
             }
+            Logger.minor(this, "Got "+msg);
             if(!_destination.isConnected()) {
                 Logger.normal(this, "Terminating send "+_uid+" to 
"+_destination+" from "+_usm.getPortNumber()+" because node disconnected while 
waiting");
                 synchronized(_senderThread) {

Modified: branches/freenet-freejvms/src/freenet/keys/CHKBlock.java
===================================================================
--- branches/freenet-freejvms/src/freenet/keys/CHKBlock.java    2006-02-03 
22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/freenet/keys/CHKBlock.java    2006-02-03 
22:55:27 UTC (rev 7999)
@@ -1,26 +1,8 @@
 package freenet.keys;

-import java.io.IOException;
 import java.security.MessageDigest;
 import java.security.NoSuchAlgorithmException;
-import java.util.Arrays;

-import freenet.crypt.BlockCipher;
-import freenet.crypt.PCFBMode;
-import freenet.crypt.UnsupportedCipherException;
-import freenet.crypt.ciphers.Rijndael;
-import freenet.node.Node;
-import freenet.support.ArrayBucket;
-import freenet.support.ArrayBucketFactory;
-import freenet.support.Bucket;
-import freenet.support.BucketFactory;
-import freenet.support.BucketTools;
-import freenet.support.Logger;
-import freenet.support.SimpleReadOnlyArrayBucket;
-import freenet.support.compress.CompressionOutputSizeException;
-import freenet.support.compress.Compressor;
-import freenet.support.compress.DecompressException;
-
 import gnu.crypto.hash.Sha256;
 import gnu.crypto.Registry;
 import gnu.crypto.hash.HashFactory;
@@ -40,10 +22,12 @@
 public class CHKBlock implements KeyBlock {

     final byte[] data;
-    final byte[] header;
+    final byte[] headers;
     final short hashIdentifier;
     final NodeCHK chk;
     public static final int MAX_LENGTH_BEFORE_COMPRESSION = Integer.MAX_VALUE;
+    public static final int TOTAL_HEADERS_LENGTH = 36;
+    public static final int DATA_LENGTH = 32768;

     public String toString() {
         return super.toString()+": chk="+chk;
@@ -52,8 +36,8 @@
     /**
      * @return The header for this key. DO NOT MODIFY THIS DATA!
      */
-    public byte[] getHeader() {
-        return header;
+    public byte[] getHeaders() {
+        return headers;
     }

     /**
@@ -69,9 +53,10 @@

     public CHKBlock(byte[] data2, byte[] header2, NodeCHK key, boolean verify) 
throws CHKVerifyException {
         data = data2;
-        header = header2;
-        if(header.length < 2) throw new IllegalArgumentException("Too short: 
"+header.length);
-        hashIdentifier = (short)(((header[0] & 0xff) << 8) + (header[1] & 
0xff));
+        headers = header2;
+        if(headers.length != TOTAL_HEADERS_LENGTH)
+               throw new IllegalArgumentException("Wrong length: 
"+headers.length+" should be "+TOTAL_HEADERS_LENGTH);
+        hashIdentifier = (short)(((headers[0] & 0xff) << 8) + (headers[1] & 
0xff));
         this.chk = key;
 //        Logger.debug(CHKBlock.class, "Data length: "+data.length+", header 
length: "+header.length);
         if(!verify) return;
@@ -87,7 +72,7 @@
             throw new Error(e);
         }

-        md.update(header);
+        md.update(headers);
         md.update(data);
         byte[] hash = md.digest();
         byte[] check = chk.routingKey;
@@ -202,4 +187,12 @@
        public Key getKey() {
         return chk;
     }
+
+       public byte[] getRawHeaders() {
+               return headers;
+       }
+
+       public byte[] getRawData() {
+               return data;
+       }
 }
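
A CHK verifies when the SHA-256 digest of the headers followed by the data equals the 32-byte routing key, which is the check the constructor above performs (via gnu.crypto in the real code); the headers are now required to be exactly TOTAL_HEADERS_LENGTH bytes with the 2-byte hash identifier at the front. A standalone sketch of the digest comparison using the standard JCA provider:

    import java.security.MessageDigest;
    import java.security.NoSuchAlgorithmException;
    import java.util.Arrays;

    // Hypothetical helper mirroring the verify step above.
    class CHKVerifySketch {
        static boolean verifies(byte[] headers, byte[] data, byte[] routingKey)
                throws NoSuchAlgorithmException {
            MessageDigest md = MessageDigest.getInstance("SHA-256");
            md.update(headers);
            md.update(data);
            return Arrays.equals(md.digest(), routingKey);
        }
    }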

Modified: branches/freenet-freejvms/src/freenet/keys/CHKEncodeException.java
===================================================================
--- branches/freenet-freejvms/src/freenet/keys/CHKEncodeException.java  
2006-02-03 22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/freenet/keys/CHKEncodeException.java  
2006-02-03 22:55:27 UTC (rev 7999)
@@ -6,7 +6,7 @@
  * Exception thrown when a CHK encoding fails.
  * Specifically, it is thrown when the data is too big to encode.
  */
-public class CHKEncodeException extends Exception {
+public class CHKEncodeException extends KeyEncodeException {
        static final long serialVersionUID = -1;
     public CHKEncodeException() {
         super();

Modified: branches/freenet-freejvms/src/freenet/keys/CHKVerifyException.java
===================================================================
--- branches/freenet-freejvms/src/freenet/keys/CHKVerifyException.java  
2006-02-03 22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/freenet/keys/CHKVerifyException.java  
2006-02-03 22:55:27 UTC (rev 7999)
@@ -5,7 +5,7 @@
  * 
  * Exception thrown when a CHK doesn't verify.
  */
-public class CHKVerifyException extends Exception {
+public class CHKVerifyException extends KeyVerifyException {
        static final long serialVersionUID = -1;

        /**

Modified: branches/freenet-freejvms/src/freenet/keys/ClientCHK.java
===================================================================
--- branches/freenet-freejvms/src/freenet/keys/ClientCHK.java   2006-02-03 
22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/freenet/keys/ClientCHK.java   2006-02-03 
22:55:27 UTC (rev 7999)
@@ -32,8 +32,6 @@
     static final short EXTRA_LENGTH = 5;
     /** The length of the decryption key */
     static final short CRYPTO_KEY_LENGTH = 32;
-    /** Code for 256-bit AES with PCFB */
-    static final short ALGO_AES_PCFB_256 = 1;

     /**
      * @param routingKey The routing key. This is the overall hash of the
@@ -68,7 +66,7 @@
         if(extra == null || extra.length < 5)
             throw new MalformedURLException();
         cryptoAlgorithm = (short)(((extra[0] & 0xff) << 8) + (extra[1] & 
0xff));
-               if(cryptoAlgorithm != ALGO_AES_PCFB_256)
+               if(cryptoAlgorithm != Key.ALGO_AES_PCFB_256_SHA256)
                        throw new MalformedURLException("Invalid crypto 
algorithm");
         controlDocument = (extra[2] & 0x02) != 0;
         compressionAlgorithm = (short)(((extra[3] & 0xff) << 8) + (extra[4] & 
0xff));
@@ -83,7 +81,7 @@
                byte[] extra = new byte[EXTRA_LENGTH];
                dis.readFully(extra);
         cryptoAlgorithm = (short)(((extra[0] & 0xff) << 8) + (extra[1] & 
0xff));
-               if(cryptoAlgorithm != ALGO_AES_PCFB_256)
+               if(cryptoAlgorithm != Key.ALGO_AES_PCFB_256_SHA256)
                        throw new MalformedURLException("Invalid crypto 
algorithm");
         compressionAlgorithm = (short)(((extra[3] & 0xff) << 8) + (extra[4] & 
0xff));
         controlDocument = (extra[2] & 0x02) != 0;
@@ -134,7 +132,7 @@
      */
     public FreenetURI getURI() {
         byte[] extra = getExtra();
-        return new FreenetURI("CHK", "", routingKey, cryptoKey, extra);
+        return new FreenetURI("CHK", null, routingKey, cryptoKey, extra);
     }

     /**

Modified: branches/freenet-freejvms/src/freenet/keys/ClientCHKBlock.java
===================================================================
--- branches/freenet-freejvms/src/freenet/keys/ClientCHKBlock.java      
2006-02-03 22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/freenet/keys/ClientCHKBlock.java      
2006-02-03 22:55:27 UTC (rev 7999)
@@ -11,6 +11,7 @@
 import freenet.crypt.PCFBMode;
 import freenet.crypt.UnsupportedCipherException;
 import freenet.crypt.ciphers.Rijndael;
+import freenet.keys.Key.Compressed;
 import freenet.node.Node;
 import freenet.support.ArrayBucket;
 import freenet.support.ArrayBucketFactory;
@@ -64,7 +65,7 @@
      * Construct from a CHKBlock and a key.
      */
     public ClientCHKBlock(CHKBlock block, ClientCHK key2) throws 
CHKVerifyException {
-        this(block.getData(), block.getHeader(), key2, true);
+        this(block.getData(), block.getHeaders(), key2, true);
     }

     /**
@@ -93,7 +94,7 @@
      */
     public Bucket decode(BucketFactory bf, int maxLength) throws 
CHKDecodeException, IOException {
         // Overall hash already verified, so first job is to decrypt.
-        if(key.cryptoAlgorithm != ClientCHK.ALGO_AES_PCFB_256)
+        if(key.cryptoAlgorithm != Key.ALGO_AES_PCFB_256_SHA256)
             throw new UnsupportedOperationException();
         BlockCipher cipher;
         try {
@@ -107,8 +108,8 @@
             throw new CHKDecodeException("Crypto key too short");
         cipher.initialize(key.cryptoKey);
         PCFBMode pcfb = new PCFBMode(cipher);
-        byte[] hbuf = new byte[header.length-2];
-        System.arraycopy(header, 2, hbuf, 0, header.length-2);
+        byte[] hbuf = new byte[headers.length-2];
+        System.arraycopy(headers, 2, hbuf, 0, headers.length-2);
         byte[] dbuf = new byte[data.length];
         System.arraycopy(data, 0, dbuf, 0, data.length);
         // Decipher header first - functions as IV
@@ -140,7 +141,7 @@
         byte[] output = new byte[size];
         // No particular reason to check the padding, is there?
         System.arraycopy(dbuf, 0, output, 0, size);
-        return Key.decompress(key, output, bf, maxLength, 
key.compressionAlgorithm, Math.min(maxLength, MAX_LENGTH_BEFORE_COMPRESSION));
+        return Key.decompress(key.isCompressed(), output, bf, 
Math.min(maxLength, MAX_LENGTH_BEFORE_COMPRESSION), key.compressionAlgorithm, 
false);
     }

     /**
@@ -159,69 +160,13 @@
         byte[] header;
         ClientCHK key;
         short compressionAlgorithm = -1;
-        // Try to compress it - even if it fits into the block,
-        // because compressing it improves its entropy.
-        boolean compressed = false;
-        if(sourceData.size() > MAX_LENGTH_BEFORE_COMPRESSION)
-            throw new CHKEncodeException("Too big");
-        if(!dontCompress) {
-               byte[] cbuf = null;
-               if(alreadyCompressedCodec >= 0) {
-                       if(sourceData.size() > MAX_COMPRESSED_DATA_LENGTH)
-                               throw new CHKEncodeException("Too big 
(precompressed)");
-                       compressionAlgorithm = alreadyCompressedCodec;
-                       cbuf = BucketTools.toByteArray(sourceData);
-                       if(sourceLength > MAX_LENGTH_BEFORE_COMPRESSION)
-                               throw new CHKEncodeException("Too big");
-               } else {
-                       if (sourceData.size() > NodeCHK.BLOCK_SIZE) {
-                                       // Determine the best algorithm
-                                       for (int i = 0; i < 
Compressor.countCompressAlgorithms(); i++) {
-                                               Compressor comp = Compressor
-                                                               
.getCompressionAlgorithmByDifficulty(i);
-                                               ArrayBucket compressedData;
-                                               try {
-                                                       compressedData = 
(ArrayBucket) comp.compress(
-                                                                       
sourceData, new ArrayBucketFactory(), NodeCHK.BLOCK_SIZE);
-                                               } catch (IOException e) {
-                                                       throw new Error(e);
-                                               } catch 
(CompressionOutputSizeException e) {
-                                                       continue;
-                                               }
-                                               if (compressedData.size() <= 
MAX_COMPRESSED_DATA_LENGTH) {
-                                                       compressionAlgorithm = 
comp
-                                                                       
.codecNumberForMetadata();
-                                                       sourceLength = 
sourceData.size();
-                                                       try {
-                                                               cbuf = 
BucketTools.toByteArray(compressedData);
-                                                               // FIXME 
provide a method in ArrayBucket
-                                                       } catch (IOException e) 
{
-                                                               throw new 
Error(e);
-                                                       }
-                                                       break;
-                                               }
-                                       }
-                               }
-                       
-               }
-               if(cbuf != null) {
-                       // Use it
-                       int compressedLength = cbuf.length;
-                finalData = new byte[compressedLength+4];
-                System.arraycopy(cbuf, 0, finalData, 4, compressedLength);
-                finalData[0] = (byte) ((sourceLength >> 24) & 0xff);
-                finalData[1] = (byte) ((sourceLength >> 16) & 0xff);
-                finalData[2] = (byte) ((sourceLength >> 8) & 0xff);
-                finalData[3] = (byte) ((sourceLength) & 0xff);
-                compressed = true;
-               }
-        }
-        if(finalData == null) {
-            if(sourceData.size() > NodeCHK.BLOCK_SIZE) {
-                throw new CHKEncodeException("Too big");
-            }
-               finalData = BucketTools.toByteArray(sourceData);
-        }
+        try {
+                       Compressed comp = Key.compress(sourceData, 
dontCompress, alreadyCompressedCodec, sourceLength, 
MAX_LENGTH_BEFORE_COMPRESSION, MAX_COMPRESSED_DATA_LENGTH, false);
+                       finalData = comp.compressedData;
+                       compressionAlgorithm = comp.compressionAlgorithm;
+               } catch (KeyEncodeException e2) {
+                       throw new CHKEncodeException(e2.getMessage(), e2);
+               }

         // Now do the actual encode

@@ -286,7 +231,7 @@
         byte[] finalHash = md256.digest();

         // Now convert it into a ClientCHK
-        key = new ClientCHK(finalHash, encKey, asMetadata, 
ClientCHK.ALGO_AES_PCFB_256, compressionAlgorithm);
+        key = new ClientCHK(finalHash, encKey, asMetadata, 
Key.ALGO_AES_PCFB_256_SHA256, compressionAlgorithm);

         try {
             return new ClientCHKBlock(data, header, key, false);
@@ -313,10 +258,7 @@
                }
     }

-    /**
-     * @return The ClientCHK for this key.
-     */
-    public ClientCHK getClientKey() {
+    public ClientKey getClientKey() {
         return key;
     }
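
The compression logic removed above (now expected to live in Key.compress) framed compressed payloads with the original length as a 4-byte big-endian prefix, so decode() can size and truncate its output buffer. A sketch of just that framing step (hypothetical helper):

    // Hypothetical helper reproducing the removed framing code.
    class LengthPrefixSketch {
        static byte[] prefixWithOriginalLength(byte[] compressed, int sourceLength) {
            byte[] out = new byte[compressed.length + 4];
            out[0] = (byte) ((sourceLength >> 24) & 0xff);
            out[1] = (byte) ((sourceLength >> 16) & 0xff);
            out[2] = (byte) ((sourceLength >> 8) & 0xff);
            out[3] = (byte) (sourceLength & 0xff);
            System.arraycopy(compressed, 0, out, 4, compressed.length);
            return out;
        }
    }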


Copied: branches/freenet-freejvms/src/freenet/keys/ClientKSK.java (from rev 
7998, trunk/freenet/src/freenet/keys/ClientKSK.java)

Modified: branches/freenet-freejvms/src/freenet/keys/ClientKey.java
===================================================================
--- branches/freenet-freejvms/src/freenet/keys/ClientKey.java   2006-02-03 
22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/freenet/keys/ClientKey.java   2006-02-03 
22:55:27 UTC (rev 7999)
@@ -11,6 +11,10 @@
        public static ClientKey getBaseKey(FreenetURI origURI) throws 
MalformedURLException {
                if(origURI.getKeyType().equals("CHK"))
                        return new ClientCHK(origURI);
+               if(origURI.getKeyType().equals("SSK"))
+                       return new ClientSSK(origURI);
+               if(origURI.getKeyType().equals("KSK"))
+                       return ClientKSK.create(origURI.getDocName());
                throw new UnsupportedOperationException("Unknown keytype from 
"+origURI);
        }


Modified: branches/freenet-freejvms/src/freenet/keys/ClientKeyBlock.java
===================================================================
--- branches/freenet-freejvms/src/freenet/keys/ClientKeyBlock.java      
2006-02-03 22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/freenet/keys/ClientKeyBlock.java      
2006-02-03 22:55:27 UTC (rev 7999)
@@ -18,4 +18,9 @@
         */
        boolean isMetadata();

+    /**
+     * @return The ClientKey for this key.
+     */
+    public ClientKey getClientKey();
+
 }

Modified: branches/freenet-freejvms/src/freenet/keys/ClientSSK.java
===================================================================
--- branches/freenet-freejvms/src/freenet/keys/ClientSSK.java   2006-02-03 
22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/freenet/keys/ClientSSK.java   2006-02-03 
22:55:27 UTC (rev 7999)
@@ -1,19 +1,22 @@
 package freenet.keys;

 import java.io.UnsupportedEncodingException;
+import java.net.MalformedURLException;
 import java.security.MessageDigest;
 import java.security.NoSuchAlgorithmException;
+import java.util.Arrays;

 import freenet.crypt.DSAPublicKey;
 import freenet.crypt.UnsupportedCipherException;
 import freenet.crypt.ciphers.Rijndael;
+import freenet.support.Logger;

 public class ClientSSK extends ClientKey {

        /** Document name */
        public final String docName;
        /** Public key */
-       public final DSAPublicKey pubKey;
+       protected DSAPublicKey pubKey;
        /** Public key hash */
        public final byte[] pubKeyHash;
        /** Encryption key */
@@ -21,18 +24,31 @@
        /** Encrypted hashed docname */
        public final byte[] ehDocname;

-       public ClientSSK(String docName, DSAPublicKey pubKey, byte[] cryptoKey) 
{
+       static final int CRYPTO_KEY_LENGTH = 32;
+       
+       public ClientSSK(String docName, byte[] pubKeyHash, byte[] extras, 
DSAPublicKey pubKey, byte[] cryptoKey) throws MalformedURLException {
                this.docName = docName;
                this.pubKey = pubKey;
-               byte[] pubKeyAsBytes = pubKey.asBytes();
+               this.pubKeyHash = pubKeyHash;
+               if(!Arrays.equals(extras, getExtraBytes()))
+                       throw new MalformedURLException("Wrong extra bytes");
+               if(pubKeyHash.length != NodeSSK.PUBKEY_HASH_SIZE)
+                       throw new MalformedURLException("Pubkey hash wrong 
length: "+pubKeyHash.length+" should be "+NodeSSK.PUBKEY_HASH_SIZE);
+               if(cryptoKey.length != CRYPTO_KEY_LENGTH)
+                       throw new MalformedURLException("Decryption key wrong 
length: "+cryptoKey.length+" should be "+CRYPTO_KEY_LENGTH);
                MessageDigest md;
                try {
                        md = MessageDigest.getInstance("SHA-256");
-                       md.update(pubKeyAsBytes);
-                       pubKeyHash = md.digest();
                } catch (NoSuchAlgorithmException e) {
                        throw new Error(e);
                }
+               if(pubKey != null) {
+                       byte[] pubKeyAsBytes = pubKey.asBytes();
+                       md.update(pubKeyAsBytes);
+                       byte[] otherPubKeyHash = md.digest();
+                       if(!Arrays.equals(otherPubKeyHash, pubKeyHash))
+                               throw new IllegalArgumentException();
+               }
                this.cryptoKey = cryptoKey;
                try {
                        md.update(docName.getBytes("UTF-8"));
@@ -48,15 +64,51 @@
                } catch (UnsupportedCipherException e) {
                        throw new Error(e);
                }
-               
        }

+       public ClientSSK(FreenetURI origURI) throws MalformedURLException {
+               this(origURI.getDocName(), origURI.getRoutingKey(), 
origURI.getExtra(), null, origURI.getCryptoKey());
+               if(!origURI.getKeyType().equalsIgnoreCase("SSK"))
+                       throw new MalformedURLException();
+       }
+       
+       public void setPublicKey(DSAPublicKey pubKey) {
+               if(this.pubKey != null && this.pubKey != pubKey && 
!this.pubKey.equals(pubKey))
+                       throw new IllegalArgumentException("Cannot reassign: 
was "+this.pubKey+" now "+pubKey);
+               this.pubKey = pubKey;
+       }
+       
        public FreenetURI getURI() {
-               return new FreenetURI("SSK", docName, pubKeyHash, cryptoKey, 
null);
+               return new FreenetURI("SSK", docName, pubKeyHash, cryptoKey, 
getExtraBytes());
        }
+       
+       protected static final byte[] getExtraBytes() {
+		// 5 bytes.
+               byte[] extra = new byte[5];

+               short cryptoAlgorithm = NodeSSK.ALGO_AES_PCFB_256_SHA256;
+               
+               extra[0] = NodeSSK.SSK_VERSION;
+               extra[1] = (byte) (cryptoAlgorithm >> 8);
+               extra[2] = (byte) cryptoAlgorithm;
+               extra[3] = (byte) (KeyBlock.HASH_SHA256 >> 8);
+               extra[4] = (byte) KeyBlock.HASH_SHA256;
+               return extra;
+       }
+
        public Key getNodeKey() {
-               return new NodeSSK(pubKeyHash, ehDocname);
+               try {
+                       return new NodeSSK(pubKeyHash, ehDocname, pubKey);
+               } catch (SSKVerifyException e) {
+                       IllegalStateException x = new 
IllegalStateException("Have already verified and yet it fails!: "+e);
+                       Logger.error(this, "Have already verified and yet it 
fails!: "+e);
+                       x.initCause(e);
+                       throw x;
+               }
        }

+       public DSAPublicKey getPubKey() {
+               return pubKey;
+       }
+
 }
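
getExtraBytes() above packs the SSK version plus two 16-bit algorithm identifiers (crypto and hash) big-endian into the 5-byte extra field, the same packing ClientCHK uses for its own extra bytes. A tiny sketch of the two-byte encode/decode (hypothetical helper):

    // Hypothetical helper for the big-endian short packing used in the extra bytes.
    class ExtraBytesSketch {
        static void putShort(byte[] buf, int off, short v) {
            buf[off] = (byte) (v >> 8);
            buf[off + 1] = (byte) v;
        }
        static short getShort(byte[] buf, int off) {
            return (short) (((buf[off] & 0xff) << 8) + (buf[off + 1] & 0xff));
        }
    }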

Modified: branches/freenet-freejvms/src/freenet/keys/ClientSSKBlock.java
===================================================================
--- branches/freenet-freejvms/src/freenet/keys/ClientSSKBlock.java      
2006-02-03 22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/freenet/keys/ClientSSKBlock.java      
2006-02-03 22:55:27 UTC (rev 7999)
@@ -12,6 +12,8 @@

        static final int DATA_DECRYPT_KEY_LENGTH = 32;

+       static public final int MAX_DECOMPRESSED_DATA_LENGTH = 32768;
+       
        /** Is metadata. Set on decode. */
        private boolean isMetadata;
        /** Has decoded? */
@@ -19,18 +21,23 @@
        /** Client-key. This contains the decryption key etc. */
        private ClientSSK key;

-       public ClientSSKBlock(byte[] data, byte[] headers, ClientSSK key) 
throws SSKVerifyException {
-               super(data, headers, (NodeSSK) key.getNodeKey());
+       public ClientSSKBlock(byte[] data, byte[] headers, ClientSSK key, 
boolean dontVerify) throws SSKVerifyException {
+               super(data, headers, (NodeSSK) key.getNodeKey(), dontVerify);
+               this.key = key;
        }

+       public ClientSSKBlock(SSKBlock block, ClientSSK key) throws 
SSKVerifyException {
+               this(block.data, block.headers, key, false);
+       }
+       
        /**
         * Decode the data.
         */
        public Bucket decode(BucketFactory factory, int maxLength) throws 
KeyDecodeException, IOException {
                /* We know the signature is valid because it is checked in the 
constructor. */
                /* We also know e(h(docname)) is valid */
-               byte[] decryptedHeaders = new byte[headers.length - 
headersOffset];
-               System.arraycopy(headers, headersOffset, decryptedHeaders, 0, 
headers.length - headersOffset);
+               byte[] decryptedHeaders = new byte[ENCRYPTED_HEADERS_LENGTH];
+               System.arraycopy(headers, headersOffset, decryptedHeaders, 0, 
ENCRYPTED_HEADERS_LENGTH);
                Rijndael aes;
                try {
                        aes = new Rijndael(256,256);
@@ -69,11 +76,10 @@
                        dataOutput = realDataOutput;
                }
         short compressionAlgorithm = 
(short)(((decryptedHeaders[DATA_DECRYPT_KEY_LENGTH+2] & 0xff) << 8) + 
(decryptedHeaders[DATA_DECRYPT_KEY_LENGTH+3] & 0xff));
-        
-               
-               decoded = true;
-               // TODO Auto-generated method stub
-               return null;
+
+        Bucket b = Key.decompress(compressionAlgorithm >= 0, dataOutput, 
factory, Math.min(MAX_DECOMPRESSED_DATA_LENGTH, maxLength), 
compressionAlgorithm, true);
+        decoded = true;
+        return b;
        }

        public boolean isMetadata() {
@@ -82,4 +88,8 @@
                return isMetadata;
        }

+       public ClientKey getClientKey() {
+               return key;
+       }
+
 }
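
The 36 decrypted header bytes that decode() works on follow the layout
documented in SSKBlock: a 32-byte data decryption key, a 2-byte
length/metadata word, and a 2-byte compression algorithm where a negative
value means uncompressed. A small sketch of pulling the compression algorithm
out of them (hypothetical helper, not part of the patch):

// A sketch (not part of this patch) of how decode() reads the tail of the 36
// decrypted header bytes: 32-byte data decryption key, 2-byte length/metadata
// word, then the 2-byte compression algorithm (negative means uncompressed).
public class DecryptedHeaderSketch {
    static final int DATA_DECRYPT_KEY_LENGTH = 32;

    static short compressionAlgorithm(byte[] decryptedHeaders) {
        return (short) (((decryptedHeaders[DATA_DECRYPT_KEY_LENGTH + 2] & 0xff) << 8)
                + (decryptedHeaders[DATA_DECRYPT_KEY_LENGTH + 3] & 0xff));
    }
}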

Modified: branches/freenet-freejvms/src/freenet/keys/FreenetURI.java
===================================================================
--- branches/freenet-freejvms/src/freenet/keys/FreenetURI.java  2006-02-03 
22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/freenet/keys/FreenetURI.java  2006-02-03 
22:55:27 UTC (rev 7999)
@@ -21,16 +21,19 @@
  * FreenetURI handles parsing and creation of the Freenet URI format, defined
  * as follows:
  * <p>
- * 
<code>freenet:[KeyType@]RoutingKey,CryptoKey[,n1=v1,n2=v2,...][/docname][//metastring]</code>
+ * 
<code>freenet:[KeyType@]RoutingKey,CryptoKey[,n1=v1,n2=v2,...][/docname][/metastring]</code>
  * </p>
  * <p>
  * where KeyType is the TLA of the key (currently SVK, SSK, KSK, or CHK). If
  * omitted, KeyType defaults to KSK.
+ * Note, however, that CHKs neither support nor require a docname, whereas
+ * KSKs and SSKs do; for a CHK, everything after the key therefore goes
+ * straight into the metastrings.
  * </p>
  * <p>
  * For KSKs, the string keyword (docname) takes the RoutingKey position and the
  * remainder of the fields are inapplicable (except metastring). Examples:
- * <code>freenet:KSK@foo//bar freenet:KSK@test.html freenet:test.html</code>.
+ * <code>freenet:KSK@foo/bar freenet:KSK@test.html freenet:test.html</code>.
  * </p>
  * <p>
  * RoutingKey is the modified Base64 encoded key value. CryptoKey is the
@@ -137,8 +140,7 @@
                // decode keyType
                int atchar = URI.indexOf('@');
                if (atchar == -1) {
-                       keyType = "KSK";
-                       atchar = colon;
+                       throw new MalformedURLException();
                } else {
                        keyType = URI.substring(colon + 1, 
atchar).toUpperCase().trim();
                }
@@ -147,30 +149,31 @@
                // decode metaString
                int slash2;
                Vector sv = new Vector();
-               while ((slash2 = URI.lastIndexOf("//")) != -1) {
-                       String s = urlDecode(URI.substring(slash2 + 
"//".length()));
+               while ((slash2 = URI.lastIndexOf("/")) != -1) {
+                       String s = URI.substring(slash2 + "/".length());
                        if (s != null)
-                               sv.addElement(urlDecode(s));
+                               sv.addElement(s);
                        URI = URI.substring(0, slash2);
                }
+               if("SSK".equals(keyType)) {
+                       // Only SSKs (and KSKs, below) take a docName; CHKs neither need nor support one.
+                       
+                       if(sv.isEmpty())
+                               throw new MalformedURLException("No docname");
+                       docName = (String) sv.remove(sv.size()-1);
+               }
+               
                if (!sv.isEmpty()) {
                        metaStr = new String[sv.size()];
                        for (int i = 0; i < metaStr.length; i++)
                                metaStr[i] = (String) 
sv.elementAt(metaStr.length - 1 - i);
                }

-               // decode docName
-               if ("KSK".equals(keyType)) {
-                       docName = urlDecode(URI);
+               if(keyType.equalsIgnoreCase("KSK")) {
+                       docName = URI;
                        return;
                }
-
-               int slash1 = URI.indexOf('/');
-               if (slash1 != -1) {
-                       docName = urlDecode(URI.substring(slash1 + 1));
-                       URI = URI.substring(0, slash1);
-               }
-
+               
                // URI now contains: routingKey[,cryptoKey][,metaInfo]
                StringTokenizer st = new StringTokenizer(URI, ",");
                try {
@@ -323,34 +326,6 @@
                        extra);
        }

-       protected static String urlDecode(String s) {
-               StringBuffer b = new StringBuffer();
-               for (int i = 0; i < s.length(); i++) {
-                       if (s.charAt(i) == '+')
-                               b.append(' ');
-                       else if (s.charAt(i) == '%') {
-                               int n = Integer.parseInt(s.substring(i + 1, i + 
3), 16);
-                               b.append((char) n);
-                               i += 2;
-                       } else
-                               b.append(s.charAt(i));
-               }
-               return b.toString();
-       }
-
-       protected static String urlEncode(String s) {
-               StringBuffer b = new StringBuffer();
-               for (int i = 0; i < s.length(); i++) {
-                       if (s.charAt(i) == ' ')
-                               b.append('+');
-                       else if (s.charAt(i) > 128 || s.charAt(i) < 44) {
-                               
b.append('%').append(Integer.toString(s.charAt(i), 16));
-                       } else
-                               b.append(s.charAt(i));
-               }
-               return b.toString();
-       }
-
        public String toString() {
                return toString(true);
        }
@@ -376,10 +351,10 @@
                }

                if (docName != null)
-                       b.append(urlEncode(docName));
+                       b.append(docName);
                if (metaStr != null) {
                        for (int i = 0; i < metaStr.length; i++) {
-                               b.append("//").append(urlEncode(metaStr[i]));
+                               b.append("/").append(metaStr[i]);
                        }
                }
                return b.toString();
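
To illustrate the new single-slash parsing introduced above for SSK URIs, here
is a self-contained sketch that mimics the split loop; the key material is a
placeholder and the class is hypothetical, not part of this patch:

import java.util.Vector;

// A minimal sketch (not part of this patch) of the new single-slash split
// order FreenetURI now uses for SSKs.
public class UriSplitSketch {
    public static void main(String[] args) {
        String uri = "SSK@ROUTINGKEY,CRYPTOKEY/mysite/images/logo.png";
        Vector parts = new Vector();
        int slash;
        while ((slash = uri.lastIndexOf('/')) != -1) {
            // Components are no longer urlDecode()d.
            parts.addElement(uri.substring(slash + 1));
            uri = uri.substring(0, slash);
        }
        // For an SSK the last component collected is the docName...
        String docName = (String) parts.remove(parts.size() - 1);
        System.out.println("docName = " + docName);                // mysite
        // ...and the rest become the metastrings, restored to path order.
        for (int i = parts.size() - 1; i >= 0; i--)
            System.out.println("metaStr = " + parts.elementAt(i)); // images, logo.png
        System.out.println("rest    = " + uri);                    // SSK@ROUTINGKEY,CRYPTOKEY
    }
}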

Copied: branches/freenet-freejvms/src/freenet/keys/InsertableClientSSK.java 
(from rev 7998, trunk/freenet/src/freenet/keys/InsertableClientSSK.java)

Modified: branches/freenet-freejvms/src/freenet/keys/Key.java
===================================================================
--- branches/freenet-freejvms/src/freenet/keys/Key.java 2006-02-03 22:35:15 UTC 
(rev 7998)
+++ branches/freenet-freejvms/src/freenet/keys/Key.java 2006-02-03 22:55:27 UTC 
(rev 7999)
@@ -7,6 +7,8 @@
 import java.security.NoSuchAlgorithmException;

 import freenet.io.WritableToDataOutputStream;
+import freenet.support.ArrayBucket;
+import freenet.support.ArrayBucketFactory;
 import freenet.support.Bucket;
 import freenet.support.BucketFactory;
 import freenet.support.BucketTools;
@@ -28,8 +30,8 @@
     /** Whatever its type, it will need a routingKey ! */
     final byte[] routingKey;

-    /** 32 bytes for hash, 2 bytes for type */
-    public static final short KEY_SIZE_ON_DISK = 34;
+    /** Code for 256-bit AES with PCFB and SHA-256 */
+    static final short ALGO_AES_PCFB_256_SHA256 = 1;

     protected Key(byte[] routingKey) {
        this.routingKey = routingKey;
@@ -49,11 +51,13 @@
      * @param raf The file to read from.
      * @return a Key, or throw an exception, or return null if the key is not 
parsable.
      */
-    public static Key read(DataInput raf) throws IOException {
+    public static final Key read(DataInput raf) throws IOException {
         short type = raf.readShort();
         if(type == NodeCHK.TYPE) {
-            return NodeCHK.read(raf);
-        }
+            return NodeCHK.readCHK(raf);
+        } else if(type == NodeSSK.TYPE)
+               return NodeSSK.readSSK(raf);
+        
         throw new IOException("Unrecognized format: "+type);
     }

@@ -90,18 +94,22 @@
         return hash;
     }

-    static Bucket decompress(ClientCHK key, byte[] output, BucketFactory bf, 
int maxLength, short compressionAlgorithm, int maxDecompressedLength) throws 
CHKDecodeException, IOException {
-        if(key.isCompressed()) {
-               Logger.minor(key, "Decompressing in decode: "+key.getURI()+" 
with codec "+compressionAlgorithm);
-            if(output.length < 5) throw new CHKDecodeException("No bytes to 
decompress");
+    static Bucket decompress(boolean isCompressed, byte[] output, 
BucketFactory bf, int maxLength, short compressionAlgorithm, boolean 
shortLength) throws CHKDecodeException, IOException {
+        if(isCompressed) {
+               Logger.minor(Key.class, "Decompressing "+output.length+" bytes 
in decode with codec "+compressionAlgorithm);
+            if(output.length < (shortLength ? 3 : 5)) throw new 
CHKDecodeException("No bytes to decompress");
             // Decompress
             // First get the length
-            int len = ((((((output[0] & 0xff) << 8) + (output[1] & 0xff)) << 
8) + (output[2] & 0xff)) << 8) +
-               (output[3] & 0xff);
-            if(len > maxDecompressedLength)
+            int len;
+            if(shortLength)
+               len = ((output[0] & 0xff) << 8) + (output[1] & 0xff);
+            else 
+               len = ((((((output[0] & 0xff) << 8) + (output[1] & 0xff)) << 8) 
+ (output[2] & 0xff)) << 8) +
+                       (output[3] & 0xff);
+            if(len > maxLength)
                 throw new CHKDecodeException("Invalid precompressed size: 
"+len);
             Compressor decompressor = 
Compressor.getCompressionAlgorithmByMetadataID(compressionAlgorithm);
-            Bucket inputBucket = new SimpleReadOnlyArrayBucket(output, 4, 
output.length-4);
+            Bucket inputBucket = new SimpleReadOnlyArrayBucket(output, 
shortLength?2:4, output.length-(shortLength?2:4));
             try {
                                return decompressor.decompress(inputBucket, bf, 
maxLength);
                        } catch (CompressionOutputSizeException e) {
@@ -112,4 +120,90 @@
         }
        }

+    static class Compressed {
+       public Compressed(byte[] finalData, short compressionAlgorithm2) {
+               this.compressedData = finalData;
+               this.compressionAlgorithm = compressionAlgorithm2;
+               }
+               byte[] compressedData;
+       short compressionAlgorithm;
+    }
+    
+    static Compressed compress(Bucket sourceData, boolean dontCompress, short 
alreadyCompressedCodec, long sourceLength, long MAX_LENGTH_BEFORE_COMPRESSION, 
long MAX_COMPRESSED_DATA_LENGTH, boolean shortLength) throws 
KeyEncodeException, IOException {
+       byte[] finalData = null;
+        short compressionAlgorithm = -1;
+        // Try to compress it - even if it fits into the block,
+        // because compressing it improves its entropy.
+        if(sourceData.size() > MAX_LENGTH_BEFORE_COMPRESSION)
+            throw new KeyEncodeException("Too big");
+        if((!dontCompress) || alreadyCompressedCodec >= 0) {
+               byte[] cbuf = null;
+               if(alreadyCompressedCodec >= 0) {
+                       if(sourceData.size() > MAX_COMPRESSED_DATA_LENGTH)
+                               throw new KeyEncodeException("Too big 
(precompressed)");
+                       compressionAlgorithm = alreadyCompressedCodec;
+                       cbuf = BucketTools.toByteArray(sourceData);
+                       if(sourceLength > MAX_LENGTH_BEFORE_COMPRESSION)
+                               throw new CHKEncodeException("Too big");
+               } else {
+                       if (sourceData.size() > NodeCHK.BLOCK_SIZE) {
+                                       // Determine the best algorithm
+                                       for (int i = 0; i < 
Compressor.countCompressAlgorithms(); i++) {
+                                               Compressor comp = Compressor
+                                                               
.getCompressionAlgorithmByDifficulty(i);
+                                               ArrayBucket compressedData;
+                                               try {
+                                                       compressedData = 
(ArrayBucket) comp.compress(
+                                                                       
sourceData, new ArrayBucketFactory(), NodeCHK.BLOCK_SIZE);
+                                               } catch (IOException e) {
+                                                       throw new Error(e);
+                                               } catch 
(CompressionOutputSizeException e) {
+                                                       continue;
+                                               }
+                                               if (compressedData.size() <= 
MAX_COMPRESSED_DATA_LENGTH) {
+                                                       compressionAlgorithm = 
comp
+                                                                       
.codecNumberForMetadata();
+                                                       sourceLength = 
sourceData.size();
+                                                       try {
+                                                               cbuf = 
BucketTools.toByteArray(compressedData);
+                                                               // FIXME 
provide a method in ArrayBucket
+                                                       } catch (IOException e) 
{
+                                                               throw new 
Error(e);
+                                                       }
+                                                       break;
+                                               }
+                                       }
+                               }
+                       
+               }
+               if(cbuf != null) {
+                       // Use it
+                       int compressedLength = cbuf.length;
+                finalData = new byte[compressedLength+(shortLength?2:4)];
+                System.arraycopy(cbuf, 0, finalData, shortLength?2:4, 
compressedLength);
+                if(!shortLength) {
+                       finalData[0] = (byte) ((sourceLength >> 24) & 0xff);
+                       finalData[1] = (byte) ((sourceLength >> 16) & 0xff);
+                       finalData[2] = (byte) ((sourceLength >> 8) & 0xff);
+                       finalData[3] = (byte) ((sourceLength) & 0xff);
+                } else {
+                       finalData[0] = (byte) ((sourceLength >> 8) & 0xff);
+                       finalData[1] = (byte) ((sourceLength) & 0xff);
+                }
+               }
+        }
+        if(finalData == null) {
+            if(sourceData.size() > NodeCHK.BLOCK_SIZE) {
+                throw new CHKEncodeException("Too big");
+            }
+               finalData = BucketTools.toByteArray(sourceData);
+        }
+
+        return new Compressed(finalData, compressionAlgorithm);
+    }
+    
+    public byte[] getRoutingKey() {
+       return routingKey;
+    }
+    
 }
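
The shortLength flag threaded through compress() and decompress() above
selects between a 2-byte length prefix (what ClientSSKBlock.decode() asks for)
and the original 4-byte prefix; both are big-endian counts of the uncompressed
length. A standalone sketch of just that prefix handling (hypothetical class,
not part of the patch):

// A sketch (not part of this patch) of the length prefix written in front of
// compressed data: 2 bytes when shortLength is true, 4 bytes otherwise.
public class LengthPrefixSketch {
    static byte[] prefix(long sourceLength, boolean shortLength) {
        if (shortLength)
            return new byte[] {
                (byte) ((sourceLength >> 8) & 0xff),
                (byte) (sourceLength & 0xff) };
        return new byte[] {
            (byte) ((sourceLength >> 24) & 0xff),
            (byte) ((sourceLength >> 16) & 0xff),
            (byte) ((sourceLength >> 8) & 0xff),
            (byte) (sourceLength & 0xff) };
    }

    static int readPrefix(byte[] d, boolean shortLength) {
        if (shortLength)
            return ((d[0] & 0xff) << 8) + (d[1] & 0xff);
        return ((((((d[0] & 0xff) << 8) + (d[1] & 0xff)) << 8)
                + (d[2] & 0xff)) << 8) + (d[3] & 0xff);
    }

    public static void main(String[] args) {
        System.out.println(readPrefix(prefix(12345, true), true));   // 12345
        System.out.println(readPrefix(prefix(12345, false), false)); // 12345
    }
}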

Modified: branches/freenet-freejvms/src/freenet/keys/KeyBlock.java
===================================================================
--- branches/freenet-freejvms/src/freenet/keys/KeyBlock.java    2006-02-03 
22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/freenet/keys/KeyBlock.java    2006-02-03 
22:55:27 UTC (rev 7999)
@@ -1,10 +1,5 @@
 package freenet.keys;

-import java.io.IOException;
-
-import freenet.support.Bucket;
-import freenet.support.BucketFactory;
-
 /**
  * Interface for fetched blocks. Can be decoded with a key.
  */
@@ -12,4 +7,8 @@

     final static int HASH_SHA256 = 1;

+    public Key getKey();
+    public byte[] getRawHeaders();
+    public byte[] getRawData();
+
 }

Copied: branches/freenet-freejvms/src/freenet/keys/KeyEncodeException.java 
(from rev 7998, trunk/freenet/src/freenet/keys/KeyEncodeException.java)

Copied: branches/freenet-freejvms/src/freenet/keys/KeyVerifyException.java 
(from rev 7998, trunk/freenet/src/freenet/keys/KeyVerifyException.java)

Modified: branches/freenet-freejvms/src/freenet/keys/NodeCHK.java
===================================================================
--- branches/freenet-freejvms/src/freenet/keys/NodeCHK.java     2006-02-03 
22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/freenet/keys/NodeCHK.java     2006-02-03 
22:55:27 UTC (rev 7999)
@@ -4,11 +4,8 @@
 import java.io.DataOutput;
 import java.io.DataOutputStream;
 import java.io.IOException;
-import java.security.MessageDigest;
-import java.security.NoSuchAlgorithmException;

 import freenet.support.Base64;
-import freenet.support.Fields;

 import gnu.crypto.hash.Sha256;
 import gnu.crypto.Registry;
@@ -27,6 +24,9 @@
  */
 public class NodeCHK extends Key {

+    /** 32 bytes for hash, 2 bytes for type */
+    public static final short KEY_SIZE_ON_DISK = 34;
+       
     public NodeCHK(byte[] routingKey2) {
        super(routingKey2);
         if(routingKey2.length != KEY_LENGTH)
@@ -53,7 +53,7 @@
         _index.write(routingKey);
     }

-    public static Key read(DataInput raf) throws IOException {
+    public static Key readCHK(DataInput raf) throws IOException {
         byte[] buf = new byte[KEY_LENGTH];
         raf.readFully(buf);
         return new NodeCHK(buf);

Modified: branches/freenet-freejvms/src/freenet/keys/NodeSSK.java
===================================================================
--- branches/freenet-freejvms/src/freenet/keys/NodeSSK.java     2006-02-03 
22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/freenet/keys/NodeSSK.java     2006-02-03 
22:55:27 UTC (rev 7999)
@@ -1,12 +1,17 @@
 package freenet.keys;

+import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.DataOutputStream;
 import java.io.IOException;
 import java.security.MessageDigest;
 import java.security.NoSuchAlgorithmException;
+import java.util.Arrays;

 import freenet.crypt.DSAPublicKey;
+import freenet.support.Fields;
+import freenet.support.HexUtil;
+import freenet.support.Logger;

 /**
  * An SSK is a Signed Subspace Key.
@@ -25,11 +30,38 @@
        final byte[] encryptedHashedDocname;
        /** The signature key, if we know it */
        DSAPublicKey pubKey;
+       final int hashCode;

-       public NodeSSK(byte[] pkHash, byte[] ehDocname) {
+       static final int SSK_VERSION = 1;
+       
+       static final int PUBKEY_HASH_SIZE = 32;
+       static final int E_H_DOCNAME_SIZE = 32;
+       
+       public String toString() {
+               return 
super.toString()+":pkh="+HexUtil.bytesToHex(pubKeyHash)+":ehd="+HexUtil.bytesToHex(encryptedHashedDocname);
+       }
+       
+       public NodeSSK(byte[] pkHash, byte[] ehDocname, DSAPublicKey pubKey) 
throws SSKVerifyException {
                super(makeRoutingKey(pkHash, ehDocname));
                this.encryptedHashedDocname = ehDocname;
                this.pubKeyHash = pkHash;
+               this.pubKey = pubKey;
+               if(pubKey != null) {
+                       MessageDigest md256;
+                       try {
+                               md256 = MessageDigest.getInstance("SHA-256");
+                       } catch (NoSuchAlgorithmException e) {
+                               throw new Error(e);
+                       }
+                       byte[] hash = md256.digest(pubKey.asBytes());
+                       if(!Arrays.equals(hash, pkHash))
+                               throw new SSKVerifyException("Invalid pubKey: 
wrong hash");
+               }
+               if(ehDocname.length != E_H_DOCNAME_SIZE)
+                       throw new IllegalArgumentException("ehDocname must be 
"+E_H_DOCNAME_SIZE+" bytes");
+               if(pkHash.length != PUBKEY_HASH_SIZE)
+                       throw new IllegalArgumentException("pubKeyHash must be 
"+PUBKEY_HASH_SIZE+" bytes");
+               hashCode = Fields.hashCode(pkHash) ^ Fields.hashCode(ehDocname);
        }

        // routingKey = H( E(H(docname)) + H(pubkey) )
@@ -46,7 +78,7 @@
        }

        // 02 = SSK, 01 = first version of SSK
-       public short TYPE = 0x0201;
+       public static short TYPE = 0x0201;

        public void write(DataOutput _index) throws IOException {
         _index.writeShort(TYPE);
@@ -54,6 +86,21 @@
         _index.write(pubKeyHash);
        }

+    public static Key readSSK(DataInput raf) throws IOException {
+        byte[] buf = new byte[E_H_DOCNAME_SIZE];
+        raf.readFully(buf);
+        byte[] buf2 = new byte[PUBKEY_HASH_SIZE];
+        raf.readFully(buf2);
+        try {
+                       return new NodeSSK(buf2, buf, null);
+               } catch (SSKVerifyException e) {
+                       IllegalStateException impossible = 
+                               new IllegalStateException("Impossible: "+e);
+                       impossible.initCause(e);
+                       throw impossible;
+               }
+    }
+
        public short getType() {
                return TYPE;
        }
@@ -76,4 +123,50 @@
                return pubKey;
        }

+       public byte[] getPubKeyHash() {
+               return pubKeyHash;
+       }
+
+       public void setPubKey(DSAPublicKey pubKey2) throws SSKVerifyException {
+               if(pubKey == pubKey2) return;
+               if(pubKey2 == null) return;
+               if(pubKey == null || !pubKey2.equals(pubKey)) {
+                       if(pubKey2 != null) {
+                               MessageDigest md256;
+                               try {
+                                       md256 = 
MessageDigest.getInstance("SHA-256");
+                               } catch (NoSuchAlgorithmException e) {
+                                       throw new Error(e);
+                               }
+                               byte[] newPubKeyHash = 
md256.digest(pubKey2.asBytes());
+                               if(Arrays.equals(pubKeyHash, newPubKeyHash)) {
+                                       if(pubKey != null) {
+                                               // same hash, yet different 
keys!
+                                               Logger.error(this, "Found 
SHA-256 collision or something... WTF?");
+                                               throw new 
SSKVerifyException("Invalid new pubkey: "+pubKey2+" old pubkey: "+pubKey);
+                                       } else {
+                                               // Valid key, assign.
+                                       }
+                               } else {
+                                       throw new SSKVerifyException("New 
pubkey has invalid hash");
+                               }
+                       }
+                       pubKey = pubKey2;
+               }
+       }
+
+       public boolean equals(Object o) {
+               if(!(o instanceof NodeSSK)) return false;
+               NodeSSK key = (NodeSSK)o;
+               if(!Arrays.equals(key.encryptedHashedDocname, 
encryptedHashedDocname)) return false;
+               if(!Arrays.equals(key.pubKeyHash, pubKeyHash)) return false;
+               if(!Arrays.equals(key.routingKey, routingKey)) return false;
+               // cachedNormalizedDouble and pubKey could be negative/null.
+               return true;
+       }
+       
+       public int hashCode() {
+               return hashCode;
+       }
+       
 }
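
As the comment in NodeSSK notes, the routing key is derived as
H( E(H(docname)) + H(pubkey) ). A sketch of that derivation with plain JCE
calls, assuming SHA-256 and the input order given in the comment (hypothetical
class, not part of the patch):

import java.security.MessageDigest;

// A sketch (not part of this patch) of routingKey = H( E(H(docname)) + H(pubkey) ).
// SHA-256 and the input order follow the comment above; treat both as assumptions.
public class SSKRoutingKeySketch {
    static byte[] makeRoutingKey(byte[] ehDocname, byte[] pubKeyHash) throws Exception {
        MessageDigest md = MessageDigest.getInstance("SHA-256");
        md.update(ehDocname);   // E(H(docname)), 32 bytes
        md.update(pubKeyHash);  // H(pubkey), 32 bytes
        return md.digest();     // 32-byte routing key
    }
}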

Modified: branches/freenet-freejvms/src/freenet/keys/SSKBlock.java
===================================================================
--- branches/freenet-freejvms/src/freenet/keys/SSKBlock.java    2006-02-03 
22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/freenet/keys/SSKBlock.java    2006-02-03 
22:55:27 UTC (rev 7999)
@@ -1,17 +1,14 @@
 package freenet.keys;

-import java.io.IOException;
 import java.security.MessageDigest;
 import java.security.NoSuchAlgorithmException;
 import java.util.Arrays;

 import net.i2p.util.NativeBigInteger;
-
 import freenet.crypt.DSA;
 import freenet.crypt.DSAPublicKey;
 import freenet.crypt.DSASignature;
-import freenet.support.Bucket;
-import freenet.support.BucketFactory;
+import freenet.support.HexUtil;

 /**
  * SSKBlock. Contains a full fetched key. Can do a node-level verification. 
Can 
@@ -25,18 +22,19 @@
        final int headersOffset;
        /* HEADERS FORMAT:
         * 2 bytes - hash ID
-        * SIGNATURE ON THE BELOW HASH:
-        *  20 bytes - signature: R (unsigned bytes)
-        *  20 bytes - signature: S (unsigned bytes)
-        * IMPLICIT - hash of remaining fields, including the implicit hash of 
data
-        * IMPLICIT - hash of data
         * 2 bytes - symmetric cipher ID
         * 32 bytes - E(H(docname))
         * ENCRYPTED WITH E(H(docname)) AS IV:
         *  32 bytes - H(decrypted data), = data decryption key
         *  2 bytes - data length + metadata flag
         *  2 bytes - data compression algorithm or -1
+        * IMPLICIT - hash of data
+        * IMPLICIT - hash of remaining fields, including the implicit hash of 
data
         * 
+        * SIGNATURE ON THE ABOVE HASH:
+        *  32 bytes - signature: R (unsigned bytes)
+        *  32 bytes - signature: S (unsigned bytes)
+        * 
         * PLUS THE PUBKEY:
         *  Pubkey
         *  Group
@@ -46,17 +44,37 @@
     final short hashIdentifier;
     final short symCipherIdentifier;

-    static final short DATA_LENGTH = 1024;
+    public static final short DATA_LENGTH = 1024;

-    static final short SIG_R_LENGTH = 20;
-    static final short SIG_S_LENGTH = 20;
+    static final short SIG_R_LENGTH = 32;
+    static final short SIG_S_LENGTH = 32;
     static final short E_H_DOCNAME_LENGTH = 32;
-       
+    static public final short TOTAL_HEADERS_LENGTH = 2 + SIG_R_LENGTH + 
SIG_S_LENGTH + 2 + 
+       E_H_DOCNAME_LENGTH + ClientSSKBlock.DATA_DECRYPT_KEY_LENGTH + 2 + 2;
+    
+    static final short ENCRYPTED_HEADERS_LENGTH = 36;
+    
+    public boolean equals(Object o) {
+       if(!(o instanceof SSKBlock)) return false;
+       SSKBlock block = (SSKBlock)o;
+
+       if(!block.pubKey.equals(pubKey)) return false;
+       if(!block.nodeKey.equals(nodeKey)) return false;
+       if(block.headersOffset != headersOffset) return false;
+       if(block.hashIdentifier != hashIdentifier) return false;
+       if(block.symCipherIdentifier != symCipherIdentifier) return false;
+       if(!Arrays.equals(block.headers, headers)) return false;
+       if(!Arrays.equals(block.data, data)) return false;
+       return true;
+    }
+    
        /**
         * Initialize, and verify data, headers against key. Provided
         * key must have a pubkey, or we throw.
         */
-       public SSKBlock(byte[] data, byte[] headers, NodeSSK nodeKey) throws 
SSKVerifyException {
+       public SSKBlock(byte[] data, byte[] headers, NodeSSK nodeKey, boolean 
dontVerify) throws SSKVerifyException {
+               if(headers.length != TOTAL_HEADERS_LENGTH)
+                       throw new 
IllegalArgumentException("Headers.length="+headers.length+" should be 
"+TOTAL_HEADERS_LENGTH);
                this.data = data;
                this.headers = headers;
                this.nodeKey = nodeKey;
@@ -65,10 +83,6 @@
                        throw new SSKVerifyException("Data length wrong: 
"+data.length+" should be "+DATA_LENGTH);
                if(pubKey == null)
                        throw new SSKVerifyException("PubKey was null from 
"+nodeKey);
-        if(headers.length < 2) throw new IllegalArgumentException("Too short: 
"+headers.length);
-        hashIdentifier = (short)(((headers[0] & 0xff) << 8) + (headers[1] & 
0xff));
-        if(hashIdentifier != HASH_SHA256)
-            throw new SSKVerifyException("Hash not SHA-256");
         MessageDigest md;
         try {
             md = MessageDigest.getInstance("SHA-256");
@@ -76,38 +90,64 @@
             throw new Error(e);
         }
         // Now verify it
+        hashIdentifier = (short)(((headers[0] & 0xff) << 8) + (headers[1] & 
0xff));
+        if(hashIdentifier != HASH_SHA256)
+            throw new SSKVerifyException("Hash not SHA-256");
+        int x = 2;
+               symCipherIdentifier = (short)(((headers[x] & 0xff) << 8) + 
(headers[x+1] & 0xff));
+               x+=2;
+               // Then E(H(docname))
+               byte[] ehDocname = new byte[E_H_DOCNAME_LENGTH];
+               System.arraycopy(headers, x, ehDocname, 0, ehDocname.length);
+               x += E_H_DOCNAME_LENGTH;
+               headersOffset = x; // is index to start of encrypted headers
+               x += ENCRYPTED_HEADERS_LENGTH;
                // Extract the signature
                byte[] bufR = new byte[SIG_R_LENGTH];
                byte[] bufS = new byte[SIG_S_LENGTH];
-               int x = 2;
                if(x+SIG_R_LENGTH+SIG_S_LENGTH > headers.length)
                        throw new SSKVerifyException("Headers too short: 
"+headers.length+" should be at least "+x+SIG_R_LENGTH+SIG_S_LENGTH);
-               System.arraycopy(headers, x, bufR, 0, SIG_R_LENGTH);
+               if(!dontVerify)
+                       System.arraycopy(headers, x, bufR, 0, SIG_R_LENGTH);
                x+=SIG_R_LENGTH;
-               System.arraycopy(headers, x, bufS, 0, SIG_S_LENGTH);
+               if(!dontVerify)
+                       System.arraycopy(headers, x, bufS, 0, SIG_S_LENGTH);
                x+=SIG_S_LENGTH;
                // Compute the hash on the data
-               md.update(data);
-               byte[] dataHash = md.digest();
-               md.update(dataHash);
-               md.update(headers, x, headers.length - x);
-               byte[] overallHash = md.digest();
-               // Now verify it
-               NativeBigInteger r = new NativeBigInteger(1, bufR);
-               NativeBigInteger s = new NativeBigInteger(1, bufS);
-               if(!DSA.verify(pubKey, new DSASignature(r, s), new 
NativeBigInteger(1, overallHash))) {
-                       throw new SSKVerifyException("Signature verification 
failed for node-level SSK");
+               if(!dontVerify) {
+                       md.update(data);
+                       byte[] dataHash = md.digest();
+                       // All headers up to and not including the signature
+                       md.update(headers, 0, headersOffset + 
ENCRYPTED_HEADERS_LENGTH);
+                       // Then the implicit data hash
+                       md.update(dataHash);
+                       // Makes the implicit overall hash
+                       byte[] overallHash = md.digest();
+                       // Now verify it
+                       NativeBigInteger r = new NativeBigInteger(1, bufR);
+                       NativeBigInteger s = new NativeBigInteger(1, bufS);
+                       if(!DSA.verify(pubKey, new DSASignature(r, s), new 
NativeBigInteger(1, overallHash))) {
+                               throw new SSKVerifyException("Signature 
verification failed for node-level SSK");
+                       }
                }
-               if(headers.length < x+2+E_H_DOCNAME_LENGTH)
-                       throw new SSKVerifyException("Headers too short after 
sig verification: "+headers.length+" should be "+x+2+E_H_DOCNAME_LENGTH);
-               symCipherIdentifier = (short)(((headers[x] & 0xff) << 8) + 
(headers[x+1] & 0xff));
-               x+=2;
-               byte[] ehDocname = new byte[E_H_DOCNAME_LENGTH];
-               System.arraycopy(headers, x, ehDocname, 0, ehDocname.length);
-               x+=E_H_DOCNAME_LENGTH;
-               headersOffset = x; // is index to start of e(h(docname))
                if(!Arrays.equals(ehDocname, nodeKey.encryptedHashedDocname))
-                       throw new SSKVerifyException("E(H(docname)) wrong - 
wrong key??");
+                       throw new SSKVerifyException("E(H(docname)) wrong - 
wrong key?? \nfrom headers: "+HexUtil.bytesToHex(ehDocname)+"\nfrom key:     
"+HexUtil.bytesToHex(nodeKey.encryptedHashedDocname));
        }

+       public Key getKey() {
+               return nodeKey;
+       }
+
+       public byte[] getRawHeaders() {
+               return headers;
+       }
+
+       public byte[] getRawData() {
+               return data;
+       }
+
+       public DSAPublicKey getPubKey() {
+               return pubKey;
+       }
+
 }
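
The reworked constructor walks the headers in the order given by the layout
comment above: the encrypted headers start at byte 36 (headersOffset) and the
whole structure is 136 bytes, matching TOTAL_HEADERS_LENGTH. A sketch spelling
out that arithmetic (hypothetical class, not part of the patch):

// A sketch (not part of this patch) of the offsets the reworked SSKBlock
// constructor computes while walking the documented header layout.
public class SSKHeaderLayoutSketch {
    public static void main(String[] args) {
        int x = 0;
        System.out.println("hashIdentifier      @ " + x); x += 2;
        System.out.println("symCipherIdentifier @ " + x); x += 2;
        System.out.println("E(H(docname))       @ " + x); x += 32;
        System.out.println("encrypted headers   @ " + x); x += 36; // headersOffset = 36
        System.out.println("signature R         @ " + x); x += 32;
        System.out.println("signature S         @ " + x); x += 32;
        System.out.println("total               = " + x);          // 136 = TOTAL_HEADERS_LENGTH
    }
}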

Copied: branches/freenet-freejvms/src/freenet/keys/SSKEncodeException.java 
(from rev 7998, trunk/freenet/src/freenet/keys/SSKEncodeException.java)

Modified: branches/freenet-freejvms/src/freenet/keys/SSKVerifyException.java
===================================================================
--- branches/freenet-freejvms/src/freenet/keys/SSKVerifyException.java  
2006-02-03 22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/freenet/keys/SSKVerifyException.java  
2006-02-03 22:55:27 UTC (rev 7999)
@@ -3,7 +3,7 @@
 /**
  * Thrown when an SSK fails to verify at the node level.
  */
-public class SSKVerifyException extends Exception {
+public class SSKVerifyException extends KeyVerifyException {

        public SSKVerifyException(String string) {
                super(string);

Copied: branches/freenet-freejvms/src/freenet/node/AnyInsertSender.java (from 
rev 7998, trunk/freenet/src/freenet/node/AnyInsertSender.java)

Copied: branches/freenet-freejvms/src/freenet/node/CHKInsertSender.java (from 
rev 7998, trunk/freenet/src/freenet/node/CHKInsertSender.java)

Modified: branches/freenet-freejvms/src/freenet/node/FNPPacketMangler.java
===================================================================
--- branches/freenet-freejvms/src/freenet/node/FNPPacketMangler.java    
2006-02-03 22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/freenet/node/FNPPacketMangler.java    
2006-02-03 22:55:27 UTC (rev 7999)
@@ -1140,8 +1140,8 @@
         plaintext[ptr++] = (byte)(seqNumber >> 8);
         plaintext[ptr++] = (byte)seqNumber;

+        Logger.minor(this, "Getting random junk");
         node.random.nextBytes(randomJunk);
-        Logger.minor(this, "Got random junk");
         System.arraycopy(randomJunk, 0, plaintext, ptr, RANDOM_BYTES_LENGTH);
         ptr += RANDOM_BYTES_LENGTH;

@@ -1297,8 +1297,11 @@
                 pn.setDHContext(ctx);
             }
         }
-        sendFirstHalfDHPacket(0, ctx.getOurExponential(), pn, pn.getPeer());
-        pn.sentHandshake();
+
+        for(int i=0;i<pn.getHandshakeIPs().length;i++){
+               sendFirstHalfDHPacket(0, ctx.getOurExponential(), pn, 
pn.getHandshakeIPs()[i]);
+               pn.sentHandshake();
+        }
     }

     public boolean isDisconnected(PeerContext context) {

Modified: branches/freenet-freejvms/src/freenet/node/InsertHandler.java
===================================================================
--- branches/freenet-freejvms/src/freenet/node/InsertHandler.java       
2006-02-03 22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/freenet/node/InsertHandler.java       
2006-02-03 22:55:27 UTC (rev 7999)
@@ -35,7 +35,7 @@
     final long startTime;
     private double closestLoc;
     private short htl;
-    private InsertSender sender;
+    private CHKInsertSender sender;
     private byte[] headers;
     private BlockReceiver br;
     private Thread runThread;
@@ -64,12 +64,27 @@

     public void run() {
         try {
+               realRun();
+        } catch (Throwable t) {
+            Logger.error(this, "Caught "+t, t);
+        } finally {
+            Logger.minor(this, "Exiting InsertHandler.run() for "+uid);
+            node.unlockUID(uid);
+        }
+    }
+
+    private void realRun() {
         runThread = Thread.currentThread();

         // FIXME implement rate limiting or something!
         // Send Accepted
         Message accepted = DMT.createFNPAccepted(uid);
-        source.send(accepted);
+        try {
+                       source.send(accepted);
+               } catch (NotConnectedException e1) {
+                       Logger.minor(this, "Lost connection to source");
+                       return;
+               }

         // Source will send us a DataInsert

@@ -87,24 +102,29 @@
         Logger.minor(this, "Received "+msg);

         if(msg == null) {
-            if(source.isConnected() && startTime > 
(source.timeLastConnected()+Node.HANDSHAKE_TIMEOUT*4))
-                Logger.error(this, "Did not receive DataInsert on "+uid+" from 
"+source+" !");
-            Message tooSlow = DMT.createFNPRejectedTimeout(uid);
-            source.sendAsync(tooSlow, null);
-               Message m = DMT.createFNPInsertTransfersCompleted(uid, true);
-               source.sendAsync(m, null);
-            prb = new PartiallyReceivedBlock(Node.PACKETS_IN_BLOCK, 
Node.PACKET_SIZE);
-            br = new BlockReceiver(node.usm, source, uid, prb);
-            prb.abort(RetrievalException.NO_DATAINSERT, "No DataInsert");
-            br.sendAborted(RetrievalException.NO_DATAINSERT, "No DataInsert");
-            return;
+               try {
+                       if(source.isConnected() && startTime > 
(source.timeLastConnected()+Node.HANDSHAKE_TIMEOUT*4))
+                               Logger.error(this, "Did not receive DataInsert 
on "+uid+" from "+source+" !");
+                       Message tooSlow = DMT.createFNPRejectedTimeout(uid);
+                       source.sendAsync(tooSlow, null);
+                       Message m = DMT.createFNPInsertTransfersCompleted(uid, 
true);
+                       source.sendAsync(m, null);
+                       prb = new PartiallyReceivedBlock(Node.PACKETS_IN_BLOCK, 
Node.PACKET_SIZE);
+                       br = new BlockReceiver(node.usm, source, uid, prb);
+                       prb.abort(RetrievalException.NO_DATAINSERT, "No 
DataInsert");
+                       br.sendAborted(RetrievalException.NO_DATAINSERT, "No 
DataInsert");
+                       return;
+               } catch (NotConnectedException e) {
+                       Logger.minor(this, "Lost connection to source");
+                       return;
+               }
         }

         // We have a DataInsert
         headers = ((ShortBuffer)msg.getObject(DMT.BLOCK_HEADERS)).getData();
         // FIXME check the headers

-        // Now create an InsertSender, or use an existing one, or
+        // Now create a CHKInsertSender, or use an existing one, or
         // discover that the data is in the store.

         // From this point onwards, if we return cleanly we must go through 
finish().
@@ -125,7 +145,11 @@
             canCommit = true;
                msg = DMT.createFNPInsertReply(uid);
                sentSuccess = true;
-               source.send(msg);
+               try {
+                               source.send(msg);
+                       } catch (NotConnectedException e) {
+                               // Ignore
+                       }
             finish();
             return;
         }
@@ -134,16 +158,16 @@
         // What do we want to wait for?
         // If the data receive completes, that's very nice,
         // but doesn't really matter. What matters is what
-        // happens to the InsertSender. If the data receive
+        // happens to the CHKInsertSender. If the data receive
         // fails, that does matter...

-        // We are waiting for a terminal status on the InsertSender,
+        // We are waiting for a terminal status on the CHKInsertSender,
         // including REPLIED_WITH_DATA.
         // If we get transfer failed, we can check whether the receive
         // failed first. If it did it's not our fault.
         // If the receive failed, and we haven't started transferring
         // yet, we probably want to kill the sender.
-        // So we call the wait method on the InsertSender, but we
+        // So we call the wait method on the CHKInsertSender, but we
         // also have a flag locally to indicate the receive failed.
         // And if it does, we interrupt.

@@ -152,7 +176,7 @@
         while(true) {
             synchronized(sender) {
                 try {
-                       if(sender.getStatus() == InsertSender.NOT_FINISHED)
+                       if(sender.getStatus() == CHKInsertSender.NOT_FINISHED)
                                sender.wait(5000);
                 } catch (InterruptedException e) {
                     // Cool, probably this is because the receive failed...
@@ -170,12 +194,17 @@
                receivedRejectedOverload = true;
                // Forward it
                Message m = DMT.createFNPRejectedOverload(uid, false);
-               source.send(m);
+               try {
+                                       source.send(m);
+                               } catch (NotConnectedException e) {
+                                       Logger.minor(this, "Lost connection to 
source");
+                                       return;
+                               }
             }

             int status = sender.getStatus();

-            if(status == InsertSender.NOT_FINISHED) {
+            if(status == CHKInsertSender.NOT_FINISHED) {
                 continue;
             }

@@ -191,31 +220,46 @@
             // Local RejectedOverload's (fatal).
             // Internal error counts as overload. It'd only create a timeout 
otherwise, which is the same thing anyway.
             // We *really* need a good way to deal with nodes that constantly 
R_O!
-            if(status == InsertSender.TIMED_OUT ||
-                       status == InsertSender.GENERATED_REJECTED_OVERLOAD ||
-                       status == InsertSender.INTERNAL_ERROR) {
+            if(status == CHKInsertSender.TIMED_OUT ||
+                       status == CHKInsertSender.GENERATED_REJECTED_OVERLOAD ||
+                       status == CHKInsertSender.INTERNAL_ERROR) {
                 msg = DMT.createFNPRejectedOverload(uid, true);
-                source.send(msg);
+                try {
+                                       source.send(msg);
+                               } catch (NotConnectedException e) {
+                                       Logger.minor(this, "Lost connection to 
source");
+                                       return;
+                               }
                 // Might as well store it anyway.
-                if(status == InsertSender.TIMED_OUT ||
-                               status == 
InsertSender.GENERATED_REJECTED_OVERLOAD)
+                if(status == CHKInsertSender.TIMED_OUT ||
+                               status == 
CHKInsertSender.GENERATED_REJECTED_OVERLOAD)
                        canCommit = true;
                 finish();
                 return;
             }

-            if(status == InsertSender.ROUTE_NOT_FOUND || status == 
InsertSender.ROUTE_REALLY_NOT_FOUND) {
+            if(status == CHKInsertSender.ROUTE_NOT_FOUND || status == 
CHKInsertSender.ROUTE_REALLY_NOT_FOUND) {
                 msg = DMT.createFNPRouteNotFound(uid, sender.getHTL());
-                source.send(msg);
+                try {
+                                       source.send(msg);
+                               } catch (NotConnectedException e) {
+                                       Logger.minor(this, "Lost connection to 
source");
+                                       return;
+                               }
                 canCommit = true;
                 finish();
                 return;
             }

-            if(status == InsertSender.SUCCESS) {
+            if(status == CHKInsertSender.SUCCESS) {
                msg = DMT.createFNPInsertReply(uid);
                sentSuccess = true;
-               source.send(msg);
+               try {
+                                       source.send(msg);
+                               } catch (NotConnectedException e) {
+                                       Logger.minor(this, "Lost connection to 
source");
+                                       return;
+                               }
                 canCommit = true;
                 finish();
                 return;
@@ -224,19 +268,17 @@
             // Otherwise...?
             Logger.error(this, "Unknown status code: 
"+sender.getStatusString());
             msg = DMT.createFNPRejectedOverload(uid, true);
-            source.send(msg);
+            try {
+                               source.send(msg);
+                       } catch (NotConnectedException e) {
+                               // Ignore
+                       }
             finish();
             return;
         }
-        } catch (Throwable t) {
-            Logger.error(this, "Caught "+t, t);
-        } finally {
-            Logger.minor(this, "Exiting InsertHandler.run() for "+uid);
-            node.unlockUID(uid);
-        }
-    }
+       }

-    private boolean canCommit = false;
+       private boolean canCommit = false;
     private boolean sentCompletion = false;
     private Object sentCompletionLock = new Object();
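
The loop in realRun() above waits for the CHKInsertSender to reach a terminal
status, polling under the sender's lock for up to five seconds at a time and
treating an interrupt as a hint that the local data receive may have failed.
The pattern in isolation (hypothetical stand-in types, not part of the patch):

// A sketch (not part of this patch) of the wait-for-terminal-status pattern
// realRun() uses against its CHKInsertSender. The Sender interface below is a
// stand-in so the sketch is self-contained.
public class SenderWaitSketch {
    interface Sender {
        int NOT_FINISHED = -1;
        int getStatus();
    }

    static int waitForTerminalStatus(Sender sender) {
        while (true) {
            synchronized (sender) {
                try {
                    if (sender.getStatus() == Sender.NOT_FINISHED)
                        sender.wait(5000); // poll at most every 5 seconds
                } catch (InterruptedException e) {
                    // In InsertHandler this typically means the local data
                    // receive failed; fall through and re-check the status.
                }
            }
            int status = sender.getStatus();
            if (status != Sender.NOT_FINISHED)
                return status; // SUCCESS, ROUTE_NOT_FOUND, TIMED_OUT, ...
        }
    }
}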


Deleted: branches/freenet-freejvms/src/freenet/node/InsertSender.java
===================================================================
--- branches/freenet-freejvms/src/freenet/node/InsertSender.java        
2006-02-03 22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/freenet/node/InsertSender.java        
2006-02-03 22:55:27 UTC (rev 7999)
@@ -1,778 +0,0 @@
-package freenet.node;
-
-import java.util.HashSet;
-import java.util.Vector;
-
-import freenet.io.comm.DMT;
-import freenet.io.comm.DisconnectedException;
-import freenet.io.comm.Message;
-import freenet.io.comm.MessageFilter;
-import freenet.io.xfer.BlockTransmitter;
-import freenet.io.xfer.PartiallyReceivedBlock;
-import freenet.keys.CHKBlock;
-import freenet.keys.CHKVerifyException;
-import freenet.keys.NodeCHK;
-import freenet.support.Logger;
-
-public final class InsertSender implements Runnable {
-
-       private class AwaitingCompletion {
-               
-               /** Node we are waiting for response from */
-               final PeerNode pn;
-               /** We may be sending data to that node */
-               BlockTransmitter bt;
-               /** Have we received notice of the downstream success
-                * or failure of dependant transfers from that node?
-                * Includes timing out. */
-               boolean receivedCompletionNotice = false;
-               /** Timed out - didn't receive completion notice in
-                * the allotted time?? */
-               boolean completionTimedOut = false;
-               /** Was the notification of successful transfer? */
-               boolean completionSucceeded;
-               
-               /** Have we completed the immediate transfer? */
-               boolean completedTransfer = false;
-               /** Did it succeed? */
-               boolean transferSucceeded = false;
-               
-               AwaitingCompletion(PeerNode pn, PartiallyReceivedBlock prb) {
-                       this.pn = pn;
-                       bt = new BlockTransmitter(node.usm, pn, uid, prb);
-                       Sender s = new Sender(this);
-            Thread senderThread = new Thread(s, "Sender for "+uid+" to 
"+pn.getPeer());
-            senderThread.setDaemon(true);
-            senderThread.start();
-               }
-               
-               void completed(boolean timeout, boolean success) {
-                       synchronized(this) {
-                               if(timeout)
-                                       completionTimedOut = true;
-                               else
-                                       completionSucceeded = success;
-                               receivedCompletionNotice = true;
-                               notifyAll();
-                       }
-                       synchronized(nodesWaitingForCompletion) {
-                               nodesWaitingForCompletion.notifyAll();
-                       }
-                       if(!success) {
-                               synchronized(InsertSender.this) {
-                                       transferTimedOut = true;
-                                       InsertSender.this.notifyAll();
-                               }
-                       }
-               }
-               
-               void completedTransfer(boolean success) {
-                       synchronized(this) {
-                               transferSucceeded = success;
-                               completedTransfer = true;
-                               notifyAll();
-                       }
-                       synchronized(nodesWaitingForCompletion) {
-                               nodesWaitingForCompletion.notifyAll();
-                       }
-                       if(!success) {
-                               synchronized(InsertSender.this) {
-                                       transferTimedOut = true;
-                                       InsertSender.this.notifyAll();
-                               }
-                       }
-               }
-       }
-       
-    public class Sender implements Runnable {
-       
-       final AwaitingCompletion completion;
-       final BlockTransmitter bt;
-       
-       public Sender(AwaitingCompletion ac) {
-               this.bt = ac.bt;
-               this.completion = ac;
-       }
-       
-               public void run() {
-                       try {
-                               bt.send();
-                               if(bt.failedDueToOverload()) {
-                                       completion.completedTransfer(false);
-                               } else {
-                                       completion.completedTransfer(true);
-                               }
-                       } catch (Throwable t) {
-                               completion.completedTransfer(false);
-                               Logger.error(this, "Caught "+t, t);
-                       }
-               }
-       }
-    
-       InsertSender(NodeCHK myKey, long uid, byte[] headers, short htl, 
-            PeerNode source, Node node, PartiallyReceivedBlock prb, boolean 
fromStore, double closestLocation) {
-        this.myKey = myKey;
-        this.target = myKey.toNormalizedDouble();
-        this.uid = uid;
-        this.headers = headers;
-        this.htl = htl;
-        this.source = source;
-        this.node = node;
-        this.prb = prb;
-        this.fromStore = fromStore;
-        this.closestLocation = closestLocation;
-        this.startTime = System.currentTimeMillis();
-        this.nodesWaitingForCompletion = new Vector();
-        Thread t = new Thread(this, "InsertSender for UID "+uid+" on 
"+node.portNumber+" at "+System.currentTimeMillis());
-        t.setDaemon(true);
-        t.start();
-    }
-    
-    // Constants
-    static final int ACCEPTED_TIMEOUT = 10000;
-    static final int SEARCH_TIMEOUT = 60000;
-    static final int TRANSFER_COMPLETION_TIMEOUT = 120000;
-
-    // Basics
-    final NodeCHK myKey;
-    final double target;
-    final long uid;
-    short htl;
-    final PeerNode source;
-    final Node node;
-    final byte[] headers; // received BEFORE creation => we handle Accepted 
elsewhere
-    final PartiallyReceivedBlock prb;
-    final boolean fromStore;
-    private boolean receiveFailed = false;
-    final double closestLocation;
-    final long startTime;
-    private boolean sentRequest;
-    
-    /** List of nodes we are waiting for either a transfer completion
-     * notice or a transfer completion from. */
-    private Vector nodesWaitingForCompletion;
-    
-    /** Have all transfers completed and all nodes reported completion status? 
*/
-    private boolean allTransfersCompleted = false;
-    
-    /** Has a transfer timed out, either directly or downstream? */
-    private boolean transferTimedOut = false;
-    
-    /** Runnable which waits for completion of all transfers */
-    private CompletionWaiter cw = null;
-
-    /** Time at which we set status to a value other than NOT_FINISHED */
-    private long setStatusTime = -1;
-    
-    
-    private int status = -1;
-    /** Still running */
-    static final int NOT_FINISHED = -1;
-    /** Successful insert */
-    static final int SUCCESS = 0;
-    /** Route not found */
-    static final int ROUTE_NOT_FOUND = 1;
-    /** Internal error */
-    static final int INTERNAL_ERROR = 3;
-    /** Timed out waiting for response */
-    static final int TIMED_OUT = 4;
-    /** Locally Generated a RejectedOverload */
-    static final int GENERATED_REJECTED_OVERLOAD = 5;
-    /** Could not get off the node at all! */
-    static final int ROUTE_REALLY_NOT_FOUND = 6;
-    
-    public String toString() {
-        return super.toString()+" for "+uid;
-    }
-    
-    public void run() {
-        short origHTL = htl;
-        try {
-        HashSet nodesRoutedTo = new HashSet();
-        HashSet nodesNotIgnored = new HashSet();
-        
-        while(true) {
-            if(receiveFailed) return; // don't need to set status as killed by 
InsertHandler
-            
-            if(htl == 0) {
-                // Send an InsertReply back
-                finish(SUCCESS, null);
-                return;
-            }
-            
-            // Route it
-            PeerNode next;
-            // Can backtrack, so only route to nodes closer than we are to 
target.
-            double nextValue;
-            synchronized(node.peers) {
-                next = node.peers.closerPeer(source, nodesRoutedTo, 
nodesNotIgnored, target, true);
-                if(next != null)
-                    nextValue = next.getLocation().getValue();
-                else
-                    nextValue = -1.0;
-            }
-            
-            if(next == null) {
-                // Backtrack
-                finish(ROUTE_NOT_FOUND, null);
-                return;
-            }
-            Logger.minor(this, "Routing insert to "+next);
-            nodesRoutedTo.add(next);
-            
-            if(Math.abs(target - nextValue) > Math.abs(target - 
closestLocation)) {
-                Logger.minor(this, "Backtracking: target="+target+" 
next="+nextValue+" closest="+closestLocation);
-                htl = node.decrementHTL(source, htl);
-            }
-            
-            Message req = DMT.createFNPInsertRequest(uid, htl, myKey, 
closestLocation);
-            
-            // Wait for ack or reject... will come before even a locally 
generated DataReply
-            
-            MessageFilter mfAccepted = 
MessageFilter.create().setSource(next).setField(DMT.UID, 
uid).setTimeout(ACCEPTED_TIMEOUT).setType(DMT.FNPAccepted);
-            MessageFilter mfRejectedLoop = 
MessageFilter.create().setSource(next).setField(DMT.UID, 
uid).setTimeout(ACCEPTED_TIMEOUT).setType(DMT.FNPRejectedLoop);
-            MessageFilter mfRejectedOverload = 
MessageFilter.create().setSource(next).setField(DMT.UID, 
uid).setTimeout(ACCEPTED_TIMEOUT).setType(DMT.FNPRejectedOverload);
-            
-            // mfRejectedOverload must be the last thing in the or
-            // So its or pointer remains null
-            // Otherwise we need to recreate it below
-            mfRejectedOverload.clearOr();
-            MessageFilter mf = 
mfAccepted.or(mfRejectedLoop.or(mfRejectedOverload));
-            
-            // Send to next node
-            
-            next.send(req);
-            sentRequest = true;
-            
-            if(receiveFailed) return; // don't need to set status as killed by 
InsertHandler
-            Message msg = null;
-            
-            /*
-             * Because messages may be re-ordered, it is
-             * entirely possible that we get a non-local RejectedOverload,
-             * followed by an Accepted. So we must loop here.
-             */
-            
-            while (true) {
-                
-                try {
-                    msg = node.usm.waitFor(mf);
-                } catch (DisconnectedException e) {
-                    Logger.normal(this, "Disconnected from " + next + " while waiting for Accepted");
-                    break;
-                }
-                
-                if (receiveFailed)
-                    return; // don't need to set status as killed by InsertHandler
-                
-                if (msg == null) {
-                    // Terminal overload
-                    // Try to propagate back to source
-                    Logger.minor(this, "Timeout");
-                    next.localRejectedOverload();
-                    finish(TIMED_OUT, next);
-                    return;
-                }
-                
-                if (msg.getSpec() == DMT.FNPRejectedOverload) {
-                    // Non-fatal - probably still have time left
-                    if (msg.getBoolean(DMT.IS_LOCAL)) {
-                        next.localRejectedOverload();
-                        Logger.minor(this, "Local RejectedOverload, moving on to next peer");
-                        // Give up on this one, try another
-                        break;
-                    } else {
-                        forwardRejectedOverload();
-                    }
-                    continue;
-                }
-                
-                if (msg.getSpec() == DMT.FNPRejectedLoop) {
-                    next.successNotOverload();
-                    // Loop - we don't want to send the data to this one
-                    break;
-                }
-                
-                if (msg.getSpec() != DMT.FNPAccepted) {
-                    Logger.error(this, "Unexpected message waiting for Accepted: " + msg);
-                    break;
-                }
-                // Otherwise is an FNPAccepted
-                break;
-            }
-            
-            if(msg == null || msg.getSpec() != DMT.FNPAccepted) continue;
-            
-            Logger.minor(this, "Got Accepted on "+this);
-            
-            // Send them the data.
-            // Which might be the new data resulting from a collision...
-
-            Message dataInsert;
-            PartiallyReceivedBlock prbNow;
-            prbNow = prb;
-            dataInsert = DMT.createFNPDataInsert(uid, headers);
-            /** What are we waiting for now??:
-             * - FNPRouteNotFound - couldn't exhaust HTL, but send us the 
-             *   data anyway please
-             * - FNPInsertReply - used up all HTL, yay
-             * - FNPRejectOverload - propagating an overload error :(
-             * - FNPDataFound - target already has the data, and the data is
-             *   an SVK/SSK/KSK, therefore could be different to what we are
-             *   inserting.
-             */
-            
-            MessageFilter mfInsertReply = MessageFilter.create().setSource(next).setField(DMT.UID, uid).setTimeout(SEARCH_TIMEOUT).setType(DMT.FNPInsertReply);
-            mfRejectedOverload.setTimeout(SEARCH_TIMEOUT);
-            mfRejectedOverload.clearOr();
-            MessageFilter mfRouteNotFound = MessageFilter.create().setSource(next).setField(DMT.UID, uid).setTimeout(SEARCH_TIMEOUT).setType(DMT.FNPRouteNotFound);
-            MessageFilter mfDataInsertRejected = MessageFilter.create().setSource(next).setField(DMT.UID, uid).setTimeout(SEARCH_TIMEOUT).setType(DMT.FNPDataInsertRejected);
-            MessageFilter mfTimeout = MessageFilter.create().setSource(next).setField(DMT.UID, uid).setTimeout(SEARCH_TIMEOUT).setType(DMT.FNPRejectedTimeout);
-            
-            mf = mfInsertReply.or(mfRouteNotFound.or(mfDataInsertRejected.or(mfTimeout.or(mfRejectedOverload))));
-
-            Logger.minor(this, "Sending DataInsert");
-            if(receiveFailed) return;
-            next.send(dataInsert);
-
-            Logger.minor(this, "Sending data");
-            if(receiveFailed) return;
-            AwaitingCompletion ac = new AwaitingCompletion(next, prbNow);
-            synchronized(nodesWaitingForCompletion) {
-               nodesWaitingForCompletion.add(ac);
-               nodesWaitingForCompletion.notifyAll();
-            }
-            makeCompletionWaiter();
-
-            while (true) {
-
-                if (receiveFailed)
-                    return;
-                
-                try {
-                    msg = node.usm.waitFor(mf);
-                } catch (DisconnectedException e) {
-                    Logger.normal(this, "Disconnected from " + next + " while waiting for InsertReply on " + this);
-                    break;
-                }
-                if (receiveFailed)
-                    return;
-                
-                if (msg == null || msg.getSpec() == DMT.FNPRejectedTimeout) {
-                    // Timeout :(
-                    // Fairly serious problem
-                    Logger.error(this, "Timeout (" + msg + ") after Accepted in insert");
-                    // Terminal overload
-                    // Try to propagate back to source
-                    next.localRejectedOverload();
-                    finish(TIMED_OUT, next);
-                    return;
-                }
-
-                if (msg.getSpec() == DMT.FNPRejectedOverload) {
-                    // Probably non-fatal, if so, we have time left, can try next one
-                    if (msg.getBoolean(DMT.IS_LOCAL)) {
-                        next.localRejectedOverload();
-                        Logger.minor(this, "Local RejectedOverload, moving on to next peer");
-                        // Give up on this one, try another
-                        break;
-                    } else {
-                        forwardRejectedOverload();
-                    }
-                    continue; // Wait for any further response
-                }
-
-                if (msg.getSpec() == DMT.FNPRouteNotFound) {
-                    Logger.minor(this, "Rejected: RNF");
-                    short newHtl = msg.getShort(DMT.HTL);
-                    if (htl > newHtl)
-                        htl = newHtl;
-                    // Finished as far as this node is concerned
-                    next.successNotOverload();
-                    break;
-                }
-
-                if (msg.getSpec() == DMT.FNPDataInsertRejected) {
-                    next.successNotOverload();
-                    short reason = msg.getShort(DMT.DATA_INSERT_REJECTED_REASON);
-                    Logger.minor(this, "DataInsertRejected: " + reason);
-                    if (reason == DMT.DATA_INSERT_REJECTED_VERIFY_FAILED) {
-                        if (fromStore) {
-                            // That's odd...
-                            Logger.error(this, "Verify failed on next node " + next + " for DataInsert but we were sending from the store!");
-                        } else {
-                            try {
-                                if (!prb.allReceived())
-                                    Logger.error(this, "Did not receive all packets but next node says invalid anyway!");
-                                else {
-                                    // Check the data
-                                    new CHKBlock(prb.getBlock(), headers, myKey);
-                                    Logger.error(this, "Verify failed on " + next + " but data was valid!");
-                                }
-                            } catch (CHKVerifyException e) {
-                                Logger.normal(this, "Verify failed because data was invalid");
-                            }
-                        }
-                        break; // What else can we do?
-                    } else if (reason == DMT.DATA_INSERT_REJECTED_RECEIVE_FAILED) {
-                        if (receiveFailed) {
-                            Logger.minor(this, "Failed to receive data, so failed to send data");
-                        } else {
-                            if (prb.allReceived()) {
-                                Logger.error(this, "Received all data but send failed to " + next);
-                            } else {
-                                if (prb.isAborted()) {
-                                    Logger.normal(this, "Send failed: aborted: " + prb.getAbortReason() + ": " + prb.getAbortDescription());
-                                } else
-                                    Logger.normal(this, "Send failed; have not yet received all data but not aborted: " + next);
-                            }
-                        }
-                        break;
-                    }
-                    Logger.error(this, "DataInsert rejected! Reason=" + DMT.getDataInsertRejectedReason(reason));
-                }
-                
-                if (msg.getSpec() != DMT.FNPInsertReply) {
-                    Logger.error(this, "Unknown reply: " + msg);
-                    finish(INTERNAL_ERROR, next);
-                }
-                
-                // Our task is complete
-                next.successNotOverload();
-                finish(SUCCESS, next);
-                return;
-            }
-        }
-        } catch (Throwable t) {
-            Logger.error(this, "Caught "+t, t);
-            if(status == NOT_FINISHED)
-               finish(INTERNAL_ERROR, null);
-        } finally {
-            node.completed(uid);
-               node.removeInsertSender(myKey, origHTL, this);
-        }
-    }
-    
-    private boolean hasForwardedRejectedOverload = false;
-    
-    synchronized boolean receivedRejectedOverload() {
-       return hasForwardedRejectedOverload;
-    }
-    
-    /** Forward RejectedOverload to the request originator.
-     * DO NOT CALL if have a *local* RejectedOverload.
-     */
-    private synchronized void forwardRejectedOverload() {
-       if(hasForwardedRejectedOverload) return;
-       hasForwardedRejectedOverload = true;
-               notifyAll();
-       }
-    
-    private void finish(int code, PeerNode next) {
-        Logger.minor(this, "Finished: "+code+" on "+this, new Exception("debug"));
-        if(status != NOT_FINISHED)
-            throw new IllegalStateException("finish() called with "+code+" when was already "+status);
-
-        setStatusTime = System.currentTimeMillis();
-        
-        if(code == ROUTE_NOT_FOUND && !sentRequest)
-               code = ROUTE_REALLY_NOT_FOUND;
-        
-        status = code;
-        
-        synchronized(this) {
-            notifyAll();
-        }
-
-        Logger.minor(this, "Set status code: "+getStatusString());
-        
-        // Now wait for transfers, or for downstream transfer notifications.
-        
-        synchronized(this) {
-               if(cw != null) {
-                       while(!allTransfersCompleted) {
-                               try {
-                                       wait(10*1000);
-                               } catch (InterruptedException e) {
-                                       // Try again
-                               }
-                       }
-               } else {
-                       // There weren't any transfers
-                       allTransfersCompleted = true;
-               }
-            notifyAll();
-        }
-        
-        Logger.minor(this, "Returning from finish()");
-    }
-
-    public int getStatus() {
-        return status;
-    }
-    
-    public short getHTL() {
-        return htl;
-    }
-
-    /**
-     * Called by InsertHandler to notify that the receive has
-     * failed.
-     */
-    public void receiveFailed() {
-        receiveFailed = true;
-    }
-
-    /**
-     * @return The current status as a string
-     */
-    public String getStatusString() {
-        if(status == SUCCESS)
-            return "SUCCESS";
-        if(status == ROUTE_NOT_FOUND)
-            return "ROUTE NOT FOUND";
-        if(status == NOT_FINISHED)
-            return "NOT FINISHED";
-        if(status == INTERNAL_ERROR)
-               return "INTERNAL ERROR";
-        if(status == TIMED_OUT)
-               return "TIMED OUT";
-        if(status == GENERATED_REJECTED_OVERLOAD)
-               return "GENERATED REJECTED OVERLOAD";
-        if(status == ROUTE_REALLY_NOT_FOUND)
-               return "ROUTE REALLY NOT FOUND";
-        return "UNKNOWN STATUS CODE: "+status;
-    }
-
-       public boolean sentRequest() {
-               return sentRequest;
-       }
-       
-       private synchronized void makeCompletionWaiter() {
-               if(cw == null) {
-                       cw = new CompletionWaiter();
-                       Thread t = new Thread(cw, "Completion waiter for "+uid);
-                       t.setDaemon(true);
-                       t.start();
-               }
-       }
-       
-       private class CompletionWaiter implements Runnable {
-               
-        public void run() {
-                Logger.minor(this, "Starting "+this);
-outer:        while(true) {
-                        AwaitingCompletion[] waiters;
-                        synchronized(nodesWaitingForCompletion) {
-                                waiters = new AwaitingCompletion[nodesWaitingForCompletion.size()];
-                                waiters = (AwaitingCompletion[]) nodesWaitingForCompletion.toArray(waiters);
-                        }
-                        
-                        // First calculate the timeout
-                        
-                        int timeout;
-                        boolean noTimeLeft = false;
-
-                        long now = System.currentTimeMillis();
-                        if(status == NOT_FINISHED) {
-                                // Wait 5 seconds, then try again
-                                timeout = 5000;
-                        } else {
-                                // Completed, wait for everything
-                                timeout = (int)Math.min(Integer.MAX_VALUE, (setStatusTime + TRANSFER_COMPLETION_TIMEOUT) - now);
-                        }
-                        if(timeout <= 0) {
-                                noTimeLeft = true;
-                                timeout = 1;
-                        }
-                        
-                        MessageFilter mf = null;
-                        for(int i=0;i<waiters.length;i++) {
-                                AwaitingCompletion awc = waiters[i];
-                                if(!awc.pn.isConnected()) {
-                                        Logger.normal(this, "Disconnected: "+awc.pn+" in "+InsertSender.this);
-                                        continue;
-                                }
-                                if(!awc.receivedCompletionNotice) {
-                                        MessageFilter m = MessageFilter.create().setField(DMT.UID, uid).setType(DMT.FNPInsertTransfersCompleted).setSource(awc.pn).setTimeout(timeout);
-                                        if(mf == null)
-                                                mf = m;
-                                        else
-                                                mf = m.or(mf);
-                                        Logger.minor(this, "Waiting for "+awc.pn.getPeer());
-                                }
-                        }
-                        
-                        if(mf == null) {
-                                if(status != NOT_FINISHED) {
-                                        if(nodesWaitingForCompletion.size() != waiters.length) {
-                                                // Added another one
-                                                Logger.minor(this, "Looping (mf==null): waiters="+waiters.length+" but waiting="+nodesWaitingForCompletion.size());
-                                                continue;
-                                        }
-                                        if(waitForCompletedTransfers(waiters, timeout, noTimeLeft)) {
-                                                synchronized(InsertSender.this) {
-                                                        allTransfersCompleted = true;
-                                                        InsertSender.this.notifyAll();
-                                                }
-                                                return;
-                                        }
-                                        if(noTimeLeft) {
-                                                for(int i=0;i<waiters.length;i++) {
-                                                        if(!waiters[i].pn.isConnected()) continue;
-                                                        if(!waiters[i].completedTransfer) {
-                                                                waiters[i].completedTransfer(false);
-                                                        }
-                                                }
-                                                synchronized(InsertSender.this) {
-                                                        allTransfersCompleted = true;
-                                                        InsertSender.this.notifyAll();
-                                                }
-                                                return;
-                                        }
-                                        // Otherwise, not finished, go back around loop
-                                        continue;
-                                } else {
-                                        // Still waiting for request completion, so more may be added
-                                        synchronized(nodesWaitingForCompletion) {
-                                                try {
-                                                        nodesWaitingForCompletion.wait(timeout);
-                                                } catch (InterruptedException e) {
-                                                        // Go back around the loop
-                                                }
-                                        }
-                                }
-                                continue;
-                        } else {
-                                Message m;
-                                try {
-                                        m = node.usm.waitFor(mf);
-                                } catch (DisconnectedException e) {
-                                        // Which one? I have no idea.
-                                        // Go around the loop again.
-                                        continue;
-                                }
-                                if(m != null) {
-                                        // Process message
-                                        PeerNode pn = (PeerNode) m.getSource();
-                                        boolean processed = false;
-                                        for(int i=0;i<waiters.length;i++) {
-                                                PeerNode p = waiters[i].pn;
-                                                if(p == pn) {
-                                                        boolean anyTimedOut = m.getBoolean(DMT.ANY_TIMED_OUT);
-                                                        waiters[i].completed(false, !anyTimedOut);
-                                                        if(anyTimedOut) {
-                                                                synchronized(InsertSender.this) {
-                                                                        if(!transferTimedOut) {
-                                                                                transferTimedOut = true;
-                                                                                InsertSender.this.notifyAll();
-                                                                        }
-                                                                }
-                                                        }
-                                                        processed = true;
-                                                        break;
-                                                }
-                                        }
-                                        if(!processed) {
-                                                Logger.error(this, "Did not process message: "+m+" on "+this);
-                                        }
-                                } else {
-                                        if(nodesWaitingForCompletion.size() > waiters.length) {
-                                                // Added another one
-                                                Logger.minor(this, "Looping: waiters="+waiters.length+" but waiting="+nodesWaitingForCompletion.size());
-                                                continue;
-                                        }
-                                        if(noTimeLeft) {
-                                                Logger.minor(this, "Overall timeout on "+InsertSender.this);
-                                                for(int i=0;i<waiters.length;i++) {
-                                                        if(!waiters[i].pn.isConnected()) continue;
-                                                        if(!waiters[i].receivedCompletionNotice)
-                                                                waiters[i].completed(false, false);
-                                                        if(!waiters[i].completedTransfer)
-                                                                waiters[i].completedTransfer(false);
-                                                }
-                                                synchronized(InsertSender.this) {
-                                                        transferTimedOut = true;
-                                                        allTransfersCompleted = true;
-                                                        InsertSender.this.notifyAll();
-                                                }
-                                                return;
-                                        }
-                                }
-                        }
-                }
-        }
-
-        /** @return True if all transfers have completed, false otherwise. */
-        private boolean waitForCompletedTransfers(AwaitingCompletion[] waiters, int timeout, boolean noTimeLeft) {
-                // MAYBE all done
-                boolean completedTransfers = true;
-                synchronized(nodesWaitingForCompletion) {
-                        for(int i=0;i<waiters.length;i++) {
-                                if(!waiters[i].pn.isConnected()) continue;
-                                if(!waiters[i].completedTransfer) {
-                                        completedTransfers = false;
-                                        break;
-                                }
-                        }
-                        if(!completedTransfers) {
-                                try {
-                                        if(!noTimeLeft) {
-                                                nodesWaitingForCompletion.wait(timeout);
-                                        } else {
-                                                // Timed out
-                                        }
-                                        completedTransfers = true;
-                                        for(int i=0;i<waiters.length;i++) {
-                                                if(!waiters[i].pn.isConnected()) continue;
-                                                if(!waiters[i].completedTransfer) {
-                                                        completedTransfers = false;
-                                                        break;
-                                                }
-                                        }
-                                } catch (InterruptedException e) {
-                                        // Ignore
-                                }
-                        }
-                }
-                if(completedTransfers) {
-                        // All done!
-                        Logger.minor(this, "Completed, status="+getStatusString()+", nothing left to wait for.");
-                        synchronized(InsertSender.this) {
-                                allTransfersCompleted = true;
-                                InsertSender.this.notifyAll();
-                        }
-                        return true;
-                } else return false;
-        }
-
-        public String toString() {
-                return super.toString()+" for "+uid;
-        }
-       }
-
-       public boolean completed() {
-               return allTransfersCompleted;
-       }
-
-       public boolean anyTransfersFailed() {
-               return transferTimedOut;
-       }
-}
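
A side note on the or-chained MessageFilter pattern used throughout the removed sender above: the comment "mfRejectedOverload must be the last thing in the or" matters because or() links filters into a chain, and a filter that still carries a stale link cannot safely be reused as the tail of a new chain. The following is a minimal, self-contained sketch of that idea using a hypothetical Filter class (not Freenet's MessageFilter), purely for illustration.

// Hypothetical stand-in for MessageFilter, showing why the tail of an
// or-chain must keep its "or" pointer null before it is reused.
final class Filter {
    private final String type;
    private Filter or; // next alternative in the chain
    Filter(String type) { this.type = type; }
    Filter or(Filter other) { this.or = other; return this; }
    void clearOr() { this.or = null; }
    boolean matches(String msgType) {
        return type.equals(msgType) || (or != null && or.matches(msgType));
    }
}

class OrChainDemo {
    public static void main(String[] args) {
        Filter accepted = new Filter("Accepted");
        Filter rejectedLoop = new Filter("RejectedLoop");
        Filter rejectedOverload = new Filter("RejectedOverload");
        // First wait: Accepted OR RejectedLoop OR RejectedOverload.
        Filter waitForAccepted = accepted.or(rejectedLoop.or(rejectedOverload));
        System.out.println(waitForAccepted.matches("RejectedOverload")); // true
        // Second wait reuses rejectedOverload as the tail of a new chain;
        // clearOr() guarantees no stale link from the first chain leaks in.
        rejectedOverload.clearOr();
        Filter waitForReply = new Filter("InsertReply").or(rejectedOverload);
        System.out.println(waitForReply.matches("Accepted")); // false
    }
}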

Modified: branches/freenet-freejvms/src/freenet/node/KeyTracker.java
===================================================================
--- branches/freenet-freejvms/src/freenet/node/KeyTracker.java  2006-02-03 22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/freenet/node/KeyTracker.java  2006-02-03 22:55:27 UTC (rev 7999)
@@ -717,6 +717,7 @@
         UpdatableSortedLinkedListItem[] items;
         int[] packetNumbers;
         int realLength;
+        Logger.minor(this, "Grabbing ack requests");
         try {
         synchronized(ackRequestQueue) {
             long now = System.currentTimeMillis();
@@ -734,18 +735,20 @@
                         continue;
                     }
                     packetNumbers[realLength++] = packetNumber;
-                    Logger.minor(this, "Grabbing ack request "+packetNumber+" 
from "+this);
+                    Logger.minor(this, "Grabbing ack request "+packetNumber+" 
("+realLength+") from "+this);
                     qr.sent();
                 } else {
-                    Logger.minor(this, "Ignoring ack request "+packetNumber+" 
- will become active in "+(qr.activeTime-now)+" ms on "+this+" - "+qr);
+                    Logger.minor(this, "Ignoring ack request "+packetNumber+" 
("+realLength+") - will become active in "+(qr.activeTime-now)+" ms on "+this+" 
- "+qr);
                 }
             }
         }
         } catch (UpdatableSortedLinkedListKilledException e) {
                throw new NotConnectedException();
         }
+        Logger.minor(this, "realLength now "+realLength);
         int[] trimmedPacketNumbers = new int[realLength];
         System.arraycopy(packetNumbers, 0, trimmedPacketNumbers, 0, realLength);
+        Logger.minor(this, "Returning "+trimmedPacketNumbers.length+" ackRequests");
         return trimmedPacketNumbers;
     }
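
The hunk above adds logging around grabAckRequests(), which fills an oversized packetNumbers array, counts the entries actually written in realLength, and trims the result with System.arraycopy before returning it. A self-contained sketch of that collect-then-trim pattern (the filtering condition here is a stand-in, not KeyTracker's activeTime test):

import java.util.Arrays;

class TrimDemo {
    // Collect an unknown number of values into an oversized buffer,
    // then copy out only the realLength entries that were written.
    static int[] grab(int[] candidates) {
        int[] packetNumbers = new int[candidates.length];
        int realLength = 0;
        for (int packetNumber : candidates) {
            if (packetNumber >= 0) { // stand-in for "ack request is active"
                packetNumbers[realLength++] = packetNumber;
            }
        }
        int[] trimmed = new int[realLength];
        System.arraycopy(packetNumbers, 0, trimmed, 0, realLength);
        return trimmed;
    }

    public static void main(String[] args) {
        System.out.println(Arrays.toString(grab(new int[] { 3, -1, 7, 12, -1 }))); // [3, 7, 12]
    }
}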


Modified: branches/freenet-freejvms/src/freenet/node/LocationManager.java
===================================================================
--- branches/freenet-freejvms/src/freenet/node/LocationManager.java     2006-02-03 22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/freenet/node/LocationManager.java     2006-02-03 22:55:27 UTC (rev 7999)
@@ -152,7 +152,7 @@
                             } finally {
                                 unlock();
                             }
-                        }
+                        } else unlock();
                     } else {
                         continue;
                     }
@@ -532,7 +532,10 @@
      * false if it was already locked.
      */
     synchronized boolean lock() {
-        if(locked) return false;
+        if(locked) {
+               Logger.minor(this, "Already locked");
+               return false;
+        }
         Logger.minor(this, "Locking on port "+node.portNumber);
         locked = true;
         lockedTime = System.currentTimeMillis();
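
The "} else unlock();" added above closes a path where lock() had succeeded but no swap was actually started, so the lock was never released. A minimal sketch of the lock/unlock balance this restores; SwapLock and maybeSwap are hypothetical names, not LocationManager's API:

// Every successful lock() must be matched by unlock() on every exit path.
class SwapLock {
    private boolean locked;

    synchronized boolean lock() {
        if (locked) return false; // already locked, as in LocationManager.lock()
        locked = true;
        return true;
    }

    synchronized void unlock() {
        locked = false;
    }
}

class SwapDemo {
    static void maybeSwap(SwapLock lock, boolean shouldSwap) {
        if (!lock.lock()) return; // somebody else is swapping
        if (shouldSwap) {
            try {
                // ... perform the swap ...
            } finally {
                lock.unlock();
            }
        } else {
            lock.unlock(); // the path the "else unlock();" line covers
        }
    }

    public static void main(String[] args) {
        SwapLock lock = new SwapLock();
        maybeSwap(lock, false);
        System.out.println(lock.lock()); // true: the lock was released
    }
}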

Modified: branches/freenet-freejvms/src/freenet/node/LowLevelGetException.java
===================================================================
--- branches/freenet-freejvms/src/freenet/node/LowLevelGetException.java        2006-02-03 22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/freenet/node/LowLevelGetException.java        2006-02-03 22:55:27 UTC (rev 7999)
@@ -56,7 +56,7 @@
                this.code = code;
        }

-       LowLevelGetException(int reason) {
+       public LowLevelGetException(int reason) {
                super(getMessage(reason));
                this.code = reason;
        }

Modified: branches/freenet-freejvms/src/freenet/node/LowLevelPutException.java
===================================================================
--- branches/freenet-freejvms/src/freenet/node/LowLevelPutException.java        2006-02-03 22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/freenet/node/LowLevelPutException.java        2006-02-03 22:55:27 UTC (rev 7999)
@@ -11,6 +11,8 @@
        public static final int REJECTED_OVERLOAD = 3;
        /** Insert could not get off the node at all */
        public static final int ROUTE_REALLY_NOT_FOUND = 4;
+       /** Insert collided with pre-existing, different content. Can only happen with KSKs and SSKs. */
+       public static final int COLLISION = 5;

        /** Failure code */
        public final int code;
@@ -25,6 +27,8 @@
                        return "A node downstream either timed out or was 
overloaded (retry)";
                case ROUTE_REALLY_NOT_FOUND:
                        return "The insert could not get off the node at all";
+               case COLLISION:
+                       return "The insert collided with different data of the 
same key already on the network";
                default:
                        return "Unknown error code: "+reason;
                }
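
The new COLLISION code reports a KSK/SSK insert that found different data already stored under the same key. A condensed, self-contained sketch of the code-to-message pattern this class follows; the class name PutFailure is hypothetical and only the codes visible in this hunk are reproduced:

class PutFailure extends Exception {
    static final int REJECTED_OVERLOAD = 3;
    static final int ROUTE_REALLY_NOT_FOUND = 4;
    static final int COLLISION = 5; // added by the hunk above

    final int code;

    PutFailure(int code) {
        super(getMessage(code));
        this.code = code;
    }

    static String getMessage(int reason) {
        switch (reason) {
        case REJECTED_OVERLOAD:
            return "A node downstream either timed out or was overloaded (retry)";
        case ROUTE_REALLY_NOT_FOUND:
            return "The insert could not get off the node at all";
        case COLLISION:
            return "The insert collided with different data of the same key already on the network";
        default:
            return "Unknown error code: " + reason;
        }
    }
}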

Modified: branches/freenet-freejvms/src/freenet/node/Node.java
===================================================================
--- branches/freenet-freejvms/src/freenet/node/Node.java        2006-02-03 22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/freenet/node/Node.java        2006-02-03 22:55:27 UTC (rev 7999)
@@ -17,16 +17,24 @@
 import java.io.OutputStreamWriter;
 import java.net.InetAddress;
 import java.net.SocketException;
+import java.net.UnknownHostException;
 import java.security.MessageDigest;
 import java.security.NoSuchAlgorithmException;
+import java.util.Arrays;
 import java.util.HashMap;
 import java.util.HashSet;
-import java.util.Hashtable;
 import java.util.Iterator;

+import snmplib.SNMPAgent;
+import snmplib.SNMPStarter;
+
 import freenet.client.ArchiveManager;
 import freenet.client.HighLevelSimpleClient;
 import freenet.client.HighLevelSimpleClientImpl;
+import freenet.client.async.ClientRequestScheduler;
+import freenet.clients.http.FproxyToadlet;
+import freenet.clients.http.SimpleToadletServer;
+import freenet.crypt.DSAPublicKey;
 import freenet.crypt.DiffieHellman;
 import freenet.crypt.RandomSource;
 import freenet.crypt.Yarrow;
@@ -46,14 +54,23 @@
 import freenet.keys.ClientCHKBlock;
 import freenet.keys.ClientKey;
 import freenet.keys.ClientKeyBlock;
+import freenet.keys.ClientSSK;
+import freenet.keys.ClientSSKBlock;
+import freenet.keys.Key;
 import freenet.keys.KeyBlock;
+import freenet.keys.KeyVerifyException;
 import freenet.keys.NodeCHK;
-import freenet.store.BaseFreenetStore;
+import freenet.keys.NodeSSK;
+import freenet.keys.SSKBlock;
+import freenet.keys.SSKVerifyException;
+import freenet.node.fcp.FCPServer;
 import freenet.store.BerkeleyDBFreenetStore;
 import freenet.store.FreenetStore;
 import freenet.support.BucketFactory;
 import freenet.support.FileLoggerHook;
 import freenet.support.HexUtil;
+import freenet.support.ImmutableByteArrayWrapper;
+import freenet.support.LRUHashtable;
 import freenet.support.LRUQueue;
 import freenet.support.Logger;
 import freenet.support.PaddedEphemerallyEncryptedBucketFactory;
@@ -75,7 +92,7 @@
 /**
  * @author amphibian
  */
-public class Node implements QueueingSimpleLowLevelClient {
+public class Node {

        static final long serialVersionUID = -1;

@@ -128,13 +145,17 @@
     final int portNumber;

     /** These 3 are private because must be protected by synchronized(this) */
-    /** The datastore */
-    private final FreenetStore datastore;
+    /** The CHK datastore */
+    private final FreenetStore chkDatastore;
+    /** The SSK datastore */
+    private final FreenetStore sskDatastore;
+    /** The store of DSAPublicKeys (by hash) */
+    private final FreenetStore pubKeyDatastore;
     /** RequestSender's currently running, by KeyHTLPair */
     private final HashMap requestSenders;
     /** RequestSender's currently transferring, by key */
     private final HashMap transferringRequestSenders;
-    /** InsertSender's currently running, by KeyHTLPair */
+    /** CHKInsertSender's currently running, by KeyHTLPair */
     private final HashMap insertSenders;

     private final HashSet runningUIDs;
@@ -147,7 +168,7 @@
     String myName;
     final LocationManager lm;
     final PeerManager peers; // my peers
-    final RandomSource random; // strong RNG
+    public final RandomSource random; // strong RNG
     final UdpSocketManager usm;
     final FNPPacketMangler packetMangler;
     final PacketSender ps;
@@ -156,6 +177,8 @@
     final String filenamesPrefix;
     final FilenameGenerator tempFilenameGenerator;
     final FileLoggerHook fileLoggerHook;
+    static final int MAX_CACHED_KEYS = 1000;
+    final LRUHashtable cachedPubKeys;
     final boolean testnetEnabled;
     final int testnetPort;
     static short MAX_HTL = 10;
@@ -173,7 +196,7 @@

     // Client stuff
     final ArchiveManager archiveManager;
-    final BucketFactory tempBucketFactory;
+    public final BucketFactory tempBucketFactory;
     final RequestThrottle requestThrottle;
     final RequestStarter requestStarter;
     final RequestThrottle insertThrottle;
@@ -181,6 +204,8 @@
     final File downloadDir;
     final TestnetHandler testnetHandler;
     final TestnetStatusUploader statusUploader;
+    public final ClientRequestScheduler fetchScheduler;
+    public final ClientRequestScheduler putScheduler;

     // Client stuff that needs to be configged - FIXME
     static final int MAX_ARCHIVE_HANDLERS = 200; // don't take up much RAM... 
FIXME
@@ -189,6 +214,9 @@
     static final long MAX_ARCHIVED_FILE_SIZE = 1024*1024; // arbitrary... FIXME
     static final int MAX_CACHED_ELEMENTS = 1024; // equally arbitrary! FIXME 
hopefully we can cache many of these though

+    // Helpers
+       public final InetAddress localhostAddress;
+    
     /**
      * Read all storable settings (identity etc) from the node file.
      * @param filename The name of the file to read from.
@@ -328,18 +356,30 @@
             }
         }
         DiffieHellman.init(yarrow);
-        Node n = new Node(port, yarrow, overrideIP, "", 1000 / packetsPerSecond, true, logger);
+        Node n = new Node(port, yarrow, overrideIP, "", 1000 / packetsPerSecond, true, logger, 32768 /* 1GB */);
         n.start(new StaticSwapRequestInterval(2000));
         new TextModeClientInterface(n);
         Thread t = new Thread(new MemoryChecker(), "Memory checker");
         t.setPriority(Thread.MAX_PRIORITY);
         t.start();
+        SimpleToadletServer server = new SimpleToadletServer(port+2000);
+        FproxyToadlet fproxy = new FproxyToadlet(n.makeClient(RequestStarter.INTERACTIVE_PRIORITY_CLASS));
+        server.register(fproxy, "/", false);
+        System.out.println("Starting fproxy on port "+(port+2000));
+        new FCPServer(port+3000, n);
+        System.out.println("Starting FCP server on port "+(port+3000));
+        SNMPAgent.setSNMPPort(port+4000);
+        System.out.println("Starting SNMP server on port "+(port+4000));
+        SNMPStarter.initialize();
+        //server.register(fproxy, "/SSK@", false);
+        //server.register(fproxy, "/KSK@", false);
     }

     // FIXME - the whole overrideIP thing is a hack to avoid config
     // Implement the config!
-    Node(int port, RandomSource rand, InetAddress overrideIP, String prefix, int throttleInterval, boolean enableTestnet, FileLoggerHook logger) {
+    Node(int port, RandomSource rand, InetAddress overrideIP, String prefix, int throttleInterval, boolean enableTestnet, FileLoggerHook logger, int maxStoreKeys) {
        this.fileLoggerHook = logger;
+       cachedPubKeys = new LRUHashtable();
        if(enableTestnet) {
                Logger.error(this, "WARNING: ENABLING TESTNET CODE! This may 
seriously jeopardize your anonymity!");
                testnetEnabled = true;
@@ -352,6 +392,12 @@
                testnetHandler = null;
                statusUploader = null;
        }
+       try {
+                       localhostAddress = InetAddress.getByName("127.0.0.1");
+               } catch (UnknownHostException e3) {
+                       // Does not do a reverse lookup, so this is impossible
+                       throw new Error(e3);
+               }
         portNumber = port;
         startupTime = System.currentTimeMillis();
         recentlyCompletedIDs = new LRUQueue();
@@ -361,7 +407,9 @@
         downloadDir = new File("downloads");
         downloadDir.mkdir();
         try {
-            datastore = new BerkeleyDBFreenetStore(prefix+"store-"+portNumber, 32768); // 1GB
+            chkDatastore = new BerkeleyDBFreenetStore(prefix+"store-"+portNumber, maxStoreKeys, 32768, CHKBlock.TOTAL_HEADERS_LENGTH);
+            sskDatastore = new BerkeleyDBFreenetStore(prefix+"sskstore-"+portNumber, maxStoreKeys, 1024, SSKBlock.TOTAL_HEADERS_LENGTH);
+            pubKeyDatastore = new BerkeleyDBFreenetStore(prefix+"pubkeystore-"+portNumber, maxStoreKeys, DSAPublicKey.PADDED_SIZE, 0);
         } catch (FileNotFoundException e1) {
             Logger.error(this, "Could not open datastore: "+e1, e1);
             System.err.println("Could not open datastore: "+e1);
@@ -440,11 +488,21 @@
                tempBucketFactory = new PaddedEphemerallyEncryptedBucketFactory(new TempBucketFactory(tempFilenameGenerator), random, 1024);
                archiveManager = new ArchiveManager(MAX_ARCHIVE_HANDLERS, MAX_CACHED_ARCHIVE_DATA, MAX_ARCHIVE_SIZE, MAX_ARCHIVED_FILE_SIZE, MAX_CACHED_ELEMENTS, random, tempFilenameGenerator);
                requestThrottle = new RequestThrottle(5000, 2.0F);
-               requestStarter = new RequestStarter(requestThrottle, "Request starter ("+portNumber+")");
+               requestStarter = new RequestStarter(this, requestThrottle, "Request starter ("+portNumber+")");
+               fetchScheduler = new ClientRequestScheduler(false, random, requestStarter, this);
+               requestStarter.setScheduler(fetchScheduler);
+               requestStarter.start();
                //insertThrottle = new ChainedRequestThrottle(10000, 2.0F, requestThrottle);
                // FIXME reenable the above
                insertThrottle = new RequestThrottle(10000, 2.0F);
-               insertStarter = new RequestStarter(insertThrottle, "Insert starter ("+portNumber+")");
+               insertStarter = new RequestStarter(this, insertThrottle, "Insert starter ("+portNumber+")");
+               putScheduler = new ClientRequestScheduler(true, random, insertStarter, this);
+               insertStarter.setScheduler(putScheduler);
+               insertStarter.start();
+               if(testnetHandler != null)
+                       testnetHandler.start();
+               if(statusUploader != null)
+                       statusUploader.start();
                System.err.println("Created Node on port "+port);
     }

@@ -455,32 +513,27 @@
         usm.start();
     }

-    public ClientKeyBlock getKey(ClientKey key, boolean localOnly, RequestStarterClient client, boolean cache) throws LowLevelGetException {
-       if(localOnly)
-               return realGetKey(key, localOnly, cache);
-       else
-               return client.getKey(key, localOnly, cache);
-    }
-    
-    public ClientKeyBlock realGetKey(ClientKey key, boolean localOnly, boolean cache) throws LowLevelGetException {
+    public ClientKeyBlock realGetKey(ClientKey key, boolean localOnly, boolean cache, boolean ignoreStore) throws LowLevelGetException {
        if(key instanceof ClientCHK)
-               return realGetCHK((ClientCHK)key, localOnly, cache);
+               return realGetCHK((ClientCHK)key, localOnly, cache, ignoreStore);
+       else if(key instanceof ClientSSK)
+               return realGetSSK((ClientSSK)key, localOnly, cache, ignoreStore);
        else
-               throw new IllegalArgumentException("Not a CHK: "+key);
+               throw new IllegalArgumentException("Not a CHK or SSK: "+key);
     }

     /**
      * Really trivially simple client interface.
      * Either it succeeds or it doesn't.
      */
-    ClientCHKBlock realGetCHK(ClientCHK key, boolean localOnly, boolean cache) throws LowLevelGetException {
+    ClientCHKBlock realGetCHK(ClientCHK key, boolean localOnly, boolean cache, boolean ignoreStore) throws LowLevelGetException {
        long startTime = System.currentTimeMillis();
        long uid = random.nextLong();
         if(!lockUID(uid)) {
             Logger.error(this, "Could not lock UID just randomly generated: "+uid+" - probably indicates broken PRNG");
             throw new LowLevelGetException(LowLevelGetException.INTERNAL_ERROR);
         }
-        Object o = makeRequestSender(key.getNodeCHK(), MAX_HTL, uid, null, lm.loc.getValue(), localOnly, cache);
+        Object o = makeRequestSender(key.getNodeCHK(), MAX_HTL, uid, null, lm.loc.getValue(), localOnly, cache, ignoreStore);
         if(o instanceof CHKBlock) {
             try {
                 return new ClientCHKBlock((CHKBlock)o, key);
@@ -547,6 +600,8 @@
                        case RequestSender.GENERATED_REJECTED_OVERLOAD:
                        case RequestSender.TIMED_OUT:
                                throw new LowLevelGetException(LowLevelGetException.REJECTED_OVERLOAD);
+                       case RequestSender.INTERNAL_ERROR:
+                               throw new LowLevelGetException(LowLevelGetException.INTERNAL_ERROR);
                        default:
                                Logger.error(this, "Unknown RequestSender code in getCHK: "+rs.getStatus()+" on "+rs);
                                throw new LowLevelGetException(LowLevelGetException.INTERNAL_ERROR);
@@ -555,15 +610,109 @@
         }
     }

-    public void putCHK(ClientCHKBlock block, RequestStarterClient starter, boolean cache) throws LowLevelPutException {
-               starter.putCHK(block, cache);
+    /**
+     * Really trivially simple client interface.
+     * Either it succeeds or it doesn't.
+     */
+    ClientSSKBlock realGetSSK(ClientSSK key, boolean localOnly, boolean cache, boolean ignoreStore) throws LowLevelGetException {
+       long startTime = System.currentTimeMillis();
+       long uid = random.nextLong();
+        if(!lockUID(uid)) {
+            Logger.error(this, "Could not lock UID just randomly generated: "+uid+" - probably indicates broken PRNG");
+            throw new LowLevelGetException(LowLevelGetException.INTERNAL_ERROR);
+        }
+        Object o = makeRequestSender(key.getNodeKey(), MAX_HTL, uid, null, lm.loc.getValue(), localOnly, cache, ignoreStore);
+        if(o instanceof SSKBlock) {
+            try {
+               SSKBlock block = (SSKBlock)o;
+               key.setPublicKey(block.getPubKey());
+                return new ClientSSKBlock(block, key);
+            } catch (SSKVerifyException e) {
+                Logger.error(this, "Does not verify: "+e, e);
+                throw new LowLevelGetException(LowLevelGetException.DECODE_FAILED);
+            }
+        }
+        if(o == null) {
+               throw new LowLevelGetException(LowLevelGetException.DATA_NOT_FOUND_IN_STORE);
+        }
+        RequestSender rs = (RequestSender)o;
+        boolean rejectedOverload = false;
+        while(true) {
+               if(rs.waitUntilStatusChange() && (!rejectedOverload)) {
+                       requestThrottle.requestRejectedOverload();
+                       rejectedOverload = true;
+               }
+
+               int status = rs.getStatus();
+
+               if(status == RequestSender.NOT_FINISHED)
+                       continue;
+
+               if(status == RequestSender.TIMED_OUT ||
+                               status == RequestSender.GENERATED_REJECTED_OVERLOAD) {
+                       if(!rejectedOverload) {
+                               requestThrottle.requestRejectedOverload();
+                               rejectedOverload = true;
+                       }
+               } else {
+                       if(status == RequestSender.DATA_NOT_FOUND ||
+                                       status == RequestSender.SUCCESS ||
+                                       status == RequestSender.ROUTE_NOT_FOUND ||
+                                       status == RequestSender.VERIFY_FAILURE) {
+                               long rtt = System.currentTimeMillis() - startTime;
+                               requestThrottle.requestCompleted(rtt);
+                       }
+               }
+
+               if(rs.getStatus() == RequestSender.SUCCESS) {
+                       try {
+                               SSKBlock block = rs.getSSKBlock();
+                               key.setPublicKey(block.getPubKey());
+                               return new ClientSSKBlock(block, key);
+                       } catch (SSKVerifyException e) {
+                               Logger.error(this, "Does not verify: "+e, e);
+                               throw new LowLevelGetException(LowLevelGetException.DECODE_FAILED);
+                       }
+               } else {
+                       switch(rs.getStatus()) {
+                       case RequestSender.NOT_FINISHED:
+                               Logger.error(this, "RS still running in getSSK!: "+rs);
+                               throw new LowLevelGetException(LowLevelGetException.INTERNAL_ERROR);
+                       case RequestSender.DATA_NOT_FOUND:
+                               throw new LowLevelGetException(LowLevelGetException.DATA_NOT_FOUND);
+                       case RequestSender.ROUTE_NOT_FOUND:
+                               throw new LowLevelGetException(LowLevelGetException.ROUTE_NOT_FOUND);
+                       case RequestSender.TRANSFER_FAILED:
+                               Logger.error(this, "WTF? Transfer failed on an SSK? on "+uid);
+                               throw new LowLevelGetException(LowLevelGetException.TRANSFER_FAILED);
+                       case RequestSender.VERIFY_FAILURE:
+                               throw new LowLevelGetException(LowLevelGetException.VERIFY_FAILED);
+                       case RequestSender.GENERATED_REJECTED_OVERLOAD:
+                       case RequestSender.TIMED_OUT:
+                               throw new LowLevelGetException(LowLevelGetException.REJECTED_OVERLOAD);
+                       case RequestSender.INTERNAL_ERROR:
+                       default:
+                               Logger.error(this, "Unknown RequestSender code in getSSK: "+rs.getStatus()+" on "+rs);
+                               throw new LowLevelGetException(LowLevelGetException.INTERNAL_ERROR);
+                       }
+               }
+        }
     }
+
+    public void realPut(ClientKeyBlock block, boolean cache) throws LowLevelPutException {
+       if(block instanceof ClientCHKBlock)
+               realPutCHK((ClientCHKBlock)block, cache);
+       else if(block instanceof ClientSSKBlock)
+               realPutSSK((ClientSSKBlock)block, cache);
+       else
+               throw new IllegalArgumentException("Unknown put type "+block.getClass());
+    }

     public void realPutCHK(ClientCHKBlock block, boolean cache) throws LowLevelPutException {
         byte[] data = block.getData();
-        byte[] headers = block.getHeader();
+        byte[] headers = block.getHeaders();
         PartiallyReceivedBlock prb = new PartiallyReceivedBlock(PACKETS_IN_BLOCK, PACKET_SIZE, data);
-        InsertSender is;
+        CHKInsertSender is;
         long uid = random.nextLong();
         if(!lockUID(uid)) {
             Logger.error(this, "Could not lock UID just randomly generated: "+uid+" - probably indicates broken PRNG");
@@ -573,26 +722,26 @@
         synchronized(this) {
                if(cache) {
                        try {
-                               datastore.put(block);
+                               chkDatastore.put(block);
                        } catch (IOException e) {
                                Logger.error(this, "Datastore failure: "+e, e);
                        }
                }
-            is = makeInsertSender(block.getClientKey().getNodeCHK(), 
+            is = makeInsertSender((NodeCHK)block.getClientKey().getNodeKey(), 
                     MAX_HTL, uid, null, headers, prb, false, 
lm.getLocation().getValue(), cache);
         }
         boolean hasForwardedRejectedOverload = false;
         // Wait for status
         while(true) {
                synchronized(is) {
-                       if(is.getStatus() == InsertSender.NOT_FINISHED) {
+                       if(is.getStatus() == CHKInsertSender.NOT_FINISHED) {
                                try {
                                        is.wait(5*1000);
                                } catch (InterruptedException e) {
                                        // Ignore
                                }
                        }
-                       if(is.getStatus() != InsertSender.NOT_FINISHED) break;
+                       if(is.getStatus() != CHKInsertSender.NOT_FINISHED) 
break;
                }
                if((!hasForwardedRejectedOverload) && 
is.receivedRejectedOverload()) {
                        hasForwardedRejectedOverload = true;
@@ -621,8 +770,8 @@
         // Finished?
         if(!hasForwardedRejectedOverload) {
                // Is it ours? Did we send a request?
-               if(is.sentRequest() && is.uid == uid && (is.getStatus() == 
InsertSender.ROUTE_NOT_FOUND 
-                               || is.getStatus() == InsertSender.SUCCESS)) {
+               if(is.sentRequest() && is.uid == uid && (is.getStatus() == 
CHKInsertSender.ROUTE_NOT_FOUND 
+                               || is.getStatus() == CHKInsertSender.SUCCESS)) {
                        // It worked!
                        long endTime = System.currentTimeMillis();
                        long len = endTime - startTime;
@@ -630,38 +779,142 @@
                }
         }

-        if(is.getStatus() == InsertSender.SUCCESS) {
+        if(is.getStatus() == CHKInsertSender.SUCCESS) {
                Logger.normal(this, "Succeeded inserting "+block);
                return;
         } else {
                int status = is.getStatus();
                String msg = "Failed inserting "+block+" : 
"+is.getStatusString();
-               if(status == InsertSender.ROUTE_NOT_FOUND)
+               if(status == CHKInsertSender.ROUTE_NOT_FOUND)
                        msg += " - this is normal on small networks; the data 
will still be propagated, but it can't find the 20+ nodes needed for full 
success";
-               if(is.getStatus() != InsertSender.ROUTE_NOT_FOUND)
+               if(is.getStatus() != CHKInsertSender.ROUTE_NOT_FOUND)
                        Logger.error(this, msg);
                else
                        Logger.normal(this, msg);
                switch(is.getStatus()) {
-               case InsertSender.NOT_FINISHED:
+               case CHKInsertSender.NOT_FINISHED:
                        Logger.error(this, "IS still running in putCHK!: "+is);
                        throw new 
LowLevelPutException(LowLevelPutException.INTERNAL_ERROR);
-               case InsertSender.GENERATED_REJECTED_OVERLOAD:
-               case InsertSender.TIMED_OUT:
+               case CHKInsertSender.GENERATED_REJECTED_OVERLOAD:
+               case CHKInsertSender.TIMED_OUT:
                        throw new 
LowLevelPutException(LowLevelPutException.REJECTED_OVERLOAD);
-               case InsertSender.ROUTE_NOT_FOUND:
+               case CHKInsertSender.ROUTE_NOT_FOUND:
                        throw new 
LowLevelPutException(LowLevelPutException.ROUTE_NOT_FOUND);
-               case InsertSender.ROUTE_REALLY_NOT_FOUND:
+               case CHKInsertSender.ROUTE_REALLY_NOT_FOUND:
                        throw new 
LowLevelPutException(LowLevelPutException.ROUTE_REALLY_NOT_FOUND);
-               case InsertSender.INTERNAL_ERROR:
+               case CHKInsertSender.INTERNAL_ERROR:
                        throw new 
LowLevelPutException(LowLevelPutException.INTERNAL_ERROR);
                default:
-                       Logger.error(this, "Unknown InsertSender code in 
putCHK: "+is.getStatus()+" on "+is);
+                       Logger.error(this, "Unknown CHKInsertSender code in 
putCHK: "+is.getStatus()+" on "+is);
                        throw new 
LowLevelPutException(LowLevelPutException.INTERNAL_ERROR);
                }
         }
     }

+    public void realPutSSK(ClientSSKBlock block, boolean cache) throws 
LowLevelPutException {
+        byte[] data = block.getRawData();
+        byte[] headers = block.getRawHeaders();
+        SSKInsertSender is;
+        long uid = random.nextLong();
+        if(!lockUID(uid)) {
+            Logger.error(this, "Could not lock UID just randomly generated: 
"+uid+" - probably indicates broken PRNG");
+            throw new 
LowLevelPutException(LowLevelPutException.INTERNAL_ERROR);
+        }
+        long startTime = System.currentTimeMillis();
+        synchronized(this) {
+               if(cache) {
+                       try {
+                               sskDatastore.put(block);
+                       } catch (IOException e) {
+                               Logger.error(this, "Datastore failure: "+e, e);
+                       }
+               }
+            is = makeInsertSender(block, 
+                    MAX_HTL, uid, null, false, lm.getLocation().getValue(), 
cache);
+        }
+        boolean hasForwardedRejectedOverload = false;
+        // Wait for status
+        while(true) {
+               synchronized(is) {
+                       if(is.getStatus() == SSKInsertSender.NOT_FINISHED) {
+                               try {
+                                       is.wait(5*1000);
+                               } catch (InterruptedException e) {
+                                       // Ignore
+                               }
+                       }
+                       if(is.getStatus() != SSKInsertSender.NOT_FINISHED) 
break;
+               }
+               if((!hasForwardedRejectedOverload) && 
is.receivedRejectedOverload()) {
+                       hasForwardedRejectedOverload = true;
+                       insertThrottle.requestRejectedOverload();
+               }
+        }
+        
+        // Wait for completion
+        while(true) {
+               synchronized(is) {
+                       if(is.getStatus() != SSKInsertSender.NOT_FINISHED) 
break;
+                       try {
+                               is.wait(10*1000);
+                       } catch (InterruptedException e) {
+                               // Go around again
+                       }
+               }
+        }
+        
+        Logger.minor(this, "Completed "+uid+" 
overload="+hasForwardedRejectedOverload+" "+is.getStatusString());
+        
+        // Finished?
+        if(!hasForwardedRejectedOverload) {
+               // Is it ours? Did we send a request?
+               if(is.sentRequest() && is.uid == uid && (is.getStatus() == 
SSKInsertSender.ROUTE_NOT_FOUND 
+                               || is.getStatus() == SSKInsertSender.SUCCESS)) {
+                       // It worked!
+                       long endTime = System.currentTimeMillis();
+                       long len = endTime - startTime;
+                       insertThrottle.requestCompleted(len);
+               }
+        }
+
+        if(is.hasCollided()) {
+               // Store it locally so it can be fetched immediately,
+               // overwriting any locally inserted version.
+               store(is.getBlock());
+               throw new LowLevelPutException(LowLevelPutException.COLLISION);
+        }
+        
+        if(is.getStatus() == SSKInsertSender.SUCCESS) {
+               Logger.normal(this, "Succeeded inserting "+block);
+               return;
+        } else {
+               int status = is.getStatus();
+               String msg = "Failed inserting "+block+" : 
"+is.getStatusString();
+               if(status == SSKInsertSender.ROUTE_NOT_FOUND)
+                       msg += " - this is normal on small networks; the data 
will still be propagated, but it can't find the 20+ nodes needed for full 
success";
+               if(is.getStatus() != SSKInsertSender.ROUTE_NOT_FOUND)
+                       Logger.error(this, msg);
+               else
+                       Logger.normal(this, msg);
+               switch(is.getStatus()) {
+               case SSKInsertSender.NOT_FINISHED:
+                       Logger.error(this, "IS still running in putSSK!: "+is);
+                       throw new 
LowLevelPutException(LowLevelPutException.INTERNAL_ERROR);
+               case SSKInsertSender.GENERATED_REJECTED_OVERLOAD:
+               case SSKInsertSender.TIMED_OUT:
+                       throw new 
LowLevelPutException(LowLevelPutException.REJECTED_OVERLOAD);
+               case SSKInsertSender.ROUTE_NOT_FOUND:
+                       throw new 
LowLevelPutException(LowLevelPutException.ROUTE_NOT_FOUND);
+               case SSKInsertSender.ROUTE_REALLY_NOT_FOUND:
+                       throw new 
LowLevelPutException(LowLevelPutException.ROUTE_REALLY_NOT_FOUND);
+               case SSKInsertSender.INTERNAL_ERROR:
+                       throw new 
LowLevelPutException(LowLevelPutException.INTERNAL_ERROR);
+               default:
+                       Logger.error(this, "Unknown SSKInsertSender code in putSSK: "+is.getStatus()+" on "+is);
+                       throw new 
LowLevelPutException(LowLevelPutException.INTERNAL_ERROR);
+               }
+        }
+    }
+
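
Both realPutCHK above and realPutSSK here block on the insert sender the same
way: take the sender's monitor, wait in short slices, and re-check the status
each time round so that a forwarded RejectedOverload can be passed to the
throttle between waits. A minimal, self-contained sketch of that pattern, with
a hypothetical Sender standing in for CHKInsertSender/SSKInsertSender:

    public class StatusWaitSketch {

        /** Hypothetical sender whose status is guarded by its own monitor. */
        static class Sender {
            static final int NOT_FINISHED = 0;
            static final int SUCCESS = 1;
            private int status = NOT_FINISHED;

            synchronized int getStatus() { return status; }

            synchronized void finish(int newStatus) {
                status = newStatus;
                notifyAll(); // wake anyone blocked in waitForCompletion()
            }
        }

        /** Poll in short slices so side conditions (e.g. a forwarded
         *  RejectedOverload) can be handled between waits without missing
         *  the final notification. */
        static int waitForCompletion(Sender s) throws InterruptedException {
            while (true) {
                synchronized (s) {
                    if (s.getStatus() != Sender.NOT_FINISHED) return s.getStatus();
                    s.wait(5 * 1000);
                }
                // Monitor released here; a real caller checks the throttle.
            }
        }

        public static void main(String[] args) throws InterruptedException {
            final Sender s = new Sender();
            new Thread(new Runnable() {
                public void run() {
                    try { Thread.sleep(200); } catch (InterruptedException e) { }
                    s.finish(Sender.SUCCESS);
                }
            }).start();
            System.out.println("status=" + waitForCompletion(s));
        }
    }

Because finish() calls notifyAll() and the wait is bounded, a missed wake-up
costs at most one timeout slice rather than hanging the insert forever.
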
     long lastAcceptedRequest = -1;

     public synchronized boolean shouldRejectRequest() {
@@ -689,11 +942,12 @@
      */
     public SimpleFieldSet exportFieldSet() {
         SimpleFieldSet fs = new SimpleFieldSet();
-        fs.put("physical.udp", 
getPrimaryIPAddress().getHostAddress()+":"+portNumber);
+        fs.put("physical.udp", 
Peer.getHostName(getPrimaryIPAddress())+":"+portNumber);
         fs.put("identity", HexUtil.bytesToHex(myIdentity));
         fs.put("location", Double.toString(lm.getLocation().getValue()));
         fs.put("version", Version.getVersionString());
         fs.put("testnet", Boolean.toString(testnetEnabled));
+        fs.put("lastGoodVersion", Version.getLastGoodVersionString());
         if(testnetEnabled)
                fs.put("testnetPort", Integer.toString(testnetPort));
         fs.put("myName", myName);
@@ -709,7 +963,7 @@
      * detection properly with NetworkInterface, and we should use
      * third parties if available and UP&P if available.
      */
-    private InetAddress getPrimaryIPAddress() {
+    InetAddress getPrimaryIPAddress() {
         if(overrideIPAddress != null) {
             Logger.minor(this, "Returning overridden address: 
"+overrideIPAddress);
             return overrideIPAddress;
@@ -758,16 +1012,39 @@
      * a RequestSender, unless the HTL is 0, in which case NULL.
      * RequestSender.
      */
-    public synchronized Object makeRequestSender(NodeCHK key, short htl, long 
uid, PeerNode source, double closestLocation, boolean localOnly, boolean cache) 
{
+    public synchronized Object makeRequestSender(Key key, short htl, long uid, 
PeerNode source, double closestLocation, boolean localOnly, boolean cache, 
boolean ignoreStore) {
         Logger.minor(this, 
"makeRequestSender("+key+","+htl+","+uid+","+source+") on "+portNumber);
         // In store?
-        CHKBlock chk = null;
+        KeyBlock chk = null;
+        if(!ignoreStore) {
         try {
-            chk = datastore.fetch(key, !cache);
+               if(key instanceof NodeCHK)
+                       chk = chkDatastore.fetch((NodeCHK)key, !cache);
+               else if(key instanceof NodeSSK) {
+                       NodeSSK k = (NodeSSK)key;
+                       DSAPublicKey pubKey = k.getPubKey();
+                       if(pubKey == null) {
+                               pubKey = getKey(k.getPubKeyHash());
+                               Logger.minor(this, "Fetched pubkey: "+pubKey+" 
"+(pubKey == null ? "" : pubKey.writeAsField()));
+                               try {
+                                       k.setPubKey(pubKey);
+                               } catch (SSKVerifyException e) {
+                                       Logger.error(this, "Error setting pubkey: "+e, e);
+                               }
+                       }
+                       if(pubKey != null) {
+                               Logger.minor(this, "Got pubkey: "+pubKey+" 
"+pubKey.writeAsField());
+                               chk = sskDatastore.fetch((NodeSSK)key, !cache);
+                       } else {
+                               Logger.minor(this, "Not found because no 
pubkey: "+uid);
+                       }
+               } else
+                       throw new IllegalStateException("Unknown key type: 
"+key.getClass());
         } catch (IOException e) {
             Logger.error(this, "Error accessing store: "+e, e);
         }
         if(chk != null) return chk;
+        }
         if(localOnly) return null;
         Logger.minor(this, "Not in store locally");

@@ -792,16 +1069,16 @@
             return sender;
         }

-        sender = new RequestSender(key, htl, uid, this, closestLocation, 
source);
+        sender = new RequestSender(key, null, htl, uid, this, closestLocation, 
source);
         requestSenders.put(kh, sender);
         Logger.minor(this, "Created new sender: "+sender);
         return sender;
     }

     static class KeyHTLPair {
-        final NodeCHK key;
+        final Key key;
         final short htl;
-        KeyHTLPair(NodeCHK key, short htl) {
+        KeyHTLPair(Key key, short htl) {
             this.key = key;
             this.htl = htl;
         }
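
The makeRequestSender() change above makes the local-store check key-type
aware: an SSK block is useless without its DSA public key, so the key is
resolved by its hash first and a missing pubkey is treated like a store miss.
A rough, self-contained illustration of that rule (the string-keyed maps are
stand-ins, not the real datastores):

    import java.util.HashMap;
    import java.util.Map;

    public class SskStoreLookupSketch {

        // Hypothetical stores: SSK blocks by routing key, public keys by hash.
        private final Map<String, byte[]> sskStore = new HashMap<String, byte[]>();
        private final Map<String, String> pubKeyStore = new HashMap<String, String>();

        /** Local SSK lookup: without the public key the block cannot be
         *  verified, so a missing pubkey counts as not found. */
        byte[] fetchSSK(String routingKey, String pubKeyHash) {
            String pubKey = pubKeyStore.get(pubKeyHash);
            if (pubKey == null) {
                System.out.println("Not found because no pubkey: " + routingKey);
                return null;
            }
            return sskStore.get(routingKey); // verification against pubKey omitted
        }

        public static void main(String[] args) {
            SskStoreLookupSketch n = new SskStoreLookupSketch();
            n.sskStore.put("SSK@site", new byte[] { 1, 2, 3 });
            System.out.println(n.fetchSSK("SSK@site", "hash1")); // null, no pubkey
            n.pubKeyStore.put("hash1", "pubkey-bytes");
            System.out.println(n.fetchSSK("SSK@site", "hash1") != null); // true
        }
    }
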
@@ -825,7 +1102,7 @@
     /**
      * Add a RequestSender to our HashSet.
      */
-    public synchronized void addSender(NodeCHK key, short htl, RequestSender 
sender) {
+    public synchronized void addSender(Key key, short htl, RequestSender 
sender) {
         KeyHTLPair kh = new KeyHTLPair(key, htl);
         requestSenders.put(kh, sender);
     }
@@ -837,17 +1114,44 @@
         transferringRequestSenders.put(key, sender);
     }

+    public synchronized SSKBlock fetch(NodeSSK key) {
+       try {
+               return sskDatastore.fetch(key, false);
+       } catch (IOException e) {
+               Logger.error(this, "Cannot fetch data: "+e, e);
+               return null;
+       }
+    }
+
+    public synchronized CHKBlock fetch(NodeCHK key) {
+       try {
+               return chkDatastore.fetch(key, false);
+       } catch (IOException e) {
+               Logger.error(this, "Cannot fetch data: "+e, e);
+               return null;
+       }
+    }
+    
     /**
      * Store a datum.
      */
     public synchronized void store(CHKBlock block) {
         try {
-            datastore.put(block);
+            chkDatastore.put(block);
         } catch (IOException e) {
             Logger.error(this, "Cannot store data: "+e, e);
         }
     }

+    public synchronized void store(SSKBlock block) {
+       try {
+               sskDatastore.put(block);
+               cacheKey(((NodeSSK)block.getKey()).getPubKeyHash(), 
((NodeSSK)block.getKey()).getPubKey());
+       } catch (IOException e) {
+               Logger.error(this, "Cannot store data: "+e, e);
+       }
+    }
+    
     /**
      * Remove a sender from the set of currently transferring senders.
      */
@@ -861,7 +1165,7 @@
     /**
      * Remove a RequestSender from the map.
      */
-    public synchronized void removeSender(NodeCHK key, short htl, 
RequestSender sender) {
+    public synchronized void removeSender(Key key, short htl, RequestSender 
sender) {
         KeyHTLPair kh = new KeyHTLPair(key, htl);
         RequestSender rs = (RequestSender) requestSenders.remove(kh);
         if(rs != sender) {
@@ -870,11 +1174,11 @@
     }

     /**
-     * Remove an InsertSender from the map.
+     * Remove an AnyInsertSender from the map.
      */
-    public void removeInsertSender(NodeCHK key, short htl, InsertSender 
sender) {
+    public void removeInsertSender(Key key, short htl, AnyInsertSender sender) 
{
         KeyHTLPair kh = new KeyHTLPair(key, htl);
-        InsertSender is = (InsertSender) insertSenders.remove(kh);
+        AnyInsertSender is = (AnyInsertSender) insertSenders.remove(kh);
         if(is != sender) {
             Logger.error(this, "Removed "+is+" should be "+sender+" for 
"+key+","+htl+" in removeInsertSender");
         }
@@ -906,33 +1210,66 @@
     }

     /**
-     * Fetch or create an InsertSender for a given key/htl.
+     * Fetch or create a CHKInsertSender for a given key/htl.
      * @param key The key to be inserted.
      * @param htl The current HTL. We can't coalesce inserts across
      * HTL's.
      * @param uid The UID of the caller's request chain, or a new
      * one. This is obviously not used if there is already an 
-     * InsertSender running.
+     * CHKInsertSender running.
      * @param source The node that sent the InsertRequest, or null
      * if it originated locally.
      */
-    public synchronized InsertSender makeInsertSender(NodeCHK key, short htl, 
long uid, PeerNode source,
+    public synchronized CHKInsertSender makeInsertSender(NodeCHK key, short 
htl, long uid, PeerNode source,
             byte[] headers, PartiallyReceivedBlock prb, boolean fromStore, 
double closestLoc, boolean cache) {
         Logger.minor(this, 
"makeInsertSender("+key+","+htl+","+uid+","+source+",...,"+fromStore);
         KeyHTLPair kh = new KeyHTLPair(key, htl);
-        InsertSender is = (InsertSender) insertSenders.get(kh);
+        CHKInsertSender is = (CHKInsertSender) insertSenders.get(kh);
         if(is != null) {
             Logger.minor(this, "Found "+is+" for "+kh);
             return is;
         }
         if(fromStore && !cache)
                throw new IllegalArgumentException("From store = true but cache 
= false !!!");
-        is = new InsertSender(key, uid, headers, htl, source, this, prb, 
fromStore, closestLoc);
+        is = new CHKInsertSender(key, uid, headers, htl, source, this, prb, 
fromStore, closestLoc);
         Logger.minor(this, is.toString()+" for "+kh.toString());
         insertSenders.put(kh, is);
         return is;
     }

+    /**
+     * Fetch or create an SSKInsertSender for a given key/htl.
+     * @param key The key to be inserted.
+     * @param htl The current HTL. We can't coalesce inserts across
+     * HTL's.
+     * @param uid The UID of the caller's request chain, or a new
+     * one. This is obviously not used if there is already an 
+     * SSKInsertSender running.
+     * @param source The node that sent the InsertRequest, or null
+     * if it originated locally.
+     */
+    public synchronized SSKInsertSender makeInsertSender(SSKBlock block, short 
htl, long uid, PeerNode source,
+            boolean fromStore, double closestLoc, boolean cache) {
+       NodeSSK key = (NodeSSK) block.getKey();
+       if(key.getPubKey() == null) {
+               throw new IllegalArgumentException("No pub key when inserting");
+       }
+       cacheKey(key.getPubKeyHash(), key.getPubKey());
+        Logger.minor(this, 
"makeInsertSender("+key+","+htl+","+uid+","+source+",...,"+fromStore);
+        KeyHTLPair kh = new KeyHTLPair(key, htl);
+        SSKInsertSender is = (SSKInsertSender) insertSenders.get(kh);
+        if(is != null) {
+            Logger.minor(this, "Found "+is+" for "+kh);
+            return is;
+        }
+        if(fromStore && !cache)
+               throw new IllegalArgumentException("From store = true but cache 
= false !!!");
+        is = new SSKInsertSender(block, uid, htl, source, this, fromStore, 
closestLoc);
+        Logger.minor(this, is.toString()+" for "+kh.toString());
+        insertSenders.put(kh, is);
+        return is;
+    }
+    
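
Both makeInsertSender() variants coalesce on (key, HTL): a second insert for
the same key at the same HTL reuses the already-running sender, while a
different HTL gets a sender of its own. That only works if the map key has
value equality, which is what KeyHTLPair provides. A self-contained sketch of
the idea, with a plain String standing in for the routing key and the sender:

    import java.util.HashMap;
    import java.util.Map;

    public class CoalesceSketch {

        /** Hypothetical (key, htl) pair; equals()/hashCode() make it usable
         *  as a HashMap key so identical inserts coalesce. */
        static final class KeyHTL {
            final String key;
            final short htl;
            KeyHTL(String key, short htl) { this.key = key; this.htl = htl; }
            public boolean equals(Object o) {
                if (!(o instanceof KeyHTL)) return false;
                KeyHTL other = (KeyHTL) o;
                return other.htl == htl && other.key.equals(key);
            }
            public int hashCode() { return key.hashCode() ^ htl; }
        }

        private final Map<KeyHTL, String> senders = new HashMap<KeyHTL, String>();

        /** Return the running sender for (key, htl), or register the new one. */
        synchronized String makeSender(String key, short htl, String newSender) {
            KeyHTL kh = new KeyHTL(key, htl);
            String existing = senders.get(kh);
            if (existing != null) return existing; // coalesced
            senders.put(kh, newSender);
            return newSender;
        }

        public static void main(String[] args) {
            CoalesceSketch c = new CoalesceSketch();
            String a = c.makeSender("CHK@abc", (short) 10, "sender-1");
            String b = c.makeSender("CHK@abc", (short) 10, "sender-2");
            String d = c.makeSender("CHK@abc", (short) 9, "sender-3");
            System.out.println(a == b); // true: same key and HTL coalesce
            System.out.println(a == d); // false: different HTL, own sender
        }
    }

The same (key, HTL) pairing backs the requestSenders map used by
makeRequestSender()/addSender()/removeSender() above.
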
     public boolean lockUID(long uid) {
        Logger.minor(this, "Locking "+uid);
         Long l = new Long(uid);
@@ -968,8 +1305,8 @@
                // Dump
                Iterator i = insertSenders.values().iterator();
                while(i.hasNext()) {
-                       InsertSender s = (InsertSender) i.next();
-                       sb.append(s.uid);
+                       AnyInsertSender s = (AnyInsertSender) i.next();
+                       sb.append(s.getUID());
                        sb.append(": ");
                        sb.append(s.getStatusString());
                        sb.append('\n');
@@ -981,6 +1318,28 @@
        sb.append(this.transferringRequestSenders.size());
        return sb.toString();
     }
+    
+    /**
+     * @return Data String for freeviz.
+     */
+    public String getFreevizOutput() {
+       StringBuffer sb = new StringBuffer();
+       sb.append("\nrequests=");
+       sb.append(requestSenders.size());
+       
+       sb.append("\ntransferring_requests=");
+       sb.append(this.transferringRequestSenders.size());
+       
+       sb.append("\ninserts=");
+       sb.append(this.insertSenders.size());
+       sb.append("\n");
+       
+       
+       if (peers != null)
+               sb.append(peers.getFreevizOutput());
+                                       
+       return sb.toString();
+    }

     /**
      * @return Our reference, compressed
@@ -1010,7 +1369,7 @@
     final LRUQueue recentlyCompletedIDs;

     static final int MAX_RECENTLY_COMPLETED_IDS = 10*1000;
-    
+
     /**
      * Has a request completed with this ID recently?
      */
@@ -1032,8 +1391,8 @@
         writeNodeFile();
     }

-       public HighLevelSimpleClient makeClient(short prioClass, short prio) {
-               return new HighLevelSimpleClientImpl(this, archiveManager, 
tempBucketFactory, random, makeStarterClient(prioClass, prio, false), 
makeStarterClient(prioClass, prio, true), !DONT_CACHE_LOCAL_REQUESTS);
+       public HighLevelSimpleClient makeClient(short prioClass) {
+               return new HighLevelSimpleClientImpl(this, archiveManager, 
tempBucketFactory, random, !DONT_CACHE_LOCAL_REQUESTS, prioClass);
        }

        private static class MemoryChecker implements Runnable {
@@ -1041,12 +1400,30 @@
                public void run() {
                        Runtime r = Runtime.getRuntime();
                        while(true) {
+                               for(int i=0;i<20;i++) {
+                                       try {
+                                               Thread.sleep(250);
+                                       } catch (InterruptedException e) {
+                                               // Ignore
+                                       }
+                                       Logger.minor(this, "Memory in use: 
"+(r.totalMemory()-r.freeMemory()));
+                               }
                                try {
-                                       Thread.sleep(1000);
+                                       Thread.sleep(250);
                                } catch (InterruptedException e) {
                                        // Ignore
                                }
-                               Logger.minor(this, "Memory in use: 
"+(r.totalMemory()-r.freeMemory()));
+                               // FIXME
+                               // Do not remove until all known memory issues 
fixed,
+                               // Especially #66
+                               // This probably reduces performance, but it 
makes
+                               // memory usage *more predictable*. This will 
make
+                               // tracking down the sort of nasty 
unpredictable OOMs
+                               // we are getting much easier. 
+                               Logger.minor(this, "Memory in use before GC: 
"+(r.totalMemory()-r.freeMemory()));
+                               System.gc();
+                               System.runFinalization();
+                               Logger.minor(this, "Memory in use after GC: 
"+(r.totalMemory()-r.freeMemory()));
                        }
                }
        }
@@ -1059,14 +1436,117 @@
                return insertThrottle;
        }

-       public RequestStarterClient makeStarterClient(short prioClass, short 
prio, boolean inserts) {
-               return new RequestStarterClient(prioClass, prio, random, this, 
inserts ? insertStarter : requestStarter);
-       }
-
        InetAddress lastIP;

        public void redetectAddress() {
                writeNodeFile();
                 return;
        }
+       
+       /**
+        * Look up a cached public key by its hash.
+        */
+       public DSAPublicKey getKey(byte[] hash) {
+               ImmutableByteArrayWrapper w = new 
ImmutableByteArrayWrapper(hash);
+               Logger.minor(this, "Getting pubkey: "+HexUtil.bytesToHex(hash));
+               synchronized(cachedPubKeys) {
+                       DSAPublicKey key = (DSAPublicKey) cachedPubKeys.get(w);
+                       if(key != null) {
+                               cachedPubKeys.push(w, key);
+                               Logger.minor(this, "Got 
"+HexUtil.bytesToHex(hash)+" from cache");
+                               return key;
+                       }
+               }
+               try {
+                       DSAPublicKey key = pubKeyDatastore.fetchPubKey(hash, 
false);
+                       if(key != null) {
+                               cacheKey(hash, key);
+                               Logger.minor(this, "Got 
"+HexUtil.bytesToHex(hash)+" from store");
+                       }
+                       return key;
+               } catch (IOException e) {
+                       // FIXME deal with disk full, access perms etc; tell 
user about it.
+                       Logger.error(this, "Error accessing pubkey store: "+e, 
e);
+                       return null;
+               }
+       }
+       
+       /**
+        * Cache a public key
+        */
+       public void cacheKey(byte[] hash, DSAPublicKey key) {
+               ImmutableByteArrayWrapper w = new 
ImmutableByteArrayWrapper(hash);
+               synchronized(cachedPubKeys) {
+                       DSAPublicKey key2 = (DSAPublicKey) cachedPubKeys.get(w);
+                       if(key2 != null && !key2.equals(key)) {
+                               MessageDigest md256;
+                               // Check the hash.
+                               try {
+                                       md256 = 
MessageDigest.getInstance("SHA-256");
+                               } catch (NoSuchAlgorithmException e) {
+                                       throw new Error(e);
+                               }
+                               byte[] hashCheck = md256.digest(key.asBytes());
+                               if(Arrays.equals(hashCheck, hash)) {
+                                       Logger.error(this, "Hash is 
correct!!!");
+                                       // Verify the old key
+                                       byte[] oldHash = 
md256.digest(key2.asBytes());
+                                       if(Arrays.equals(oldHash, hash)) {
+                                               Logger.error(this, "Old hash is 
correct too!! - Bug in DSAPublicKey.equals() or SHA-256 collision!");
+                                       } else {
+                                               Logger.error(this, "Old hash is 
wrong!");
+                                               cachedPubKeys.removeKey(w);
+                                               cacheKey(hash, key);
+                                       }
+                               } else {
+                                       Logger.error(this, "New hash is wrong");
+                               }
+                               throw new IllegalArgumentException("Wrong 
hash?? Already have different key with same hash!");
+                       }
+                       cachedPubKeys.push(w, key);
+                       while(cachedPubKeys.size() > MAX_CACHED_KEYS)
+                               cachedPubKeys.popKey();
+               }
+               try {
+                       pubKeyDatastore.put(hash, key);
+               } catch (IOException e) {
+                       // FIXME deal with disk full, access perms etc; tell 
user about it.
+                       Logger.error(this, "Error accessing pubkey store: "+e, 
e);
+               }
+       }
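
getKey()/cacheKey() above keep a bounded LRU of DSA public keys in front of
the pubkey datastore, keyed by the SHA-256 hash of the encoded key, and refuse
to overwrite an entry with a key whose hash does not match. A simplified,
self-contained sketch of the same idea; unlike the real code it checks the
hash on every insert and stores raw byte arrays instead of DSAPublicKey
objects:

    import java.security.MessageDigest;
    import java.security.NoSuchAlgorithmException;
    import java.util.Arrays;
    import java.util.LinkedHashMap;
    import java.util.Map;

    public class PubKeyCacheSketch {

        private static final int MAX_CACHED_KEYS = 1000; // assumed limit

        // Access-ordered LinkedHashMap gives a simple LRU with eviction.
        private final LinkedHashMap<String, byte[]> cache =
            new LinkedHashMap<String, byte[]>(16, 0.75f, true) {
                protected boolean removeEldestEntry(Map.Entry<String, byte[]> e) {
                    return size() > MAX_CACHED_KEYS;
                }
            };

        static byte[] sha256(byte[] data) {
            try {
                return MessageDigest.getInstance("SHA-256").digest(data);
            } catch (NoSuchAlgorithmException e) {
                throw new Error(e); // SHA-256 is always available
            }
        }

        static String hex(byte[] b) {
            StringBuilder sb = new StringBuilder();
            for (byte x : b) sb.append(String.format("%02x", x));
            return sb.toString();
        }

        /** Cache an encoded key under its hash, verifying the hash first. */
        synchronized void cacheKey(byte[] hash, byte[] encodedKey) {
            if (!Arrays.equals(hash, sha256(encodedKey)))
                throw new IllegalArgumentException("hash does not match key");
            cache.put(hex(hash), encodedKey.clone());
        }

        synchronized byte[] getKey(byte[] hash) {
            return cache.get(hex(hash)); // get() refreshes the LRU order
        }

        public static void main(String[] args) {
            PubKeyCacheSketch c = new PubKeyCacheSketch();
            byte[] key = "dummy public key bytes".getBytes();
            byte[] h = sha256(key);
            c.cacheKey(h, key);
            System.out.println(c.getKey(h) != null); // true
        }
    }
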
+
+       public boolean isTestnetEnabled() {
+               return testnetEnabled;
+       }
+
+       public ClientKeyBlock fetchKey(ClientKey key) throws KeyVerifyException 
{
+               if(key instanceof ClientCHK)
+                       return fetch((ClientCHK)key);
+               else if(key instanceof ClientSSK)
+                       return fetch((ClientSSK)key);
+               else
+                       throw new IllegalStateException("Don't know what to do 
with "+key);
+       }
+
+       private ClientKeyBlock fetch(ClientSSK clientSSK) throws 
SSKVerifyException {
+               DSAPublicKey key = clientSSK.getPubKey();
+               boolean hadKey = key != null;
+               if(key == null) {
+                       key = getKey(clientSSK.pubKeyHash);
+               }
+               if(key == null) return null;
+               clientSSK.setPublicKey(key);
+               SSKBlock block = fetch((NodeSSK)clientSSK.getNodeKey());
+               if(block == null) return null;
+               // Move the pubkey to the top of the LRU, and fix it if it
+               // was corrupt.
+               cacheKey(clientSSK.pubKeyHash, key);
+               return new ClientSSKBlock(block, clientSSK);
+       }
+
+       private ClientKeyBlock fetch(ClientCHK clientCHK) throws 
CHKVerifyException {
+               CHKBlock block = fetch(clientCHK.getNodeCHK());
+               if(block == null) return null;
+               return new ClientCHKBlock(block, clientCHK);
+       }
 }

Modified: branches/freenet-freejvms/src/freenet/node/NodeDispatcher.java
===================================================================
--- branches/freenet-freejvms/src/freenet/node/NodeDispatcher.java      
2006-02-03 22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/freenet/node/NodeDispatcher.java      
2006-02-03 22:55:27 UTC (rev 7999)
@@ -70,9 +70,9 @@
             return handleRoutedReply(m);
         } else if(spec == DMT.FNPRoutedRejected) {
             return handleRoutedRejected(m);
-        } else if(spec == DMT.FNPDataRequest) {
+        } else if(spec == DMT.FNPCHKDataRequest || spec == 
DMT.FNPSSKDataRequest) {
             return handleDataRequest(m);
-        } else if(spec == DMT.FNPInsertRequest) {
+        } else if(spec == DMT.FNPInsertRequest || spec == 
DMT.FNPSSKInsertRequest) {
             return handleInsertRequest(m);
         } else if(spec == DMT.FNPLinkPing) {
                long id = m.getLong(DMT.PING_SEQNO);
@@ -169,10 +169,17 @@
             }
             return true;
         }
-        InsertHandler rh = new InsertHandler(m, id, node, now);
-        Thread t = new Thread(rh, "InsertHandler for "+id+" on 
"+node.portNumber);
-        t.setDaemon(true);
-        t.start();
+        if(m.getSpec().equals(DMT.FNPSSKInsertRequest)) {
+               SSKInsertHandler rh = new SSKInsertHandler(m, id, node, now);
+            Thread t = new Thread(rh, "InsertHandler for "+id+" on 
"+node.portNumber);
+            t.setDaemon(true);
+            t.start();
+        } else {
+               InsertHandler rh = new InsertHandler(m, id, node, now);
+               Thread t = new Thread(rh, "InsertHandler for "+id+" on 
"+node.portNumber);
+               t.setDaemon(true);
+               t.start();
+        }
         Logger.minor(this, "Started InsertHandler for "+id);
         return true;
     }

Modified: branches/freenet-freejvms/src/freenet/node/PacketSender.java
===================================================================
--- branches/freenet-freejvms/src/freenet/node/PacketSender.java        
2006-02-03 22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/freenet/node/PacketSender.java        
2006-02-03 22:55:27 UTC (rev 7999)
@@ -38,6 +38,19 @@
             lastReceivedPacketFromAnyNode = lastReportedNoPackets;
             try {
                 realRun();
+            } catch (OutOfMemoryError e) {
+               Runtime r = Runtime.getRuntime();
+               long usedAtStart = r.totalMemory() - r.freeMemory();
+               System.gc();
+               System.runFinalization();
+               System.gc();
+               System.runFinalization();
+               System.err.println(e.getClass());
+               System.err.println(e.getMessage());
+               e.printStackTrace();
+               long usedNow = r.totalMemory() - r.freeMemory();
+               Logger.error(this, "Caught "+e, e);
+               Logger.error(this, "Used: "+usedAtStart+" now "+usedNow);
             } catch (Throwable t) {
                 Logger.error(this, "Caught in PacketSender: "+t, t);
             }

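
The new catch clause in PacketSender treats OutOfMemoryError separately from
other Throwables: force a collection, record heap usage before and after, and
keep the loop alive so the node leaves a trail instead of dying silently. A
minimal, self-contained sketch of that recovery pattern:

    public class OomGuardSketch {

        /** Run one iteration of work, surviving an OutOfMemoryError and
         *  logging heap usage before and after a forced collection. */
        static void guardedIteration(Runnable work) {
            try {
                work.run();
            } catch (OutOfMemoryError e) {
                Runtime r = Runtime.getRuntime();
                long usedAtStart = r.totalMemory() - r.freeMemory();
                System.gc();
                System.runFinalization();
                long usedNow = r.totalMemory() - r.freeMemory();
                System.err.println("Caught " + e + "; used " + usedAtStart
                        + " bytes before GC, " + usedNow + " after");
            } catch (Throwable t) {
                System.err.println("Caught in loop: " + t);
            }
        }

        public static void main(String[] args) {
            guardedIteration(new Runnable() {
                public void run() {
                    throw new OutOfMemoryError("simulated");
                }
            });
            System.out.println("still running");
        }
    }
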
Modified: branches/freenet-freejvms/src/freenet/node/PeerManager.java
===================================================================
--- branches/freenet-freejvms/src/freenet/node/PeerManager.java 2006-02-03 
22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/freenet/node/PeerManager.java 2006-02-03 
22:55:27 UTC (rev 7999)
@@ -321,7 +321,7 @@
     /**
      * Distance between two locations.
      */
-    static double distance(double d, double loc) {
+    public static double distance(double d, double loc) {
         // Circular keyspace
         double dist = Math.abs(d-loc);
         double min = Math.min(d, loc);
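
distance() is made public here so code outside PeerManager can use it. The
hunk only shows the start of the method, but the point of the "circular
keyspace" comment is that locations wrap around: 0.95 and 0.05 are close, not
far apart. A self-contained sketch of that distance, assuming locations are
normalised to [0.0, 1.0):

    public class CircularDistanceSketch {

        /** Distance between two locations on the circular keyspace [0, 1):
         *  the shorter of the two ways around the circle. */
        static double distance(double a, double b) {
            double direct = Math.abs(a - b);
            return Math.min(direct, 1.0 - direct);
        }

        public static void main(String[] args) {
            System.out.println(distance(0.95, 0.05)); // ~0.1, not 0.9
            System.out.println(distance(0.2, 0.6));   // ~0.4
        }
    }
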
@@ -386,6 +386,7 @@
         for(int i=0;i<myPeers.length;i++) {
             PeerNode pn = myPeers[i];
             status[i] = pn.getStatus();
+           Version.seenVersion(pn.getVersion());
         }
         Arrays.sort(status);
         for(int i=0;i<status.length;i++) {
@@ -394,6 +395,21 @@
         }
         return sb.toString();
     }
+    public String getFreevizOutput() {
+        StringBuffer sb = new StringBuffer();
+        PeerNode[] peers = myPeers;
+        String[] identity = new String[peers.length];
+        for(int i=0;i<myPeers.length;i++) {
+            PeerNode pn = myPeers[i];
+            identity[i] = pn.getFreevizOutput();
+        }
+        Arrays.sort(identity);
+        for(int i=0;i<identity.length;i++) {
+            sb.append(identity[i]);
+            sb.append('\n');
+        }
+        return sb.toString();
+    }

     final Object writePeersSync = new Object();


Modified: branches/freenet-freejvms/src/freenet/node/PeerNode.java
===================================================================
--- branches/freenet-freejvms/src/freenet/node/PeerNode.java    2006-02-03 
22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/freenet/node/PeerNode.java    2006-02-03 
22:55:27 UTC (rev 7999)
@@ -6,12 +6,14 @@
 import java.io.InputStreamReader;
 import java.io.UnsupportedEncodingException;
 import java.io.Writer;
+import java.net.InetAddress;
 import java.security.MessageDigest;
 import java.security.NoSuchAlgorithmException;
 import java.util.Arrays;
 import java.util.LinkedList;
 import java.util.zip.DataFormatException;
 import java.util.zip.Inflater;
+import java.util.Vector;

 import freenet.crypt.BlockCipher;
 import freenet.crypt.DiffieHellmanContext;
@@ -57,14 +59,19 @@
  */
 public class PeerNode implements PeerContext {

+       private String lastGoodVersion; 
+       
     /** For debugging/testing, set this to true to stop the
      * probabilistic decrement at the edges of the HTLs.
      */
     static boolean disableProbabilisticHTLs = false;

     /** My low-level address for SocketManager purposes */
-    private Peer peer;
+    private Peer detectedPeer;

+    /** Advertised addresses */
+    private Vector nominalPeer;
+    
     /** Is this a testnet node? */
     public final boolean testnetEnabled;

@@ -205,16 +212,37 @@
         String locationString = fs.get("location");
         if(locationString == null) throw new FSParseException("No location");
         currentLocation = new Location(locationString);
-        String physical = fs.get("physical.udp");
-        if(physical == null) throw new FSParseException("No physical.udp");
-        peer = new Peer(physical);
+
+        // FIXME make mandatory once everyone has upgraded
+        lastGoodVersion = fs.get("lastGoodVersion");
+        
+               nominalPeer=new Vector();
+        nominalPeer.removeAllElements();
+        try{
+               String physical[]=fs.getAll("physical.udp");
+               if(physical==null){
+                       Peer p = new Peer(fs.get("physical.udp"));
+                       nominalPeer.addElement(p);
+               }else{
+                       for(int i=0;i<physical.length;i++){             
+                                       Peer p = new Peer(physical[i]);
+                                   if(!nominalPeer.contains(p)) 
+                                       nominalPeer.addElement(p);
+                       }
+               }
+        } catch (Exception e1) {
+                throw new FSParseException(e1);
+        }
+        if(nominalPeer.isEmpty()) throw new FSParseException("No 
physical.udp");
+        detectedPeer=(Peer) nominalPeer.firstElement();
+        
         String name = fs.get("myName");
         if(name == null) throw new FSParseException("No name");
         myName = name;
         String testnet = fs.get("testnet");
         testnetEnabled = testnet == null ? false : 
(testnet.equalsIgnoreCase("true") || testnet.equalsIgnoreCase("yes"));
         if(testnetEnabled != node.testnetEnabled) {
-               String err = "Ignoring incompatible node "+peer+" - 
peer.testnet="+testnetEnabled+"("+testnet+") but 
node.testnet="+node.testnetEnabled;
+               String err = "Ignoring incompatible node "+detectedPeer+" - 
peer.testnet="+testnetEnabled+"("+testnet+") but 
node.testnet="+node.testnetEnabled;
                Logger.error(this, err);
                throw new PeerParseException(err);
         }
@@ -283,11 +311,44 @@
     /**
      * Get my low-level address
      */
-    public Peer getPeer() {
-        return peer;
+    public Peer getDetectedPeer() {
+        return detectedPeer;
     }
+
+    public Peer getPeer(){
+       return detectedPeer;
+    }

     /**
+     * Returns an array with the advertised addresses and the detected one
+     */
+    public Peer[] getHandshakeIPs(){
+       Peer[] p=null;
+       
+       if( ! nominalPeer.contains(detectedPeer)){
+               p= new Peer[1+nominalPeer.size()];
+               p[0]=detectedPeer;
+               for(int i=1;i<nominalPeer.size()+1;i++)
+                       p[i]=(Peer) nominalPeer.get(i-1);
+       }else{
+               p = (Peer[]) nominalPeer.toArray(new Peer[nominalPeer.size()]); 
                
+       }
+       // Hack for two nodes on the same IP that can't talk over inet for 
routing reasons
+       InetAddress localhost = node.localhostAddress;
+       InetAddress nodeIP = node.getPrimaryIPAddress();
+       if(nodeIP.equals(localhost)) return p;
+       InetAddress peerIP = detectedPeer.getAddress();
+       if(peerIP.equals(localhost)) return p;
+       if(nodeIP.equals(peerIP)) {
+               Peer[] newPeers = new Peer[p.length+1];
+               System.arraycopy(p, 0, newPeers, 0, p.length);
+               newPeers[newPeers.length-1] = new Peer(node.localhostAddress, 
detectedPeer.getPort());
+               p = newPeers;
+       }
+       return p;
+    }
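
getHandshakeIPs() merges the detected address with every advertised one, and
when the peer appears to sit on the same public IP as this node it also adds a
localhost entry so the two nodes can still reach each other. A rough sketch of
that merge, with addresses modelled as host:port strings rather than
Peer/InetAddress objects:

    import java.util.ArrayList;
    import java.util.LinkedHashSet;
    import java.util.List;

    public class HandshakeAddressSketch {

        /** Detected address first, then the advertised ones; if the peer
         *  shares our public IP, also try localhost on the peer's port. */
        static List<String> handshakeAddresses(String detected,
                                               List<String> nominal,
                                               String ourPublicIp,
                                               int peerPort) {
            LinkedHashSet<String> out = new LinkedHashSet<String>();
            out.add(detected);
            out.addAll(nominal);
            String peerIp = detected.substring(0, detected.indexOf(':'));
            if (peerIp.equals(ourPublicIp) && !ourPublicIp.equals("127.0.0.1"))
                out.add("127.0.0.1:" + peerPort); // same-IP hack: go via loopback
            return new ArrayList<String>(out);
        }

        public static void main(String[] args) {
            List<String> nominal = new ArrayList<String>();
            nominal.add("203.0.113.7:5001");
            System.out.println(handshakeAddresses("203.0.113.7:5001", nominal,
                                                  "203.0.113.7", 5001));
            // [203.0.113.7:5001, 127.0.0.1:5001]
        }
    }
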
+    
+    /**
      * What is my current keyspace location?
      */
     public Location getLocation() {
@@ -296,10 +357,11 @@

     /**
      * Is this node currently connected?
-     * Synchronized so that we don't return until after e.g.
-     * completedHandshake has returned in PacketSender.
+     * 
+     * Note possible deadlocks! PeerManager calls this, we call
+     * PeerManager in e.g. verified.
      */
-    public synchronized boolean isConnected() {
+    public boolean isConnected() {
         return isConnected;
     }

@@ -534,7 +596,7 @@
      * @param newPeer The new address of the peer.
      */
     public void changedIP(Peer newPeer) {
-        this.peer = newPeer;
+        this.detectedPeer=newPeer;
     }

     /**
@@ -566,7 +628,7 @@
      * @return short version of toString()
      */
     public String shortToString() {
-        return 
super.toString()+"@"+peer.toString()+"@"+HexUtil.bytesToHex(identity);
+        return 
super.toString()+"@"+detectedPeer.toString()+"@"+HexUtil.bytesToHex(identity);
     }

     public String toString() {
@@ -709,7 +771,8 @@
      * KeyTracker for this node. Will promote the unverifiedTracker
      * if necessary.
      */
-    public synchronized void verified(KeyTracker tracker) {
+    public void verified(KeyTracker tracker) {
+       synchronized(this) {
         if(tracker == unverifiedTracker) {
             Logger.minor(this, "Promoting unverified tracker "+tracker);
             if(previousTracker != null) {
@@ -722,9 +785,10 @@
             unverifiedTracker = null;
             isConnected = true;
             ctx = null;
-            node.peers.addConnectedPeer(this);
             maybeSendInitialMessages();
-        }
+        } else return;
+       }
+        node.peers.addConnectedPeer(this);
     }
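
The rewrite of verified() narrows the synchronized block and calls
node.peers.addConnectedPeer() only after the PeerNode monitor has been
released, which is the usual way to break the PeerNode/PeerManager
lock-ordering cycle mentioned in the isConnected() comment above. A
self-contained sketch of the pattern (Registry and the method names are
illustrative):

    import java.util.ArrayList;
    import java.util.List;

    public class CallbackOutsideLockSketch {

        /** Stand-in for PeerManager: has its own lock. */
        static class Registry {
            private final List<Object> connected = new ArrayList<Object>();
            synchronized void addConnected(Object peer) { connected.add(peer); }
            synchronized int size() { return connected.size(); }
        }

        static class Peer {
            private boolean verified;
            private final Registry registry;
            Peer(Registry registry) { this.registry = registry; }

            /** Decide and change state under our own lock, but call the
             *  other object's synchronized method only after releasing it. */
            void verify() {
                synchronized (this) {
                    if (verified) return; // nothing to do
                    verified = true;
                }
                registry.addConnected(this); // made without holding our lock
            }
        }

        public static void main(String[] args) {
            Registry reg = new Registry();
            Peer p = new Peer(reg);
            p.verify();
            p.verify(); // second call is a no-op
            System.out.println(reg.size()); // 1
        }
    }
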

     private synchronized boolean invalidVersion() {
@@ -807,15 +871,38 @@
         Location loc = new Location(locationString);
         if(!loc.equals(currentLocation)) changedAnything = true;
         currentLocation = loc;
-        String physical = fs.get("physical.udp");
-        if(physical == null) throw new FSParseException("No physical.udp");
-        try {
-            Peer p = new Peer(physical);
-            if(!p.equals(peer)) changedAnything = true;
-            peer = p;
-        } catch (PeerParseException e1) {
-            throw new FSParseException(e1);
+
+        if(nominalPeer==null)
+               nominalPeer=new Vector();
+        
+        lastGoodVersion = fs.get("lastGoodVersion");
+        
+        // Snapshot the old addresses *before* clearing, so the comparison
+        // below can tell whether they actually changed.
+        Peer[] oldPeers = (Peer[]) nominalPeer.toArray(new Peer[nominalPeer.size()]);
+        nominalPeer.removeAllElements();
+        
+        try{
+               String physical[]=fs.getAll("physical.udp");
+               if(physical==null){
+                       Peer p = new Peer(fs.get("physical.udp"));
+                       nominalPeer.addElement(p);
+               }else{
+                       for(int i=0;i<physical.length;i++){             
+                                       Peer p = new Peer(physical[i]);
+                                   if(!nominalPeer.contains(p)) 
+                                       nominalPeer.addElement(p);
+                       }
+               }
+        } catch (Exception e1) {
+                throw new FSParseException(e1);
         }
+        
+        if(!Arrays.equals(oldPeers, nominalPeer.toArray(new 
Peer[nominalPeer.size()])))
+               changedAnything = true;
+        
+        if(nominalPeer.isEmpty()) throw new FSParseException("No 
physical.udp");
+        /* Pick the first advertised address; it will be corrected on handshake. */
+        detectedPeer=(Peer) nominalPeer.firstElement();
+        
         String name = fs.get("myName");
         if(name == null) throw new FSParseException("No name");
         if(!name.equals(myName)) changedAnything = true;
@@ -873,7 +960,11 @@
         return 
                (isConnected ? "CONNECTED   " : "DISCONNECTED") + " " + 
getPeer().toString()+" "+myName+" "+currentLocation.getValue()+" 
"+getVersion()+" backoff: "+backoffLength+" ("+(Math.max(backedOffUntil - 
System.currentTimeMillis(),0))+")";
     }
-       
+    
+    public String getFreevizOutput() {
+       return
+                       getStatus()+"|"+ HexUtil.bytesToHex(identity);
+    }
     public String getVersion(){
            return version;
     }
@@ -891,7 +982,10 @@
      */
     private SimpleFieldSet exportFieldSet() {
         SimpleFieldSet fs = new SimpleFieldSet();
-        fs.put("physical.udp", peer.toString());
+        if(lastGoodVersion != null)
+               fs.put("lastGoodVersion", lastGoodVersion);
+        for(int i=0;i<nominalPeer.size();i++)
+               fs.put("physical.udp", nominalPeer.get(i).toString());
         fs.put("identity", HexUtil.bytesToHex(identity));
         fs.put("location", Double.toString(currentLocation.getValue()));
         fs.put("testnet", Boolean.toString(testnetEnabled));

Deleted: branches/freenet-freejvms/src/freenet/node/QueuedDataRequest.java
===================================================================
--- branches/freenet-freejvms/src/freenet/node/QueuedDataRequest.java   
2006-02-03 22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/freenet/node/QueuedDataRequest.java   
2006-02-03 22:55:27 UTC (rev 7999)
@@ -1,26 +0,0 @@
-package freenet.node;
-
-import freenet.keys.ClientKey;
-import freenet.keys.ClientKeyBlock;
-import freenet.keys.KeyBlock;
-
-public class QueuedDataRequest extends QueuedRequest {
-
-       private final ClientKey key;
-       private final boolean localOnly;
-       private final boolean cache;
-       private QueueingSimpleLowLevelClient client;
-       
-       public QueuedDataRequest(ClientKey key, boolean localOnly, boolean 
cache, QueueingSimpleLowLevelClient client) {
-               this.key = key;
-               this.localOnly = localOnly;
-               this.client = client;
-               this.cache = cache;
-       }
-
-       public ClientKeyBlock waitAndFetch() throws LowLevelGetException {
-               waitForSendClearance();
-               return client.realGetKey(key, localOnly, cache);
-       }
-
-}

Deleted: branches/freenet-freejvms/src/freenet/node/QueuedInsertRequest.java
===================================================================
--- branches/freenet-freejvms/src/freenet/node/QueuedInsertRequest.java 
2006-02-03 22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/freenet/node/QueuedInsertRequest.java 
2006-02-03 22:55:27 UTC (rev 7999)
@@ -1,21 +0,0 @@
-package freenet.node;
-
-import freenet.keys.ClientCHKBlock;
-
-public class QueuedInsertRequest extends QueuedRequest {
-
-       private final ClientCHKBlock block;
-       private final boolean cache;
-       private QueueingSimpleLowLevelClient client;
-       
-       public QueuedInsertRequest(ClientCHKBlock block, 
QueueingSimpleLowLevelClient client, boolean cache) {
-               this.block = block;
-               this.client = client;
-               this.cache = cache;
-       }
-
-       public void waitAndPut() throws LowLevelPutException {
-               waitForSendClearance();
-               client.realPutCHK(block, cache);
-       }
-}

Deleted: branches/freenet-freejvms/src/freenet/node/QueuedRequest.java
===================================================================
--- branches/freenet-freejvms/src/freenet/node/QueuedRequest.java       
2006-02-03 22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/freenet/node/QueuedRequest.java       
2006-02-03 22:55:27 UTC (rev 7999)
@@ -1,32 +0,0 @@
-package freenet.node;
-
-/**
- * A request (including both DataRequest's and InsertRequest's) which can be 
queued
- * by a RequestStarter.
- */
-public abstract class QueuedRequest {
-
-       private boolean clearToSend = false;
-       
-       /**
-        * Shell for sending the request.
-        */
-       public final void clearToSend() {
-               synchronized(this) {
-                       clearToSend = true;
-                       notifyAll();
-               }
-       }
-
-       protected void waitForSendClearance() {
-               synchronized(this) {
-                       while(!clearToSend) {
-                               try {
-                                       wait(10*1000);
-                               } catch (InterruptedException e) {
-                                       // Ignore
-                               }
-                       }
-               }
-       }
-}

Deleted: 
branches/freenet-freejvms/src/freenet/node/QueueingSimpleLowLevelClient.java
===================================================================
--- 
branches/freenet-freejvms/src/freenet/node/QueueingSimpleLowLevelClient.java    
    2006-02-03 22:35:15 UTC (rev 7998)
+++ 
branches/freenet-freejvms/src/freenet/node/QueueingSimpleLowLevelClient.java    
    2006-02-03 22:55:27 UTC (rev 7999)
@@ -1,17 +0,0 @@
-package freenet.node;
-
-import freenet.client.InsertBlock;
-import freenet.keys.ClientCHKBlock;
-import freenet.keys.ClientKey;
-import freenet.keys.ClientKeyBlock;
-import freenet.keys.KeyBlock;
-
-interface QueueingSimpleLowLevelClient extends SimpleLowLevelClient {
-
-       /** Unqueued version. Only call from QueuedDataRequest ! */
-       ClientKeyBlock realGetKey(ClientKey key, boolean localOnly, boolean 
cache) throws LowLevelGetException;
-
-       /** Ditto */
-       void realPutCHK(ClientCHKBlock block, boolean cache) throws 
LowLevelPutException;
-
-}

Modified: branches/freenet-freejvms/src/freenet/node/RealNodePingTest.java
===================================================================
--- branches/freenet-freejvms/src/freenet/node/RealNodePingTest.java    
2006-02-03 22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/freenet/node/RealNodePingTest.java    
2006-02-03 22:55:27 UTC (rev 7999)
@@ -25,8 +25,8 @@
         Yarrow yarrow = new Yarrow();
         DiffieHellman.init(yarrow);
         // Create 2 nodes
-        Node node1 = new Node(5001, yarrow, null, "pingtest-", 0, false, fh);
-        Node node2 = new Node(5002, yarrow, null, "pingtest-", 0, false, fh);
+        Node node1 = new Node(5001, yarrow, null, "pingtest-", 0, false, fh, 
0);
+        Node node2 = new Node(5002, yarrow, null, "pingtest-", 0, false, fh, 
0);
         SimpleFieldSet node1ref = node1.exportFieldSet();
         SimpleFieldSet node2ref = node2.exportFieldSet();
         // Connect

Modified: 
branches/freenet-freejvms/src/freenet/node/RealNodeRequestInsertTest.java
===================================================================
--- branches/freenet-freejvms/src/freenet/node/RealNodeRequestInsertTest.java   
2006-02-03 22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/freenet/node/RealNodeRequestInsertTest.java   
2006-02-03 22:55:27 UTC (rev 7999)
@@ -42,7 +42,7 @@
         Node[] nodes = new Node[NUMBER_OF_NODES];
         Logger.normal(RealNodeRoutingTest.class, "Creating nodes...");
         for(int i=0;i<NUMBER_OF_NODES;i++) {
-            nodes[i] = new Node(5000+i, random, null, wd+File.separator, 0, 
false, fh);
+            nodes[i] = new Node(5000+i, random, null, wd+File.separator, 0, 
false, fh, 100);
             nodes[i].usm.setDropProbability(20); // 5%
             Logger.normal(RealNodeRoutingTest.class, "Created node "+i);
         }
@@ -73,10 +73,6 @@
             b.peers.connect(a.exportFieldSet());
         }

-        RequestStarterClient[] starters = new 
RequestStarterClient[NUMBER_OF_NODES];
-        for(int i=0;i<starters.length;i++)
-               starters[i] = 
nodes[i].makeStarterClient(RequestStarter.INTERACTIVE_PRIORITY_CLASS, (short)0, 
false); // pretend are all requests
-
         Logger.normal(RealNodeRoutingTest.class, "Added random links");

         SwapRequestInterval sri =
@@ -175,14 +171,14 @@
                 byte[] data = dataString.getBytes();
                 ClientCHKBlock block;
                 block = ClientCHKBlock.encode(data, false, false, (short)-1, 
0);
-                ClientCHK chk = block.getClientKey();
+                ClientCHK chk = (ClientCHK) block.getClientKey();
                 byte[] encData = block.getData();
-                byte[] encHeaders = block.getHeader();
+                byte[] encHeaders = block.getHeaders();
                 ClientCHKBlock newBlock = new ClientCHKBlock(encData, 
encHeaders, chk, true);
                 Logger.error(RealNodeRequestInsertTest.class, "Decoded: "+new 
String(newBlock.memoryDecode()));
                 Logger.error(RealNodeRequestInsertTest.class,"CHK: 
"+chk.getURI());
-                Logger.error(RealNodeRequestInsertTest.class,"Headers: 
"+HexUtil.bytesToHex(block.getHeader()));
-                randomNode.putCHK(block, starters[node1], true);
+                Logger.error(RealNodeRequestInsertTest.class,"Headers: 
"+HexUtil.bytesToHex(block.getHeaders()));
+                randomNode.realPut(block, true);
                 Logger.error(RealNodeRequestInsertTest.class, "Inserted to 
"+node1);
                 Logger.error(RealNodeRequestInsertTest.class, "Data: 
"+Fields.hashCode(encData)+", Headers: "+Fields.hashCode(encHeaders));
                 // Pick random node to request from
@@ -191,7 +187,7 @@
                     node2 = random.nextInt(NUMBER_OF_NODES);
                 } while(node2 == node1);
                 Node fetchNode = nodes[node2];
-                block = (ClientCHKBlock) fetchNode.getKey((ClientKey) chk, 
false, starters[node2], true);
+                block = (ClientCHKBlock) fetchNode.realGetKey((ClientKey) chk, 
false, true, false);
                 if(block == null) {
                     Logger.error(RealNodeRequestInsertTest.class, "Fetch 
FAILED from "+node2);
                     requestsAvg.report(0.0);

Modified: branches/freenet-freejvms/src/freenet/node/RealNodeRoutingTest.java
===================================================================
--- branches/freenet-freejvms/src/freenet/node/RealNodeRoutingTest.java 
2006-02-03 22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/freenet/node/RealNodeRoutingTest.java 
2006-02-03 22:55:27 UTC (rev 7999)
@@ -36,7 +36,7 @@
         Node[] nodes = new Node[NUMBER_OF_NODES];
         Logger.normal(RealNodeRoutingTest.class, "Creating nodes...");
         for(int i=0;i<NUMBER_OF_NODES;i++) {
-            nodes[i] = new Node(5000+i, random, null, wd+File.separator, 0, 
false, fh);
+            nodes[i] = new Node(5000+i, random, null, wd+File.separator, 0, 
false, fh, 0);
             Logger.normal(RealNodeRoutingTest.class, "Created node "+i);
         }
         SimpleFieldSet refs[] = new SimpleFieldSet[NUMBER_OF_NODES];

Modified: branches/freenet-freejvms/src/freenet/node/RequestHandler.java
===================================================================
--- branches/freenet-freejvms/src/freenet/node/RequestHandler.java      
2006-02-03 22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/freenet/node/RequestHandler.java      
2006-02-03 22:55:27 UTC (rev 7999)
@@ -1,11 +1,16 @@
 package freenet.node;

+import freenet.crypt.DSAPublicKey;
 import freenet.io.comm.DMT;
 import freenet.io.comm.Message;
 import freenet.io.xfer.BlockTransmitter;
 import freenet.io.xfer.PartiallyReceivedBlock;
 import freenet.keys.CHKBlock;
+import freenet.keys.Key;
+import freenet.keys.KeyBlock;
 import freenet.keys.NodeCHK;
+import freenet.keys.NodeSSK;
+import freenet.keys.SSKBlock;
 import freenet.support.Logger;

 /**
@@ -21,7 +26,8 @@
     private short htl;
     final PeerNode source;
     private double closestLoc;
-    final NodeCHK key;
+    private boolean needsPubKey;
+    final Key key;

     public String toString() {
         return super.toString()+" for "+uid;
@@ -36,10 +42,12 @@
         closestLoc = req.getDouble(DMT.NEAREST_LOCATION);
         double myLoc = n.lm.getLocation().getValue();
         // FIXME should be more generic when implement SSKs
-        key = (NodeCHK) req.getObject(DMT.FREENET_ROUTING_KEY);
+        key = (Key) req.getObject(DMT.FREENET_ROUTING_KEY);
         double keyLoc = key.toNormalizedDouble();
         if(Math.abs(keyLoc - myLoc) < Math.abs(keyLoc - closestLoc))
             closestLoc = myLoc;
+        if(key instanceof NodeSSK)
+               needsPubKey = m.getBoolean(DMT.NEED_PUB_KEY);
     }

     public void run() {
@@ -50,16 +58,26 @@
         Message accepted = DMT.createFNPAccepted(uid);
         source.send(accepted);

-        Object o = node.makeRequestSender(key, htl, uid, source, closestLoc, 
false, true);
-        if(o instanceof CHKBlock) {
-            CHKBlock block = (CHKBlock) o;
-            Message df = DMT.createFNPDataFound(uid, block.getHeader());
+        Object o = node.makeRequestSender(key, htl, uid, source, closestLoc, 
false, true, false);
+        if(o instanceof KeyBlock) {
+            KeyBlock block = (KeyBlock) o;
+            Message df = createDataFound(block);
             source.send(df);
-            PartiallyReceivedBlock prb =
-                new PartiallyReceivedBlock(Node.PACKETS_IN_BLOCK, 
Node.PACKET_SIZE, block.getData());
-            BlockTransmitter bt =
-                new BlockTransmitter(node.usm, source, uid, prb);
-            bt.send();
+            if(key instanceof NodeSSK) {
+                if(needsPubKey) {
+                       DSAPublicKey key = 
((NodeSSK)block.getKey()).getPubKey();
+                       Message pk = DMT.createFNPSSKPubKey(uid, key.asBytes());
+                       Logger.minor(this, "Sending PK: "+key+" 
"+key.writeAsField());
+                       source.send(pk);
+                }
+            }
+            if(block instanceof CHKBlock) {
+               PartiallyReceivedBlock prb =
+                       new PartiallyReceivedBlock(Node.PACKETS_IN_BLOCK, 
Node.PACKET_SIZE, block.getRawData());
+               BlockTransmitter bt =
+                       new BlockTransmitter(node.usm, source, uid, prb);
+               bt.send();
+            }
             return;
         }
         RequestSender rs = (RequestSender) o;
@@ -81,7 +99,8 @@
             }

             if(rs.transferStarted()) {
-                Message df = DMT.createFNPDataFound(uid, rs.getHeaders());
+               // Is a CHK.
+                Message df = DMT.createFNPCHKDataFound(uid, rs.getHeaders());
                 source.send(df);
                 PartiallyReceivedBlock prb = rs.getPRB();
                BlockTransmitter bt =
@@ -101,6 +120,7 @@
                        return;
                case RequestSender.GENERATED_REJECTED_OVERLOAD:
                case RequestSender.TIMED_OUT:
+               case RequestSender.INTERNAL_ERROR:
                        // Locally generated.
                    // Propagate back to source who needs to reduce send rate
                    Message reject = DMT.createFNPRejectedOverload(uid, true);
@@ -112,12 +132,36 @@
                        source.sendAsync(rnf, null);
                        return;
                case RequestSender.SUCCESS:
+                       if(key instanceof NodeSSK) {
+                        Message df = DMT.createFNPSSKDataFound(uid, 
rs.getHeaders(), rs.getSSKData());
+                        source.send(df);
+                        if(needsPubKey) {
+                               Message pk = DMT.createFNPSSKPubKey(uid, 
((NodeSSK)rs.getSSKBlock().getKey()).getPubKey().asBytes());
+                               source.send(pk);
+                        }
+                       } else if(!rs.transferStarted()) {
+                               Logger.error(this, "Status is SUCCESS but we 
never started a transfer on "+uid);
+                       }
+                       return;
+               case RequestSender.VERIFY_FAILURE:
+                       if(key instanceof NodeCHK) {
+                               if(shouldHaveStartedTransfer)
+                                       throw new IllegalStateException("Got 
status code "+status+" but transfer not started");
+                               shouldHaveStartedTransfer = true;
+                               continue; // should have started transfer
+                       }
+                   reject = DMT.createFNPRejectedOverload(uid, true);
+                       source.sendAsync(reject, null);
+                       return;
                case RequestSender.TRANSFER_FAILED:
-               case RequestSender.VERIFY_FAILURE:
-                   if(shouldHaveStartedTransfer)
-                       throw new IllegalStateException("Got status code 
"+status+" but transfer not started");
-                   shouldHaveStartedTransfer = true;
-                   continue; // should have started transfer
+                       if(key instanceof NodeCHK) {
+                               if(shouldHaveStartedTransfer)
+                                       throw new IllegalStateException("Got 
status code "+status+" but transfer not started");
+                               shouldHaveStartedTransfer = true;
+                               continue; // should have started transfer
+                       }
+                       // Other side knows, right?
+                       return;
                default:
                    throw new IllegalStateException("Unknown status code 
"+status);
             }
@@ -129,4 +173,13 @@
         }
     }

+       private Message createDataFound(KeyBlock block) {
+               if(block instanceof CHKBlock)
+                       return DMT.createFNPCHKDataFound(uid, 
block.getRawHeaders());
+               else if(block instanceof SSKBlock)
+                       return DMT.createFNPSSKDataFound(uid, 
block.getRawHeaders(), block.getRawData());
+               else
+                       throw new IllegalStateException("Unknown key block 
type: "+block.getClass());
+       }
+
 }
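
For context: when RequestHandler can answer the request locally, the reply now depends on the key type. A CHK hit is announced with FNPCHKDataFound and the payload is bulk-transferred; an SSK hit carries headers and data inline in FNPSSKDataFound, preceded by an FNPSSKPubKey message when the requester asked for the public key. A condensed sketch of that dispatch, written as a method of RequestHandler using its existing fields (the helper name sendLocalHit() is illustrative, not part of the commit; the calls are the ones the diff introduces):

    // Sketch only: summarises the local-hit branch, not the committed method.
    private void sendLocalHit(KeyBlock block) {
        source.send(createDataFound(block));          // FNPCHKDataFound or FNPSSKDataFound
        if(block instanceof SSKBlock && needsPubKey) {
            DSAPublicKey pub = ((NodeSSK) block.getKey()).getPubKey();
            source.send(DMT.createFNPSSKPubKey(uid, pub.asBytes()));
        }
        if(block instanceof CHKBlock) {
            // CHK payloads are too big for a single message, so stream them.
            PartiallyReceivedBlock prb = new PartiallyReceivedBlock(
                    Node.PACKETS_IN_BLOCK, Node.PACKET_SIZE, block.getRawData());
            new BlockTransmitter(node.usm, source, uid, prb).send();
        }
    }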

Modified: branches/freenet-freejvms/src/freenet/node/RequestSender.java
===================================================================
--- branches/freenet-freejvms/src/freenet/node/RequestSender.java       
2006-02-03 22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/freenet/node/RequestSender.java       
2006-02-03 22:55:27 UTC (rev 7999)
@@ -1,7 +1,9 @@
 package freenet.node;

+import java.io.IOException;
 import java.util.HashSet;

+import freenet.crypt.DSAPublicKey;
 import freenet.io.comm.DMT;
 import freenet.io.comm.DisconnectedException;
 import freenet.io.comm.Message;
@@ -10,8 +12,12 @@
 import freenet.io.xfer.BlockReceiver;
 import freenet.io.xfer.PartiallyReceivedBlock;
 import freenet.keys.CHKBlock;
-import freenet.keys.CHKVerifyException;
+import freenet.keys.Key;
+import freenet.keys.KeyVerifyException;
 import freenet.keys.NodeCHK;
+import freenet.keys.NodeSSK;
+import freenet.keys.SSKBlock;
+import freenet.keys.SSKVerifyException;
 import freenet.support.Logger;
 import freenet.support.ShortBuffer;

@@ -34,7 +40,7 @@
     static final int FETCH_TIMEOUT = 60000;

     // Basics
-    final NodeCHK key;
+    final Key key;
     final double target;
     private short htl;
     final long uid;
@@ -43,8 +49,11 @@
     /** The source of this request if any - purely so we can avoid routing to 
it */
     final PeerNode source;
     private PartiallyReceivedBlock prb = null;
+    private DSAPublicKey pubKey;
     private byte[] headers;
+    private byte[] sskData;
     private boolean sentRequest;
+    private SSKBlock block;

     // Terminal status
     // Always set finished AFTER setting the reason flag
@@ -58,6 +67,7 @@
     static final int VERIFY_FAILURE = 5;
     static final int TIMED_OUT = 6;
     static final int GENERATED_REJECTED_OVERLOAD = 7;
+    static final int INTERNAL_ERROR = 8;



@@ -65,14 +75,20 @@
         return super.toString()+" for "+uid;
     }

-    public RequestSender(NodeCHK key, short htl, long uid, Node n, double 
nearestLoc, 
+    public RequestSender(Key key, DSAPublicKey pubKey, short htl, long uid, 
Node n, double nearestLoc, 
             PeerNode source) {
         this.key = key;
+        this.pubKey = pubKey;
         this.htl = htl;
         this.uid = uid;
         this.node = n;
         this.source = source;
         this.nearestLoc = nearestLoc;
+        if(key instanceof NodeSSK && pubKey == null) {
+               pubKey = ((NodeSSK)key).getPubKey();
+               if(pubKey == null)
+                       pubKey = node.getKey(((NodeSSK)key).getPubKeyHash());
+               this.pubKey = pubKey;
+        }

         target = key.toNormalizedDouble();
         Thread t = new Thread(this, "RequestSender for UID "+uid);
@@ -120,7 +136,7 @@
                 Logger.minor(this, "Backtracking: target="+target+" 
next="+nextValue+" closest="+nearestLoc+" so htl="+htl);
             }

-            Message req = DMT.createFNPDataRequest(uid, htl, key, nearestLoc);
+            Message req = createDataRequest();


             next.send(req);
@@ -207,11 +223,15 @@
             while(true) {

                 MessageFilter mfDNF = 
MessageFilter.create().setSource(next).setField(DMT.UID, 
uid).setTimeout(FETCH_TIMEOUT).setType(DMT.FNPDataNotFound);
-                MessageFilter mfDF = 
MessageFilter.create().setSource(next).setField(DMT.UID, 
uid).setTimeout(FETCH_TIMEOUT).setType(DMT.FNPDataFound);
+                MessageFilter mfDF = makeDataFoundFilter(next);
                 MessageFilter mfRouteNotFound = 
MessageFilter.create().setSource(next).setField(DMT.UID, 
uid).setTimeout(FETCH_TIMEOUT).setType(DMT.FNPRouteNotFound);
                 MessageFilter mfRejectedOverload = 
MessageFilter.create().setSource(next).setField(DMT.UID, 
uid).setTimeout(FETCH_TIMEOUT).setType(DMT.FNPRejectedOverload);
-                MessageFilter mf = 
mfDNF.or(mfDF.or(mfRouteNotFound.or(mfRejectedOverload)));
+                MessageFilter mfPubKey = 
MessageFilter.create().setSource(next).setField(DMT.UID, 
uid).setTimeout(FETCH_TIMEOUT).setType(DMT.FNPSSKPubKey);
+               MessageFilter mfRealDFCHK = 
MessageFilter.create().setSource(next).setField(DMT.UID, 
uid).setTimeout(FETCH_TIMEOUT).setType(DMT.FNPCHKDataFound);
+               MessageFilter mfRealDFSSK = 
MessageFilter.create().setSource(next).setField(DMT.UID, 
uid).setTimeout(FETCH_TIMEOUT).setType(DMT.FNPSSKDataFound);
+                MessageFilter mf = 
mfDNF.or(mfRouteNotFound.or(mfRejectedOverload.or(mfDF.or(mfPubKey.or(mfRealDFCHK.or(mfRealDFSSK))))));

+                
                try {
                        msg = node.usm.waitFor(mf);
                } catch (DisconnectedException e) {
@@ -255,56 +275,113 @@
                                        continue; // Wait for any further 
response
                }

-               if(msg.getSpec() != DMT.FNPDataFound) {
-                       Logger.error(this, "Unexpected message: "+msg);
+               if(msg.getSpec() == DMT.FNPCHKDataFound) {
+                       if(!(key instanceof NodeCHK)) {
+                               Logger.error(this, "Got "+msg+" but expected a 
different key type from "+next);
+                               break;
+                       }
+                       
+                       // Found data
+                       next.successNotOverload();
+                       
+                       // First get headers
+                       
+                       headers = 
((ShortBuffer)msg.getObject(DMT.BLOCK_HEADERS)).getData();
+                       
+                       // FIXME: Validate headers
+                       
+                       node.addTransferringSender((NodeCHK)key, this);
+                       
+                       try {
+                               
+                               prb = new 
PartiallyReceivedBlock(Node.PACKETS_IN_BLOCK, Node.PACKET_SIZE);
+                               
+                               synchronized(this) {
+                                       notifyAll();
+                               }
+                               
+                               BlockReceiver br = new BlockReceiver(node.usm, 
next, uid, prb);
+                               
+                               try {
+                                       Logger.minor(this, "Receiving data");
+                                       byte[] data = br.receive();
+                                       Logger.minor(this, "Received data");
+                                       // Received data
+                                       try {
+                                               verifyAndCommit(data);
+                                       } catch (KeyVerifyException e1) {
+                                               Logger.normal(this, "Got data 
but verify failed: "+e1, e1);
+                                               finish(VERIFY_FAILURE, next);
+                                               return;
+                                       }
+                                       finish(SUCCESS, next);
+                                       return;
+                               } catch (RetrievalException e) {
+                                       Logger.normal(this, "Transfer failed: 
"+e, e);
+                                       finish(TRANSFER_FAILED, next);
+                                       return;
+                               }
+                       } finally {
+                               node.removeTransferringSender((NodeCHK)key, 
this);
+                       }
                }

-               // Found data
-               next.successNotOverload();
-               
-               // First get headers
-               
-               headers = 
((ShortBuffer)msg.getObject(DMT.BLOCK_HEADERS)).getData();
-               
-               // FIXME: Validate headers
-               
-               node.addTransferringSender(key, this);
-               try {
+               if(msg.getSpec() == DMT.FNPSSKPubKey) {

-                       prb = new PartiallyReceivedBlock(Node.PACKETS_IN_BLOCK, 
Node.PACKET_SIZE);
+                       Logger.minor(this, "Got pubkey on "+uid);

-                       synchronized(this) {
-                               notifyAll();
+                       if(!(key instanceof NodeSSK)) {
+                               Logger.error(this, "Got "+msg+" but expected a 
different key type from "+next);
+                               break;
                        }
+                               byte[] pubkeyAsBytes = 
((ShortBuffer)msg.getObject(DMT.PUBKEY_AS_BYTES)).getData();
+                               try {
+                                       if(pubKey == null)
+                                               pubKey = new 
DSAPublicKey(pubkeyAsBytes);
+                                       ((NodeSSK)key).setPubKey(pubKey);
+                               } catch (SSKVerifyException e) {
+                                       pubKey = null;
+                                       Logger.error(this, "Invalid pubkey from "+next+" on "+uid+" ("+e.getMessage()+")", e);
+                                       break; // try next node
+                               } catch (IOException e) {
+                                       Logger.error(this, "Invalid pubkey from "+next+" on "+uid+" ("+e+")");
+                                       break; // try next node
+                               }
+                               if(sskData != null) {
+                                       finishSSK(next);
+                                       return;
+                               }
+                               continue;
+               }
+               
+               if(msg.getSpec() == DMT.FNPSSKDataFound) {
+
+                       Logger.minor(this, "Got data on "+uid);

-                       BlockReceiver br = new BlockReceiver(node.usm, next, 
uid, prb);
+                       if(!(key instanceof NodeSSK)) {
+                               Logger.error(this, "Got "+msg+" but expected a 
different key type from "+next);
+                               break;
+                       }

-                       try {
-                               byte[] data = br.receive();
-                               // Received data
-                               CHKBlock block;
-                               try {
-                                       block = new CHKBlock(data, headers, 
key);
-                               } catch (CHKVerifyException e1) {
-                                       Logger.normal(this, "Got data but 
verify failed: "+e1, e1);
-                                       finish(VERIFY_FAILURE, next);
-                                       return;
-                               }
-                               node.store(block);
-                               finish(SUCCESS, next);
-                               return;
-                       } catch (RetrievalException e) {
-                               Logger.normal(this, "Transfer failed: "+e, e);
-                               finish(TRANSFER_FAILED, next);
-                               return;
-                       }
-               } finally {
-                       node.removeTransferringSender(key, this);
+                       headers = 
((ShortBuffer)msg.getObject(DMT.BLOCK_HEADERS)).getData();
+                       
+                       sskData = 
((ShortBuffer)msg.getObject(DMT.DATA)).getData();
+                       
+                       if(pubKey != null) {
+                               finishSSK(next);
+                               return;
+                       }
+                       continue;
                }
+               
+                       Logger.error(this, "Unexpected message: "+msg);
+               
             }
         }
         } catch (Throwable t) {
             Logger.error(this, "Caught "+t, t);
+            finish(INTERNAL_ERROR, null);
         } finally {
                Logger.minor(this, "Leaving RequestSender.run() for "+uid);
             node.completed(uid);
@@ -312,7 +389,49 @@
         }
     }

-    private volatile boolean hasForwardedRejectedOverload;
+    private void finishSSK(PeerNode next) {
+       try {
+                       block = new SSKBlock(sskData, headers, (NodeSSK)key, 
false);
+                       node.store(block);
+                       finish(SUCCESS, next);
+               } catch (SSKVerifyException e) {
+                       Logger.error(this, "Failed to verify: "+e+" from 
"+next, e);
+                       finish(VERIFY_FAILURE, next);
+                       return;
+               }
+       }
+
+    /**
+     * Note that this must be first on the list.
+     */
+       private MessageFilter makeDataFoundFilter(PeerNode next) {
+       if(key instanceof NodeCHK)
+               return MessageFilter.create().setSource(next).setField(DMT.UID, 
uid).setTimeout(FETCH_TIMEOUT).setType(DMT.FNPCHKDataFound);
+       else if(key instanceof NodeSSK) {
+               return MessageFilter.create().setSource(next).setField(DMT.UID, 
uid).setTimeout(FETCH_TIMEOUT).setType(DMT.FNPSSKDataFound);
+       }
+       else throw new IllegalStateException("Unknown keytype: "+key);
+       }
+
+       private Message createDataRequest() {
+       if(key instanceof NodeCHK)
+               return DMT.createFNPCHKDataRequest(uid, htl, (NodeCHK)key, 
nearestLoc);
+       else if(key instanceof NodeSSK)
+               return DMT.createFNPSSKDataRequest(uid, htl, (NodeSSK)key, 
nearestLoc, pubKey == null);
+       else throw new IllegalStateException("Unknown keytype: "+key);
+       }
+
+       private void verifyAndCommit(byte[] data) throws KeyVerifyException {
+       if(key instanceof NodeCHK) {
+               CHKBlock block = new CHKBlock(data, headers, (NodeCHK)key);
+               node.store(block);
+       } else if (key instanceof NodeSSK) {
+               SSKBlock block = new SSKBlock(data, headers, (NodeSSK)key, 
false);
+               node.store(block);
+       }
+       }
+
+       private volatile boolean hasForwardedRejectedOverload;

     /** Forward RejectedOverload to the request originator */
     private synchronized void forwardRejectedOverload() {
@@ -392,4 +511,12 @@
     public short getHTL() {
         return htl;
     }
+    
+    final byte[] getSSKData() {
+       return sskData;
+    }
+    
+    public SSKBlock getSSKBlock() {
+       return block;
+    }
 }
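
A point worth calling out in the SSK path above: FNPSSKDataFound and FNPSSKPubKey can arrive in either order, so the sender caches whichever half turns up first (sskData/headers or pubKey) and only verifies and stores once both are in hand, which is what finishSSK() does. Reduced to its essence (the method name onSSKPiece() is illustrative, not from the diff):

    // Illustrative rendezvous: called after either SSK message has been handled.
    private void onSSKPiece(PeerNode next) {
        if(pubKey == null || sskData == null)
            return;                              // still waiting for the other half
        try {
            block = new SSKBlock(sskData, headers, (NodeSSK) key, false);
            node.store(block);
            finish(SUCCESS, next);
        } catch (SSKVerifyException e) {
            Logger.error(this, "Failed to verify: "+e+" from "+next, e);
            finish(VERIFY_FAILURE, next);
        }
    }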

Modified: branches/freenet-freejvms/src/freenet/node/RequestStarter.java
===================================================================
--- branches/freenet-freejvms/src/freenet/node/RequestStarter.java      
2006-02-03 22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/freenet/node/RequestStarter.java      
2006-02-03 22:55:27 UTC (rev 7999)
@@ -3,6 +3,9 @@
 import java.util.LinkedList;
 import java.util.Vector;

+import freenet.client.async.ClientRequest;
+import freenet.client.async.RequestScheduler;
+import freenet.client.async.SendableRequest;
 import freenet.support.Logger;
 import freenet.support.UpdatableSortedLinkedList;
 import freenet.support.UpdatableSortedLinkedListKilledException;
@@ -33,155 +36,104 @@
        /** Anything less important than prefetch (redundant??) */
        public static final short MINIMUM_PRIORITY_CLASS = 6;

-       // Clients registered
-       final Vector clientsByPriority;
+       public static final short NUMBER_OF_PRIORITY_CLASSES = 
MINIMUM_PRIORITY_CLASS - MAXIMUM_PRIORITY_CLASS;
+       
        final RequestThrottle throttle;
-       /*
-        * Clients which are ready.
-        * How do we do round-robin?
-        * Have a list of clients which are ready to go, in priority order, and
-        * haven't gone this cycle.
-        * Have a list of clients which are ready to go next cycle, in priority
-        * order.
-        * Have each client track the cycle number in which it was last sent.
-        */
-       final UpdatableSortedLinkedList clientsReadyThisCycle;
-       final UpdatableSortedLinkedList clientsReadyNextCycle;
-       /** Increment every time we go through the whole list */
-       long cycleNumber;
+       RequestScheduler sched;
+       final Node node;
+       private long sentRequestTime;

-       public RequestStarter(RequestThrottle throttle, String name) {
-               clientsByPriority = new Vector();
-               clientsReadyThisCycle = new UpdatableSortedLinkedList();
-               clientsReadyNextCycle = new UpdatableSortedLinkedList();
-               cycleNumber = 0;
+       public RequestStarter(Node node, RequestThrottle throttle, String name) 
{
+               this.node = node;
                this.throttle = throttle;
                this.name = name;
+       }
+
+       void setScheduler(RequestScheduler sched) {
+               this.sched = sched;
+       }
+       
+       void start() {
                Thread t = new Thread(this, name);
                t.setDaemon(true);
                t.start();
        }
-
+       
        final String name;

        public String toString() {
                return name;
        }

-       public synchronized void registerClient(RequestStarterClient client) {
-               int p = client.priority;
-               LinkedList prio = makePriority(p);
-               prio.add(client);
-       }
-
-       public synchronized void notifyReady(RequestStarterClient client) {
-               Logger.minor(this, "notifyReady("+client+")");
-               try {
-                       if(client.getCycleLastSent() == cycleNumber) {
-                               clientsReadyNextCycle.addOrUpdate(client);
-                       } else {
-                               // Can send immediately
-                               clientsReadyThisCycle.addOrUpdate(client);
-                       }
-               } catch (UpdatableSortedLinkedListKilledException e) {
-                       throw new Error(e);
-               }
-               notifyAll();
-       }
-       
-       private synchronized LinkedList makePriority(int p) {
-               while(p >= clientsByPriority.size()) {
-                       clientsByPriority.add(new LinkedList());
-               }
-               return (LinkedList) clientsByPriority.get(p);
-       }
-
-       public void run() {
-               long sentRequestTime = System.currentTimeMillis();
+       void realRun() {
+               SendableRequest req = null;
                while(true) {
-                       RequestStarterClient client;
-                       client = getNextClient();
-                       Logger.minor(this, "getNextClient() = "+client);
-                       if(client != null) {
-                               boolean success;
-                               try {
-                                       success = client.send(cycleNumber);
-                               } catch (Throwable t) {
-                                       Logger.error(this, "Caught "+t);
-                                       continue;
-                               }
-                               if(success) {
-                                       sentRequestTime = 
System.currentTimeMillis();
-                                       Logger.minor(this, "Sent");
-                                       if(client.isReady()) {
-                                               synchronized(this) {
-                                                       try {
-                                                               
clientsReadyNextCycle.addOrUpdate(client);
-                                                       } catch 
(UpdatableSortedLinkedListKilledException e) {
-                                                               // Impossible
-                                                               throw new 
Error(e);
-                                                       }
-                                               }
-                                       }
-                               }
-                       }
-                       while(true) {
+                       if(req == null) req = sched.removeFirst();
+                       if(req != null) {
+                               Logger.minor(this, "Running "+req);
+                               // Create a thread to handle starting the 
request, and the resulting feedback
+                               Thread t = new Thread(new SenderThread(req));
+                               t.setDaemon(true);
+                               t.start();
+                               Logger.minor(this, "Started "+req+" on "+t);
+                               sentRequestTime = System.currentTimeMillis();
+                               // Wait
                                long delay = throttle.getDelay();
+                               Logger.minor(this, "Delay="+delay+" from 
"+throttle);
                                long sleepUntil = sentRequestTime + delay;
-                               long now = System.currentTimeMillis();
-                               if(sleepUntil < now) {
-                                       if(waitingClients()) break;
-                                       // Otherwise wait for notification
-                                       try {
-                                               synchronized(this) {
-                                                       wait(1000);
-                                               }
-                                       } catch (InterruptedException e) {
-                                               // Ignore
-                                       }
-                               } else {
-                                       Logger.minor(this, 
"delay="+delay+"("+throttle+") sleep for "+(sleepUntil-now)+" for "+this);
-                                       if(sleepUntil - now > 0)
+                               long now;
+                               do {
+                                       now = System.currentTimeMillis();
+                                       if(now < sleepUntil)
                                                try {
-                                                       synchronized(this) {
-                                                               // At most 
sleep 500ms, then recompute.
-                                                               
wait(Math.min(sleepUntil - now, 500));
-                                                       }
+                                                       Thread.sleep(sleepUntil 
- now);
+                                                       Logger.minor(this, 
"Slept: "+(sleepUntil-now)+"ms");
                                                } catch (InterruptedException 
e) {
                                                        // Ignore
                                                }
+                               } while(now < sleepUntil);
+                               return;
+                       } else {
+                               Logger.minor(this, "Waiting...");
+                               synchronized(this) {
+                                       // Always take the lock on 
RequestStarter first.
+                                       req = sched.removeFirst();
+                                       if(req != null) {
+                                               continue;
+                                       }
+                                       try {
+                                               wait(1000);
+                                       } catch (InterruptedException e) {
+                                               // Ignore
+                                       }
+                                       return;
                                }
                        }
                }
        }
-
-       private synchronized boolean waitingClients() {
-               return !(clientsReadyThisCycle.isEmpty() && 
clientsReadyNextCycle.isEmpty());
+       
+       public void run() {
+               while(true) {
+                       try {
+                               realRun();
+                       } catch (Throwable t) {
+                               Logger.error(this, "Caught "+t, t);
+                       }
+               }
        }
+       
+       private class SenderThread implements Runnable {

-       /**
-        * Get the next ready client.
-        */
-       private synchronized RequestStarterClient getNextClient() {
-               try {
-                       while(true) {
-                       if(clientsReadyThisCycle.isEmpty() && 
clientsReadyNextCycle.isEmpty())
-                               return null;
-                       if(clientsReadyThisCycle.isEmpty()) {
-                               cycleNumber++;
-                               
clientsReadyNextCycle.moveTo(clientsReadyThisCycle);
-                       }
-                       RequestStarterClient c = (RequestStarterClient) 
clientsReadyThisCycle.removeLowest();
-                       if(c.getCycleLastSent() == cycleNumber) {
-                               clientsReadyNextCycle.add(c);
-                               continue;
-                       } else {
-                               c.setCycleLastSet(cycleNumber);
-                               return c;
-                       }
-                       }
-               } catch (UpdatableSortedLinkedListKilledException e) {
-                       throw new Error(e);
+               private final SendableRequest req;
+               
+               public SenderThread(SendableRequest req) {
+                       this.req = req;
                }
+
+               public void run() {
+                       req.send(node);
+               }
+               
        }
+       
 }
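
The rewritten RequestStarter drops the per-client cycle bookkeeping entirely: it drains SendableRequests from the RequestScheduler, hands each one to a short-lived daemon thread, and paces successive starts with throttle.getDelay(). The control flow of realRun(), reduced to a sketch (pacingLoop() is an illustrative name; error handling is omitted because run() already wraps everything in a catch-all):

    // Illustrative pacing loop: one request start per throttle delay.
    void pacingLoop() throws InterruptedException {
        while(true) {
            SendableRequest req = sched.removeFirst();
            if(req == null) {
                synchronized(this) { wait(1000); }   // nothing queued; wake on notify or timeout
                continue;
            }
            Thread t = new Thread(new SenderThread(req));
            t.setDaemon(true);
            t.start();
            Thread.sleep(throttle.getDelay());       // rate-limit between starts
        }
    }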

Deleted: branches/freenet-freejvms/src/freenet/node/RequestStarterClient.java
===================================================================
--- branches/freenet-freejvms/src/freenet/node/RequestStarterClient.java        
2006-02-03 22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/freenet/node/RequestStarterClient.java        
2006-02-03 22:55:27 UTC (rev 7999)
@@ -1,117 +0,0 @@
-package freenet.node;
-
-import java.util.Vector;
-
-import freenet.crypt.RandomSource;
-import freenet.keys.ClientCHKBlock;
-import freenet.keys.ClientKey;
-import freenet.keys.ClientKeyBlock;
-import freenet.keys.KeyBlock;
-import freenet.support.DoublyLinkedList;
-import freenet.support.UpdatableSortedLinkedListItemImpl;
-
-/**
- * Interface to clients for starting a request.
- * Also represents a single client for fairness purposes.
- */
-public class RequestStarterClient extends UpdatableSortedLinkedListItemImpl {
-
-       final int priority;
-       private int random;
-       private long cycleLastSent;
-       private final Vector requests;
-       private final RandomSource rs;
-       private final QueueingSimpleLowLevelClient client;
-       private final RequestStarter starter;
-
-       public RequestStarterClient(short prioClass, short prio, RandomSource 
r, QueueingSimpleLowLevelClient c, RequestStarter starter) {
-               this((prioClass << 16) + prio, r, c, starter);
-       }
-       
-       private RequestStarterClient(int prio, RandomSource r, 
QueueingSimpleLowLevelClient c, RequestStarter starter) {
-               priority = prio;
-               this.random = r.nextInt();
-               this.starter = starter;
-               this.cycleLastSent = -1;
-               this.requests = new Vector();
-               this.rs = r;
-               this.client = c;
-               starter.registerClient(this);
-       }
-       
-       /**
-        * Blocking fetch of a key.
-        * @throws LowLevelGetException If the fetch failed for some reason.
-        */
-       public ClientKeyBlock getKey(ClientKey key, boolean localOnly, boolean 
cache) throws LowLevelGetException {
-               QueuedDataRequest qdr = new QueuedDataRequest(key, localOnly, 
cache, client);
-               addRequest(qdr);
-               return qdr.waitAndFetch();
-       }
-       
-       /**
-        * Blocking insert of a key.
-        * @throws LowLevelPutException If the fetch failed for some reason.
-        */
-       public void putCHK(ClientCHKBlock block, boolean cache) throws 
LowLevelPutException {
-               QueuedInsertRequest qir = new QueuedInsertRequest(block, 
client, cache);
-               addRequest(qir);
-               qir.waitAndPut();
-       }
-       
-       void addRequest(QueuedRequest qr) {
-               synchronized(this) {
-                       requests.add(qr);
-               }
-               if(starter != null)
-                       starter.notifyReady(this);
-       }
-       
-       public long getCycleLastSent() {
-               return cycleLastSent;
-       }
-
-       private DoublyLinkedList parentList;
-       
-       public DoublyLinkedList getParent() {
-               return parentList;
-       }
-
-       public DoublyLinkedList setParent(DoublyLinkedList l) {
-               DoublyLinkedList oldList = parentList;
-               parentList = l;
-               return oldList;
-       }
-
-       public int compareTo(Object o) {
-               if(this == o) return 0;
-               RequestStarterClient c = (RequestStarterClient) o;
-               if(priority > c.priority) return 1;
-               if(priority < c.priority) return -1;
-               if(random > c.random) return 1;
-               if(random < c.random) return -1;
-               return 0;
-       }
-
-       public synchronized boolean isReady() {
-               return !requests.isEmpty();
-       }
-
-       public boolean send(long cycleNumber) {
-               QueuedRequest qr;
-               synchronized(this) {
-                       if(!requests.isEmpty()) {
-                               int x = rs.nextInt(requests.size());
-                               qr = (QueuedRequest) requests.remove(x);
-                       } else qr = null;
-               }
-               if(qr == null) return false;
-               qr.clearToSend();
-               return true;
-       }
-
-       public void setCycleLastSet(long cycleNumber) {
-               this.cycleLastSent = cycleNumber;
-       }
-
-}

Copied: branches/freenet-freejvms/src/freenet/node/SSKInsertHandler.java (from 
rev 7998, trunk/freenet/src/freenet/node/SSKInsertHandler.java)

Copied: branches/freenet-freejvms/src/freenet/node/SSKInsertSender.java (from 
rev 7998, trunk/freenet/src/freenet/node/SSKInsertSender.java)

Deleted: branches/freenet-freejvms/src/freenet/node/SimpleLowLevelClient.java
===================================================================
--- branches/freenet-freejvms/src/freenet/node/SimpleLowLevelClient.java        
2006-02-03 22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/freenet/node/SimpleLowLevelClient.java        
2006-02-03 22:55:27 UTC (rev 7999)
@@ -1,31 +0,0 @@
-package freenet.node;
-
-import freenet.keys.ClientCHKBlock;
-import freenet.keys.ClientKey;
-import freenet.keys.ClientKeyBlock;
-import freenet.keys.KeyBlock;
-
-/**
- * @author amphibian
- *
- * Simple client interface... fetch and push single CHKs. No
- * splitfile decoding, no DBRs, no SSKs, for now.
- * 
- * We can build higher layers on top of this.
- */
-public interface SimpleLowLevelClient {
-
-    /**
-     * Fetch a key. Throws if it cannot fetch it.
-     * @param cache If false, don't cache the data. See the comments at the top
-     * of Node.java.
-     */
-    public ClientKeyBlock getKey(ClientKey key, boolean localOnly, 
RequestStarterClient client, boolean cache) throws LowLevelGetException;
-
-    /**
-     * Insert a key.
-     * @param cache If false, don't cache the data. See the comments at the top
-     * of Node.java.
-     */
-    public void putCHK(ClientCHKBlock key, RequestStarterClient sctx, boolean 
cache) throws LowLevelPutException;
-}

Modified: branches/freenet-freejvms/src/freenet/node/TestnetHandler.java
===================================================================
--- branches/freenet-freejvms/src/freenet/node/TestnetHandler.java      
2006-02-03 22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/freenet/node/TestnetHandler.java      
2006-02-03 22:55:27 UTC (rev 7999)
@@ -44,6 +44,9 @@
                System.err.println("We repeat: YOU HAVE NO ANONYMITY 
WHATSOEVER. DO NOT POST ANYTHING YOU DO NOT WANT TO BE ASSOCIATED WITH.");
                System.err.println("If you want a real freenet node, with 
anonymity, turn off testnet mode.");
                System.err.println("Note, this node will not connect to 
non-testnet nodes, for security reasons. You can of course run a testnet node 
and a non-testnet node separately.");
+       }
+
+       public void start() {
                serverThread = new Thread(this, "Testnet handler thread");
                serverThread.setDaemon(true);
                serverThread.start();
@@ -51,7 +54,7 @@
        }

        private final Node node;
-       private final Thread serverThread;
+       private Thread serverThread;
        private final int testnetPort;

        public void run() {
@@ -68,7 +71,7 @@
                while(true) {
                        try {
                                Socket s = server.accept();
-                               TestnetSocketHandler tsh = new 
TestnetSocketHandler(s);
+                               new TestnetSocketHandler(s);
                        } catch (IOException e) {
                                Logger.error(this, "Testnet failed to accept 
socket: "+e, e);
                        }
@@ -97,6 +100,7 @@
                                InputStreamReader isr = new 
InputStreamReader(is, "ISO-8859-1");
                                BufferedReader br = new BufferedReader(isr);
                                String command = br.readLine();
+                               if(command == null) return;
                                Logger.minor(this, "Command: "+command);
                                if(command.equalsIgnoreCase("LIST")) {
                                        Logger.minor(this, "Listing available 
logs");

Modified: branches/freenet-freejvms/src/freenet/node/TestnetStatusUploader.java
===================================================================
--- branches/freenet-freejvms/src/freenet/node/TestnetStatusUploader.java       
2006-02-03 22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/freenet/node/TestnetStatusUploader.java       
2006-02-03 22:55:27 UTC (rev 7999)
@@ -1,18 +1,8 @@
 package freenet.node;

-//import java.io.BufferedReader;
 import java.io.IOException;
-//import java.io.InputStream;
-//import java.io.InputStreamReader;
-//import java.io.OutputStream;
-//import java.io.OutputStreamWriter;
 import java.io.PrintStream;
 import java.net.Socket;
-//import java.text.DateFormat;
-//import java.text.ParseException;
-//import java.util.Date;
-//import java.util.Locale;
-//import java.util.TimeZone;

 import freenet.support.Logger;

@@ -38,30 +28,41 @@
                System.err.println("You have no anonymity. Thank you for 
running a testnet node, this will help the developers to efficiently debug 
Freenet, by letting them (and anyone else who knows how!!) automatically fetch 
your log files.");
                System.err.println("We repeat: YOU HAVE NO ANONYMITY 
WHATSOEVER. DO NOT POST ANYTHING YOU DO NOT WANT TO BE ASSOCIATED WITH.");
                System.err.println("If you want a real freenet node, with 
anonymity, turn off testnet mode.");
+       }
+
+       void start() {
                uploaderThread = new Thread(this, "TestnetStatusUploader 
thread");
                uploaderThread.setDaemon(true);
                uploaderThread.start();
        }

        private final Node node;
-       private final Thread uploaderThread;
+       private Thread uploaderThread;
        private final int updateInterval;
        private Socket client;

        public void run() {
-               // Set up client socket
-               try
-               {
                        //thread loop

                        while(true){

-                               client = new Socket("sleon.dyndns.org", 23415);
-                               PrintStream output = new 
PrintStream(client.getOutputStream());
-                               output.println(node.getStatus());
-                               output.close();
-       
-                               client.close();
+                               // Set up client socket
+                               try
+                               {
+                                       client = new Socket("sleon.dyndns.org", 
23415);
+                                       PrintStream output = new 
PrintStream(client.getOutputStream());
+                               
+                                       
output.println(node.exportFieldSet().toString());
+                                       output.println();
+                                       output.println(node.getFreevizOutput());
+                                       output.close();
+                                       
+                                       client.close();
+                                       
+                               } catch (IOException e){
+                                       Logger.error(this, "Could not open 
connection to the uploadhost");
+                                       System.err.println("Could not open 
connection to the uploadhost");
+                               }

                                try{
                                        Thread.sleep(updateInterval);
@@ -74,11 +75,6 @@

                        }

-               }catch (IOException e){
-                       Logger.error(this, "Could not open connection to the 
uploadhost");
-                       System.err.println("Could not open connection to the 
uploadhost");
-                       return;
-               }

        }
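
The functional change in TestnetStatusUploader is that the try/catch now sits inside the while loop: a failed connection is logged and the thread sleeps until the next interval instead of dying on the first IOException, and each upload now sends the node's exported field set followed by its Freeviz output. The retry pattern, stripped of the surrounding class (host, port and payload calls are the ones in the diff):

    // Per-iteration error handling: log a failed upload, sleep, try again next interval.
    while(true) {
        try {
            Socket s = new Socket("sleon.dyndns.org", 23415);
            PrintStream out = new PrintStream(s.getOutputStream());
            out.println(node.exportFieldSet().toString());
            out.println();
            out.println(node.getFreevizOutput());
            out.close();
            s.close();
        } catch (IOException e) {
            Logger.error(this, "Could not open connection to the uploadhost");
        }
        try {
            Thread.sleep(updateInterval);
        } catch (InterruptedException e) {
            // ignore
        }
    }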


Modified: 
branches/freenet-freejvms/src/freenet/node/TextModeClientInterface.java
===================================================================
--- branches/freenet-freejvms/src/freenet/node/TextModeClientInterface.java     
2006-02-03 22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/freenet/node/TextModeClientInterface.java     
2006-02-03 22:55:27 UTC (rev 7999)
@@ -1,16 +1,16 @@
 package freenet.node;

 import java.io.BufferedReader;
-import java.io.DataInputStream;
-import java.io.DataOutputStream;
 import java.io.EOFException;
 import java.io.File;
-import java.io.FileInputStream;
 import java.io.FileNotFoundException;
 import java.io.FileOutputStream;
+import java.io.FileReader;
 import java.io.IOException;
 import java.io.InputStreamReader;
 import java.net.MalformedURLException;
+import java.net.URL;
+import java.net.URLConnection;
 import java.util.HashMap;
 import java.util.Hashtable;
 import java.util.Iterator;
@@ -20,7 +20,6 @@
 import freenet.client.FetchException;
 import freenet.client.FetchResult;
 import freenet.client.HighLevelSimpleClient;
-import freenet.client.HighLevelSimpleClientImpl;
 import freenet.client.InsertBlock;
 import freenet.client.InserterException;
 import freenet.client.Metadata;
@@ -28,6 +27,7 @@
 import freenet.crypt.RandomSource;
 import freenet.io.comm.PeerParseException;
 import freenet.keys.FreenetURI;
+import freenet.keys.InsertableClientSSK;
 import freenet.support.ArrayBucket;
 import freenet.support.Bucket;
 import freenet.support.BucketTools;
@@ -49,19 +49,15 @@
     final Node n;
     final HighLevelSimpleClient client;
     final Hashtable streams;
-    final RequestStarterClient requestStarterClient;
-    final RequestStarterClient insertStarterClient;
     final File downloadsDir;

     TextModeClientInterface(Node n) {
         this.n = n;
-        client = n.makeClient(RequestStarter.INTERACTIVE_PRIORITY_CLASS, 
(short)0);
+        client = n.makeClient(RequestStarter.INTERACTIVE_PRIORITY_CLASS);
         client.addGlobalHook(new EventDumper());
         this.r = n.random;
         streams = new Hashtable();
         new Thread(this, "Text mode client interface").start();
-        this.requestStarterClient = 
n.makeStarterClient(RequestStarter.INTERACTIVE_PRIORITY_CLASS, (short)0, false);
-        this.insertStarterClient = 
n.makeStarterClient(RequestStarter.INTERACTIVE_PRIORITY_CLASS, (short)0, true);
         this.downloadsDir = n.downloadDir;
     }

@@ -87,7 +83,7 @@
         System.out.println("Freenet 0.7 Trivial Node Test Interface");
         System.out.println("---------------------------------------");
         System.out.println();
-        System.out.println("Build "+Version.buildNumber);
+        System.out.println("Build "+Version.buildNumber());
         System.out.println("Enter one of the following commands:");
         System.out.println("GET:<Freenet key> - Fetch a key");
         System.out.println("PUT:\n<text, until a . on a line by itself> - 
Insert the document and return the key.");
@@ -99,10 +95,13 @@
         System.out.println("GETCHKFILE:<filename> - Get the key that would be 
returned if we inserted the file.");
         System.out.println("PUTDIR:<path>[#<defaultfile>] - Put the entire 
directory from disk.");
         System.out.println("GETCHKDIR:<path>[#<defaultfile>] - Get the key 
that would be returned if we'd put the entire directory from disk.");
+        System.out.println("MAKESSK - Create an SSK keypair.");
+        System.out.println("PUTSSK:<insert uri>;<url to redirect to> - Insert 
an SSK redirect to a file already inserted.");
+        System.out.println("PUTSSKDIR:<insert uri>#<path>[#<defaultfile>] - 
Insert an entire directory to an SSK.");
 //        System.out.println("PUBLISH:<name> - create a publish/subscribe 
stream called <name>");
 //        System.out.println("PUSH:<name>:<text> - publish a single line of 
text to the stream named");
 //        System.out.println("SUBSCRIBE:<key> - subscribe to a 
publish/subscribe stream by key");
-        System.out.println("CONNECT:<filename> - connect to a node from its 
ref in a file.");
+        System.out.println("CONNECT:<filename|URL> - connect to a node from 
its ref in a file/url.");
         System.out.println("CONNECT:\n<noderef including an End on a line by 
itself> - enter a noderef directly.");
         System.out.println("DISCONNECT:<ip:port> - disconnect from a node by 
providing its ip+port");
         System.out.println("NAME:<new node name> - change the node's name.");
@@ -151,12 +150,32 @@
                                FetchResult result = client.fetch(uri);
                                ClientMetadata cm = result.getMetadata();
                                System.out.println("Content MIME type: 
"+cm.getMIMEType());
+                               Bucket data = result.asBucket();
+                               // FIXME limit it above
+                               if(data.size() > 32*1024) {
+                                       System.err.println("Data is more than 
32K: "+data.size());
+                                       return;
+                               }
+                               byte[] dataBytes = 
BucketTools.toByteArray(data);
+                               boolean evil = false;
+                               for(int i=0;i<dataBytes.length;i++) {
+                                       // Look for escape codes
+                                       if(dataBytes[i] == '\n') continue;
+                                       if(dataBytes[i] == '\r') continue;
+                                       if(dataBytes[i] < 32) evil = true;
+                               }
+                               if(evil) {
+                                       System.err.println("Data may contain 
escape codes which could cause the terminal to run arbitrary commands! Save it 
to a file if you must with GETFILE:");
+                                       return;
+                               }
                                System.out.println("Data:\n");
-                               Bucket data = result.asBucket();
-                               BucketTools.copyTo(data, System.out, 
Long.MAX_VALUE);
+                               System.out.println(new String(dataBytes));
                                System.out.println();
                        } catch (FetchException e) {
                                System.out.println("Error: "+e.getMessage());
+               if(e.getMode() == e.SPLITFILE_ERROR && e.errorCodes != null) {
+                       System.out.println(e.errorCodes.toVerboseString());
+               }
                        }
         } else if(uline.startsWith("GETFILE:")) {
             // Should have a key next
@@ -224,7 +243,10 @@
             System.exit(0);
         } else if(uline.startsWith("PUT:") || (getCHKOnly = 
uline.startsWith("GETCHK:"))) {
             // Just insert to local store
-            line = line.substring("PUT:".length());
+               if(getCHKOnly)
+                       line = line.substring(("GETCHK:").length());
+               else
+                       line = line.substring("PUT:".length());
             while(line.length() > 0 && line.charAt(0) == ' ')
                 line = line.substring(1);
             while(line.length() > 0 && line.charAt(line.length()-1) == ' ')
@@ -258,13 +280,18 @@

             System.out.println("URI: "+uri);
             
////////////////////////////////////////////////////////////////////////////////
-        } else if(uline.startsWith("PUTDIR:") || (getCHKOnly = 
uline.startsWith("GETCHKDIR:"))) {
+        } else if(uline.startsWith("PUTDIR:") || uline.startsWith("PUTSSKDIR:") || (getCHKOnly = uline.startsWith("GETCHKDIR:"))) {
                // TODO: Check for errors?
-               if(getCHKOnly) {
+               boolean ssk = false;
+               if(uline.startsWith("PUTDIR:"))
+                       line = line.substring("PUTDIR:".length());
+               else if(uline.startsWith("PUTSSKDIR:")) {
+                       line = line.substring("PUTSSKDIR:".length());
+                       ssk = true;
+               } else if(uline.startsWith("GETCHKDIR:"))
                        line = line.substring(("GETCHKDIR:").length());
-               } else {
-                       line = line.substring("PUTDIR:".length());
-               }
+               else
+                       System.err.println("Impossible");

                line = line.trim();

@@ -275,67 +302,57 @@

                String defaultFile = null;

+               FreenetURI insertURI = FreenetURI.EMPTY_CHK_URI;
+               
                // set default file?
                if (line.matches("^.*#.*$")) {
-                       defaultFile = line.split("#")[1];
-                       line = line.split("#")[0];
+                       String[] split = line.split("#");
+                       if(ssk) {
+                               insertURI = new FreenetURI(split[0]);
+                               line = split[1];
+                               if(split.length > 2)
+                                       defaultFile = split[2];
+                       } else {
+                               defaultFile = split[1];
+                               line = split[0];
+                       }
                }

-               // Get files as name and keys
-               HashMap manifestBase = dirPut(line, getCHKOnly);
+               HashMap bucketsByName =
+                       makeBucketsByName(line);

-               // Set defaultfile
-               if (defaultFile != null) {
-                       HashMap currPos = manifestBase;
-                       String splitpath[] = defaultFile.split("/");
-                       int i = 0;
-                       for( ; i < (splitpath.length - 1) ; i++)
-                               currPos = (HashMap)currPos.get(splitpath[i]);
-                       
-                       if (currPos.get(splitpath[i]) != null) {
-                               // Add key as default
-                               manifestBase.put("", currPos.get(splitpath[i]));
-                               System.out.println("Using default key: " + 
currPos.get(splitpath[i]));
-                       }else{
-                               System.err.println("Default key not found. No 
default document.");
+               if(defaultFile == null) {
+                       String[] defaultFiles = 
+                               new String[] { "index.html", "index.htm", 
"default.html", "default.htm" };
+                       for(int i=0;i<defaultFiles.length;i++) {
+                               if(bucketsByName.containsKey(defaultFiles[i])) {
+                                       defaultFile = defaultFiles[i];
+                                       break;
+                               }                                       
                        }
-                       //getchkdir:/home/cyberdo/fort/new#filelist
                }

-               // Create metadata
-            Metadata med = Metadata.mkRedirectionManifest(manifestBase);
-            ClientMetadata md = med.getClientMetadata();
-            
-            // Extract binary data from metadata
-            ArrayBucket metabucket = new ArrayBucket();
-            DataOutputStream mdos = new DataOutputStream( 
metabucket.getOutputStream() );
-            med.writeTo(mdos);
-            mdos.close();
-            
-            // Insert metadata
-            InsertBlock block = new InsertBlock(metabucket, md, 
FreenetURI.EMPTY_CHK_URI);
-            
-            FreenetURI uri;
-            try {
-               uri = ((HighLevelSimpleClientImpl)client).insert(block, 
getCHKOnly, true);
-            } catch (InserterException e) {
-               System.out.println("Error: "+e.getMessage());
-               if(e.uri != null)
-                       System.out.println("URI would have been: "+e.uri);
-               int mode = e.getMode();
-               if(mode == InserterException.FATAL_ERRORS_IN_BLOCKS || mode == 
InserterException.TOO_MANY_RETRIES_IN_BLOCKS) {
-                       System.out.println("Splitfile-specific 
error:\n"+e.errorCodes.toVerboseString());
+               FreenetURI uri;
+                       try {
+                               uri = client.insertManifest(insertURI, 
bucketsByName, defaultFile);
+                               uri = uri.addMetaStrings(new String[] { "" });
+                       
System.out.println("=======================================================");
+                   System.out.println("URI: "+uri);
+                       
System.out.println("=======================================================");
+                       } catch (InserterException e) {
+               System.out.println("Finished insert but: "+e.getMessage());
+               if(e.uri != null) {
+                       uri = e.uri;
+                               uri = uri.addMetaStrings(new String[] { "" });
+                       System.out.println("URI would have been: "+uri);
                }
-               return;
-            }
+               if(e.errorCodes != null) {
+                       System.out.println("Splitfile errors breakdown:");
+                       System.out.println(e.errorCodes.toVerboseString());
+               }
+               Logger.error(this, "Caught "+e, e);
+                       }

-               String filelist = dirPutToList(manifestBase, "");
-               
System.out.println("=======================================================");
-               System.out.println(filelist);
-               
System.out.println("=======================================================");
-            System.out.println("URI: "+uri);
-               
System.out.println("=======================================================");
-            
         } else if(uline.startsWith("PUTFILE:") || (getCHKOnly = 
uline.startsWith("GETCHKFILE:"))) {
             // Just insert to local store
                if(getCHKOnly) {
@@ -358,6 +375,8 @@
                // Guess MIME type
                String mimeType = DefaultMIMETypes.guessMIMEType(line);
                System.out.println("Using MIME type: "+mimeType);
+               if(mimeType.equals(DefaultMIMETypes.DEFAULT_MIME_TYPE))
+                       mimeType = ""; // don't need to override it

                FileBucket fb = new FileBucket(f, true, false, false);
                InsertBlock block = new InsertBlock(fb, new 
ClientMetadata(mimeType), FreenetURI.EMPTY_CHK_URI);
@@ -392,40 +411,83 @@
                 System.out.println("Insert threw: "+t);
                 t.printStackTrace();
             }
+        } else if(uline.startsWith("MAKESSK")) {
+               InsertableClientSSK key = InsertableClientSSK.createRandom(r);
+               System.out.println("Insert URI: 
"+key.getInsertURI().toString(false));
+               System.out.println("Request URI: 
"+key.getURI().toString(false));
+               FreenetURI insertURI = 
key.getInsertURI().setDocName("testsite");
+               String fixedInsertURI = insertURI.toString(false);
+               System.out.println("Note that you MUST add a filename to the 
end of the above URLs e.g.:\n"+fixedInsertURI);
+               System.out.println("Normally you will then do PUTSSKDIR:<insert 
URI>#<directory to upload>, for 
example:\nPUTSSKDIR:"+fixedInsertURI+"#directoryToUpload/");
+               System.out.println("This will then produce a manifest site 
containing all the files, the default document can be accessed 
at\n"+insertURI.addMetaStrings(new String[] { "" }).toString(false));
+        } else if(uline.startsWith("PUTSSK:")) {
+               String cmd = line.substring("PUTSSK:".length());
+               cmd = cmd.trim();
+               if(cmd.indexOf(';') <= 0) {
+                       System.out.println("No target URI provided.");
+                       System.out.println("PUTSSK:<insert uri>;<url to 
redirect to>");
+                       return;
+               }
+               String[] split = cmd.split(";");
+               String insertURI = split[0];
+               String targetURI = split[1];
+               System.out.println("Insert URI: "+insertURI);
+               System.out.println("Target URI: "+targetURI);
+               FreenetURI insert = new FreenetURI(insertURI);
+               FreenetURI target = new FreenetURI(targetURI);
+               InsertableClientSSK key = InsertableClientSSK.create(insert);
+               System.out.println("Fetch URI: "+key.getURI());
+               try {
+                       FreenetURI result = client.insertRedirect(insert, target);
+                       System.out.println("Successfully inserted to fetch URI: "+key.getURI());
+               } catch (InserterException e) {
+                       System.out.println("Finished insert but: "+e.getMessage());
+                       Logger.normal(this, "Error: "+e, e);
+                       if(e.uri != null) {
+                               System.out.println("URI would have been: "+e.uri);
+                       }
+               }
+
         } else if(uline.startsWith("STATUS")) {
             SimpleFieldSet fs = n.exportFieldSet();
             System.out.println(fs.toString());
             System.out.println();
             System.out.println(n.getStatus());
+           if(Version.buildNumber() < Version.highestSeenBuild) {
+                   System.out.println("The latest version seen is: "+Version.highestSeenBuild);
+           }
         } else if(uline.startsWith("CONNECT:")) {
             String key = line.substring("CONNECT:".length());
             while(key.length() > 0 && key.charAt(0) == ' ')
                 key = key.substring(1);
             while(key.length() > 0 && key.charAt(key.length()-1) == ' ')
                 key = key.substring(0, key.length()-2);
+            
+            String content = null;
             if(key.length() > 0) {
                 // Filename
+               BufferedReader in;
                 System.out.println("Trying to connect to noderef in "+key);
                 File f = new File(key);
-                System.out.println("Attempting to read file "+key);
-                try {
-                    FileInputStream fis = new FileInputStream(key);
-                    DataInputStream dis = new DataInputStream(fis);
-                    int length = (int)f.length();
-                    byte[] data = new byte[length];
-                    dis.readFully(data);
-                    dis.close();
-                    connect(new String(data));
-                } catch (IOException e) {
-                    System.err.println("Could not read file: "+e);
-                    e.printStackTrace(System.err);
+                if (f.isFile()) {
+                       System.out.println("Given string seems to be a file, loading...");
+                       in = new BufferedReader(new FileReader(f));
+                } else {
+                       System.out.println("Given string seems to be a URL, loading...");
+                       URL url = new URL(key);
+                       URLConnection uc = url.openConnection();
+                       in = new BufferedReader(new InputStreamReader(uc.getInputStream()));
                 }
+                content = readLines(in, true);
+                in.close();
             } else {
-                String content = readLines(reader, true);
-                if(content == null) return;
-                if(content.equals("")) return;
-                connect(content);
+                content = readLines(reader, true);
             }
+            if(content == null) return;
+            if(content.equals("")) return;
+            connect(content);
+        
         } else if(uline.startsWith("NAME:")) {
             System.out.println("Node name currently: "+n.myName);
             String key = line.substring("NAME:".length());
@@ -444,101 +506,43 @@
         }
     }

-    
-    private String dirPutToList(HashMap dir, String basedir) {
-       String ret = "";
-               for(Iterator i = dir.keySet().iterator();i.hasNext();) {
-                       String key = (String) i.next();
-                       Object o = dir.get(key);
-                       Metadata target;
-                       if(o instanceof String) {
-                               // File
-                               ret += basedir + key + "\n";
-                       } else if(o instanceof HashMap) {
-                               ret += dirPutToList((HashMap)o, basedir + key + 
"//");
-                       } else throw new IllegalArgumentException("Not String 
nor HashMap: "+o);
-               }
-               return ret;
-    }
-    
-    private HashMap dirPut(String directory, boolean getCHKOnly) {
+    /**
+     * Create a map of String -> Bucket for every file in a directory
+     * and its subdirs.
+     */
+    private HashMap makeBucketsByName(String directory) {
+       
        if (!directory.endsWith("/"))
                directory = directory + "/";
        File thisdir = new File(directory);

+       System.out.println("Listing dir: "+thisdir);
+       
        HashMap ret = new HashMap();

        File filelist[] = thisdir.listFiles();
-       for(int i = 0 ; i < filelist.length ; i++)
-               if (filelist[i].isFile()) {
-                       FreenetURI uri = null;
+       if(filelist == null)
+               throw new IllegalArgumentException("No such directory");
+       for(int i = 0 ; i < filelist.length ; i++) {
+               if (filelist[i].isFile() && filelist[i].canRead()) {
                        File f = filelist[i];
-                       String line = f.getAbsolutePath(); 
-                       // To ease cleanup, the following code is taken from 
above
-                       // Except for the uri-declaration above.
-                       // Somelines is also commented out
-                       
//////////////////////////////////////////////////////////////////////////////////////
-                       System.out.println("Attempting to read file "+line);
-                long startTime = System.currentTimeMillis();
-                try {
-                       if(!(f.exists() && f.canRead())) {
-                               throw new FileNotFoundException();
-                       }
-                       
-                       // Guess MIME type
-                       String mimeType = DefaultMIMETypes.guessMIMEType(line);
-                       System.out.println("Using MIME type: "+mimeType);
-                       
-                       FileBucket fb = new FileBucket(f, true, false, false);
-                       InsertBlock block = new InsertBlock(fb, new 
ClientMetadata(mimeType), FreenetURI.EMPTY_CHK_URI);
-
-                       startTime = System.currentTimeMillis();
-                       // Declaration is moved out!!!!!!!!!!!!
-                       uri = client.insert(block, getCHKOnly);
-                       
-                       // FIXME depends on CHK's still being renamable
-                    //uri = uri.setDocName(f.getName());
-                       
-                    System.out.println("URI: "+uri);
-                       long endTime = System.currentTimeMillis();
-                    long sz = f.length();
-                    double rate = 1000.0 * sz / (endTime-startTime);
-                    System.out.println("Upload rate: "+rate+" bytes / second");
-                } catch (FileNotFoundException e1) {
-                    System.out.println("File not found");
-                } catch (InserterException e) {
-                       System.out.println("Finished insert but: 
"+e.getMessage());
-                       if(e.uri != null) {
-                               System.out.println("URI would have been: 
"+e.uri);
-                       long endTime = System.currentTimeMillis();
-                        long sz = f.length();
-                        double rate = 1000.0 * sz / (endTime-startTime);
-                        System.out.println("Upload rate: "+rate+" bytes / 
second");
-                       }
-                       if(e.errorCodes != null) {
-                               System.out.println("Splitfile errors 
breakdown:");
-                               
System.out.println(e.errorCodes.toVerboseString());
-                       }
-                } catch (Throwable t) {
-                    System.out.println("Insert threw: "+t);
-                    t.printStackTrace();
-                }
-                
//////////////////////////////////////////////////////////////////////////////////////

-                if (uri != null)
-                       ret.put(filelist[i].getName(), uri.toString(false));
-                else
-                       System.err.println("Could not insert file.");
-                //ret.put(filelist[i].getName(), null);
-               } else {
-                       HashMap subdir = dirPut(filelist[i].getAbsolutePath(), 
getCHKOnly);
-                       ret.put(filelist[i].getName(), subdir);
+                       FileBucket bucket = new FileBucket(f, true, false, 
false);
+                       
+                       ret.put(f.getName(), bucket);
+               } else if(filelist[i].isDirectory()) {
+                       HashMap subdir = makeBucketsByName(directory + 
filelist[i].getName());
+                       Iterator it = subdir.keySet().iterator();
+                       while(it.hasNext()) {
+                               String key = (String) it.next();
+                               Bucket bucket = (Bucket) subdir.get(key);
+                               ret.put(filelist[i].getName() + "/" + key, 
bucket);
+                       }
                }
-       
+       }
        return ret;
        }
-    
-    
+
     /**
      * @return A block of text, input from stdin, ending with a
      * . on a line by itself. Does some mangling for a fieldset if 
@@ -626,10 +630,12 @@
         try {
             pn = new PeerNode(fs, n);
         } catch (FSParseException e1) {
-            System.err.println("Did not parse: "+e1.getMessage());
+            System.err.println("Did not parse: "+e1);
+            Logger.error(this, "Did not parse: "+e1, e1);
             return;
         } catch (PeerParseException e1) {
-            System.err.println("Did not parse: "+e1.getMessage());
+            System.err.println("Did not parse: "+e1);
+            Logger.error(this, "Did not parse: "+e1, e1);
             return;
         }
         if(n.peers.addPeer(pn))
@@ -645,7 +651,7 @@
        PeerNode[] pn = n.peers.myPeers;
        for(int i=0;i<pn.length;i++)
        {
-               String nodeIpAndPort = 
pn[i].getPeer().getAddress().getHostAddress()+":"+pn[i].getPeer().getPort();
+               String nodeIpAndPort = 
pn[i].getDetectedPeer().getAddress().getHostAddress()+":"+pn[i].getDetectedPeer().getPort();
                if(nodeIpAndPort.equals(ipAndPort))
                {
                        n.peers.disconnect(pn[i]);

Modified: branches/freenet-freejvms/src/freenet/node/Version.java
===================================================================
--- branches/freenet-freejvms/src/freenet/node/Version.java     2006-02-03 
22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/freenet/node/Version.java     2006-02-03 
22:55:27 UTC (rev 7999)
@@ -20,11 +20,19 @@
        public static final String protocolVersion = "1.0";

        /** The build number of the current revision */
-       public static final int buildNumber = 312;
+       private static final int buildNumber = 431;

        /** Oldest build of Fred we will talk to */
-       public static final int lastGoodBuild = 310;
+       private static final int lastGoodBuild = 403;

+       public static final int buildNumber() {
+               return buildNumber;
+       }
+       
+       public static final int lastGoodBuild() {
+               return lastGoodBuild;
+       }
+       
        /** The highest reported build of fred */
        public static int highestSeenBuild = buildNumber;

@@ -57,13 +65,26 @@
                        { nodeName, nodeVersion, protocolVersion, "" + 
buildNumber };
                return ret;
        }
-
+       
+       public static final String[] getLastGoodVersion() {
+               String[] ret =
+                       { nodeName, nodeVersion, protocolVersion, "" + 
lastGoodBuild };
+               return ret;
+       }
+       
        /**
         * @return the version string that should be presented in the 
NodeReference
         */
        public static final String getVersionString() {
                return Fields.commaList(getVersion());
        }
+       
+       /**
+        * @return the last-good version string; needed for the freeviz
+        */
+       public static final String getLastGoodVersionString() {
+               return Fields.commaList(getLastGoodVersion());
+       }

        /**
         * @return true if requests should be accepted from nodes brandishing 
this
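
The buildNumber and lastGoodBuild fields are now private, so callers go through
the new accessors; the STATUS hunk above already compares buildNumber() against
highestSeenBuild this way. A minimal sketch of the intended call sites (the
wrapper class and the printed strings are illustrative, not part of the commit):

    import freenet.node.Version;

    public class VersionCheckExample {
        public static void main(String[] args) {
            System.out.println("Running build: " + Version.buildNumber());
            System.out.println("Oldest build we will talk to: " + Version.lastGoodBuild());
            // getLastGoodVersionString() comma-joins nodeName, nodeVersion,
            // protocolVersion and lastGoodBuild (used by freeviz).
            System.out.println("Last good version: " + Version.getLastGoodVersionString());
            if (Version.buildNumber() < Version.highestSeenBuild)
                System.out.println("A newer build has been seen: " + Version.highestSeenBuild);
        }
    }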

Copied: branches/freenet-freejvms/src/freenet/node/fcp (from rev 7998, 
trunk/freenet/src/freenet/node/fcp)

Deleted: branches/freenet-freejvms/src/freenet/node/fcp/AllDataMessage.java
===================================================================
--- trunk/freenet/src/freenet/node/fcp/AllDataMessage.java      2006-02-03 
22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/freenet/node/fcp/AllDataMessage.java  
2006-02-03 22:55:27 UTC (rev 7999)
@@ -1,42 +0,0 @@
-package freenet.node.fcp;
-
-import freenet.node.Node;
-import freenet.support.Bucket;
-import freenet.support.SimpleFieldSet;
-
-/**
- * All the data, all in one big chunk. Obviously we must already have
- * all the data to send it. We do not want to have to block on a request,
- * especially as there may be errors.
- */
-public class AllDataMessage extends DataCarryingMessage {
-
-       final long dataLength;
-       final String identifier;
-       
-       public AllDataMessage(FCPConnectionHandler handler, Bucket bucket, 
String identifier) {
-               this.bucket = bucket;
-               this.dataLength = bucket.size();
-               this.identifier = identifier;
-       }
-
-       long dataLength() {
-               return dataLength;
-       }
-
-       public SimpleFieldSet getFieldSet() {
-               SimpleFieldSet fs = new SimpleFieldSet();
-               fs.put("DataLength", Long.toString(dataLength));
-               fs.put("Identifier", identifier);
-               return fs;
-       }
-
-       public String getName() {
-               return "AllData";
-       }
-
-       public void run(FCPConnectionHandler handler, Node node) throws 
MessageInvalidException {
-               throw new 
MessageInvalidException(ProtocolErrorMessage.INVALID_MESSAGE, "AllData goes 
from server to client not the other way around");
-       }
-
-}

Copied: branches/freenet-freejvms/src/freenet/node/fcp/AllDataMessage.java 
(from rev 7998, trunk/freenet/src/freenet/node/fcp/AllDataMessage.java)

Deleted: branches/freenet-freejvms/src/freenet/node/fcp/ClientGet.java
===================================================================
--- trunk/freenet/src/freenet/node/fcp/ClientGet.java   2006-02-03 22:35:15 UTC 
(rev 7998)
+++ branches/freenet-freejvms/src/freenet/node/fcp/ClientGet.java       
2006-02-03 22:55:27 UTC (rev 7999)
@@ -1,108 +0,0 @@
-package freenet.node.fcp;
-
-import freenet.client.FetchException;
-import freenet.client.FetchResult;
-import freenet.client.FetcherContext;
-import freenet.client.InserterException;
-import freenet.client.async.BaseClientPutter;
-import freenet.client.async.ClientCallback;
-import freenet.client.async.ClientGetter;
-import freenet.client.async.ClientPutter;
-import freenet.client.events.ClientEvent;
-import freenet.client.events.ClientEventListener;
-import freenet.client.events.SplitfileProgressEvent;
-import freenet.keys.FreenetURI;
-import freenet.support.Logger;
-
-/**
- * A simple client fetch. This can of course fetch arbitrarily large
- * files, including splitfiles, redirects, etc.
- */
-public class ClientGet extends ClientRequest implements ClientCallback, 
ClientEventListener {
-
-       private final FreenetURI uri;
-       private final FetcherContext fctx;
-       private final String identifier;
-       private final int verbosity;
-       private final FCPConnectionHandler handler;
-       private final ClientGetter getter;
-       private final short priorityClass;
-       private boolean finished;
-       
-       // Verbosity bitmasks
-       private int VERBOSITY_SPLITFILE_PROGRESS = 1;
-       
-       public ClientGet(FCPConnectionHandler handler, ClientGetMessage 
message) {
-               uri = message.uri;
-               // FIXME
-               this.priorityClass = message.priorityClass;
-               // Create a Fetcher directly in order to get more fine-grained 
control,
-               // since the client may override a few context elements.
-               this.handler = handler;
-               fctx = new FetcherContext(handler.defaultFetchContext, 
FetcherContext.IDENTICAL_MASK);
-               fctx.eventProducer.addEventListener(this);
-               // ignoreDS
-               fctx.localRequestOnly = message.dsOnly;
-               fctx.ignoreStore = message.ignoreDS;
-               fctx.maxNonSplitfileRetries = message.maxRetries;
-               fctx.maxSplitfileBlockRetries = 
Math.max(fctx.maxSplitfileBlockRetries, message.maxRetries);
-               this.identifier = message.identifier;
-               this.verbosity = message.verbosity;
-               // FIXME do something with verbosity !!
-               // Has already been checked
-               if(message.returnType != ClientGetMessage.RETURN_TYPE_DIRECT)
-                       throw new IllegalStateException("Unknown return type: 
"+message.returnType);
-               fctx.maxOutputLength = message.maxSize;
-               fctx.maxTempLength = message.maxTempSize;
-               getter = new ClientGetter(this, handler.node.fetchScheduler, 
uri, fctx, priorityClass, handler.defaultFetchContext);
-               try {
-                       getter.start();
-               } catch (FetchException e) {
-                       onFailure(e, null);
-               }
-       }
-
-       public void cancel() {
-               getter.cancel();
-       }
-
-       public void onSuccess(FetchResult result, ClientGetter state) {
-               finished = true;
-               FCPMessage msg = new DataFoundMessage(handler, result, 
identifier);
-               handler.outputHandler.queue(msg);
-               // Send all the data at once
-               // FIXME there should be other options
-               msg = new AllDataMessage(handler, result.asBucket(), 
identifier);
-               handler.outputHandler.queue(msg);
-       }
-
-       public void onFailure(FetchException e, ClientGetter state) {
-               finished = true;
-               Logger.minor(this, "Caught "+e, e);
-               FCPMessage msg = new GetFailedMessage(handler, e, identifier);
-               handler.outputHandler.queue(msg);
-       }
-
-       public void onSuccess(BaseClientPutter state) {
-               // Ignore
-       }
-
-       public void onFailure(InserterException e, BaseClientPutter state) {
-               // Ignore
-       }
-
-       public void onGeneratedURI(FreenetURI uri, BaseClientPutter state) {
-               // Ignore
-       }
-
-       public void receive(ClientEvent ce) {
-               if(finished) return;
-               if(!(((verbosity & VERBOSITY_SPLITFILE_PROGRESS) == 
VERBOSITY_SPLITFILE_PROGRESS) &&
-                               (ce instanceof SplitfileProgressEvent)))
-                       return;
-               SimpleProgressMessage progress = 
-                       new SimpleProgressMessage(identifier, 
(SplitfileProgressEvent)ce);
-               handler.outputHandler.queue(progress);
-       }
-
-}

Copied: branches/freenet-freejvms/src/freenet/node/fcp/ClientGet.java (from rev 
7998, trunk/freenet/src/freenet/node/fcp/ClientGet.java)
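
On a successful fetch, ClientGet.onSuccess() queues a DataFoundMessage followed
by an AllDataMessage, so (given the field sets and end markers defined in the
message classes in this directory) a client should see roughly the following on
the wire; the identifier, MIME type and length below are made-up values:

    DataFound
    Identifier=Request Number One
    Metadata.ContentType=text/plain
    DataLength=17457
    EndMessage
    AllData
    DataLength=17457
    Identifier=Request Number One
    Data
    <17457 bytes of the fetched data follow>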

Deleted: branches/freenet-freejvms/src/freenet/node/fcp/ClientGetMessage.java
===================================================================
--- trunk/freenet/src/freenet/node/fcp/ClientGetMessage.java    2006-02-03 
22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/freenet/node/fcp/ClientGetMessage.java        
2006-02-03 22:55:27 UTC (rev 7999)
@@ -1,147 +0,0 @@
-package freenet.node.fcp;
-
-import java.net.MalformedURLException;
-
-import freenet.keys.FreenetURI;
-import freenet.node.Node;
-import freenet.node.RequestStarter;
-import freenet.support.SimpleFieldSet;
-
-/**
- * ClientGet message.
- * 
- * Example:
- * 
- * ClientGet
- * IgnoreDS=false // true = ignore the datastore
- * DSOnly=false // true = only check the datastore, don't route (~= htl 0)
- * URI=KSK at gpl.txt
- * Identifier=Request Number One
- * Verbosity=0 // no status, just tell us when it's done
- * ReturnType=direct // return all at once over the FCP connection
- * MaxSize=100 // maximum size of returned data 
- * MaxTempSize=1000 // maximum size of intermediary data
- * MaxRetries=100 // automatic retry supported as an option
- * PriorityClass=1 // priority class 1 = interactive
- * EndMessage
- */
-public class ClientGetMessage extends FCPMessage {
-
-       public final static String name = "ClientGet";
-       final boolean ignoreDS;
-       final boolean dsOnly;
-       final FreenetURI uri;
-       final String identifier;
-       final int verbosity;
-       final int returnType;
-       final long maxSize;
-       final long maxTempSize;
-       final int maxRetries;
-       final short priorityClass;
-       
-       // FIXME move these to the actual getter process
-       static final int RETURN_TYPE_DIRECT = 0;
-       
-       public ClientGetMessage(SimpleFieldSet fs) throws 
MessageInvalidException {
-               ignoreDS = Boolean.getBoolean(fs.get("IgnoreDS"));
-               dsOnly = Boolean.getBoolean(fs.get("DSOnly"));
-               try {
-                       uri = new FreenetURI(fs.get("URI"));
-               } catch (MalformedURLException e) {
-                       throw new 
MessageInvalidException(ProtocolErrorMessage.URI_PARSE_ERROR, e.getMessage());
-               }
-               identifier = fs.get("Identifier");
-               if(identifier == null)
-                       throw new 
MessageInvalidException(ProtocolErrorMessage.MISSING_FIELD, "No Identifier");
-               String verbosityString = fs.get("Verbosity");
-               if(verbosityString == null)
-                       verbosity = 0;
-               else {
-                       try {
-                               verbosity = Integer.parseInt(verbosityString, 
10);
-                       } catch (NumberFormatException e) {
-                               throw new 
MessageInvalidException(ProtocolErrorMessage.ERROR_PARSING_NUMBER, "Error 
parsing Verbosity field: "+e.getMessage());
-                       }
-               }
-               String returnTypeString = fs.get("ReturnType");
-               if(returnTypeString == null || 
returnTypeString.equalsIgnoreCase("direct"))
-                       returnType = RETURN_TYPE_DIRECT;
-               else
-                       throw new 
MessageInvalidException(ProtocolErrorMessage.MESSAGE_PARSE_ERROR, "Unknown 
return-type");
-               String maxSizeString = fs.get("MaxSize");
-               if(maxSizeString == null)
-                       // default to unlimited
-                       maxSize = Long.MAX_VALUE;
-               else {
-                       try {
-                               maxSize = Long.parseLong(maxSizeString, 10);
-                       } catch (NumberFormatException e) {
-                               throw new 
MessageInvalidException(ProtocolErrorMessage.ERROR_PARSING_NUMBER, "Error 
parsing MaxSize field: "+e.getMessage());
-                       }
-               }
-               String maxTempSizeString = fs.get("MaxTempSize");
-               if(maxTempSizeString == null)
-                       // default to unlimited
-                       maxTempSize = Long.MAX_VALUE;
-               else {
-                       try {
-                               maxTempSize = Long.parseLong(maxTempSizeString, 
10);
-                       } catch (NumberFormatException e) {
-                               throw new 
MessageInvalidException(ProtocolErrorMessage.ERROR_PARSING_NUMBER, "Error 
parsing MaxSize field: "+e.getMessage());
-                       }
-               }
-               String maxRetriesString = fs.get("MaxRetries");
-               if(maxRetriesString == null)
-                       // default to 0
-                       maxRetries = 0;
-               else {
-                       try {
-                               maxRetries = Integer.parseInt(maxRetriesString, 
10);
-                       } catch (NumberFormatException e) {
-                               throw new 
MessageInvalidException(ProtocolErrorMessage.ERROR_PARSING_NUMBER, "Error 
parsing MaxSize field: "+e.getMessage());
-                       }
-               }
-               String priorityString = fs.get("PriorityClass");
-               if(priorityString == null) {
-                       // defaults to the one just below fproxy
-                       priorityClass = 
RequestStarter.IMMEDIATE_SPLITFILE_PRIORITY_CLASS;
-               } else {
-                       try {
-                               priorityClass = 
Short.parseShort(priorityString, 10);
-                               if(priorityClass < 
RequestStarter.MAXIMUM_PRIORITY_CLASS || priorityClass > 
RequestStarter.MINIMUM_PRIORITY_CLASS)
-                                       throw new 
MessageInvalidException(ProtocolErrorMessage.INVALID_FIELD, "Valid priorities 
are from "+RequestStarter.MAXIMUM_PRIORITY_CLASS+" to 
"+RequestStarter.MINIMUM_PRIORITY_CLASS);
-                       } catch (NumberFormatException e) {
-                               throw new 
MessageInvalidException(ProtocolErrorMessage.ERROR_PARSING_NUMBER, "Error 
parsing PriorityClass field: "+e.getMessage());
-                       }
-               }
-       }
-
-       public SimpleFieldSet getFieldSet() {
-               SimpleFieldSet fs = new SimpleFieldSet();
-               fs.put("IgnoreDS", Boolean.toString(ignoreDS));
-               fs.put("URI", uri.toString(false));
-               fs.put("Identifier", identifier);
-               fs.put("Verbosity", Integer.toString(verbosity));
-               fs.put("ReturnType", getReturnTypeString());
-               fs.put("MaxSize", Long.toString(maxSize));
-               fs.put("MaxTempSize", Long.toString(maxTempSize));
-               fs.put("MaxRetries", Integer.toString(maxRetries));
-               return fs;
-       }
-
-       private String getReturnTypeString() {
-               if(returnType == RETURN_TYPE_DIRECT)
-                       return "direct";
-               else
-                       throw new IllegalStateException("Unknown return type: 
"+returnType);
-       }
-
-       public String getName() {
-               return name;
-       }
-
-       public void run(FCPConnectionHandler handler, Node node) {
-               handler.startClientGet(this);
-       }
-
-}

Copied: branches/freenet-freejvms/src/freenet/node/fcp/ClientGetMessage.java 
(from rev 7998, trunk/freenet/src/freenet/node/fcp/ClientGetMessage.java)

Deleted: branches/freenet-freejvms/src/freenet/node/fcp/ClientHelloMessage.java
===================================================================
--- trunk/freenet/src/freenet/node/fcp/ClientHelloMessage.java  2006-02-03 
22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/freenet/node/fcp/ClientHelloMessage.java      
2006-02-03 22:55:27 UTC (rev 7999)
@@ -1,46 +0,0 @@
-package freenet.node.fcp;
-
-import freenet.node.Node;
-import freenet.support.SimpleFieldSet;
-
-/**
- *  ClientHello
- *  Name=Toad's Test Client
- *  ExpectedVersion=0.7.0
- *  End
- */
-public class ClientHelloMessage extends FCPMessage {
-
-       public final static String name = "ClientHello";
-       String clientName;
-       String clientExpectedVersion;
-       
-       public ClientHelloMessage(SimpleFieldSet fs) throws 
MessageInvalidException {
-               clientName = fs.get("Name");
-               clientExpectedVersion = fs.get("ExpectedVersion");
-               if(clientName == null)
-                       throw new 
MessageInvalidException(ProtocolErrorMessage.MISSING_FIELD, "ClientHello must 
contain a Name field");
-               if(clientExpectedVersion == null)
-                       throw new 
MessageInvalidException(ProtocolErrorMessage.MISSING_FIELD, "ClientHello must 
contain a ExpectedVersion field");
-               // FIXME check the expected version
-       }
-
-       public SimpleFieldSet getFieldSet() {
-               SimpleFieldSet sfs = new SimpleFieldSet();
-               sfs.put("Name", clientName);
-               sfs.put("ExpectedVersion", clientExpectedVersion);
-               return sfs;
-       }
-
-       public String getName() {
-               return name;
-       }
-
-       public void run(FCPConnectionHandler handler, Node node) {
-               // We know the Hello is valid.
-               handler.setClientName(clientName);
-               FCPMessage msg = new NodeHelloMessage(node);
-               handler.outputHandler.queue(msg);
-       }
-
-}

Copied: branches/freenet-freejvms/src/freenet/node/fcp/ClientHelloMessage.java 
(from rev 7998, trunk/freenet/src/freenet/node/fcp/ClientHelloMessage.java)

Deleted: branches/freenet-freejvms/src/freenet/node/fcp/ClientPut.java
===================================================================
--- trunk/freenet/src/freenet/node/fcp/ClientPut.java   2006-02-03 22:35:15 UTC 
(rev 7998)
+++ branches/freenet-freejvms/src/freenet/node/fcp/ClientPut.java       
2006-02-03 22:55:27 UTC (rev 7999)
@@ -1,95 +0,0 @@
-package freenet.node.fcp;
-
-import freenet.client.ClientMetadata;
-import freenet.client.FetchException;
-import freenet.client.FetchResult;
-import freenet.client.InsertBlock;
-import freenet.client.InserterContext;
-import freenet.client.InserterException;
-import freenet.client.async.BaseClientPutter;
-import freenet.client.async.ClientCallback;
-import freenet.client.async.ClientGetter;
-import freenet.client.async.ClientPutter;
-import freenet.client.events.ClientEvent;
-import freenet.client.events.ClientEventListener;
-import freenet.client.events.SimpleEventProducer;
-import freenet.client.events.SplitfileProgressEvent;
-import freenet.keys.FreenetURI;
-
-public class ClientPut extends ClientRequest implements ClientCallback, 
ClientEventListener {
-
-       final FreenetURI uri;
-       final ClientPutter inserter;
-       final InserterContext ctx;
-       final InsertBlock block;
-       final FCPConnectionHandler handler;
-       final String identifier;
-       final boolean getCHKOnly;
-       final short priorityClass;
-       final int verbosity;
-       private boolean finished;
-       
-       // Verbosity bitmasks
-       private int VERBOSITY_SPLITFILE_PROGRESS = 1;
-       
-       public ClientPut(FCPConnectionHandler handler, ClientPutMessage 
message) {
-               this.verbosity = message.verbosity;
-               this.handler = handler;
-               this.identifier = message.identifier;
-               this.getCHKOnly = message.getCHKOnly;
-               this.priorityClass = message.priorityClass;
-               ctx = new InserterContext(handler.defaultInsertContext, new 
SimpleEventProducer());
-               ctx.eventProducer.addEventListener(this);
-               ctx.maxInsertRetries = message.maxRetries;
-               // Now go through the fields one at a time
-               uri = message.uri;
-               String mimeType = message.contentType;
-               block = new InsertBlock(message.bucket, new 
ClientMetadata(mimeType), uri);
-               inserter = new ClientPutter(this, message.bucket, uri, new 
ClientMetadata(mimeType), ctx, handler.node.putScheduler, priorityClass, 
getCHKOnly, false, handler.defaultInsertContext);
-               try {
-                       inserter.start();
-               } catch (InserterException e) {
-                       onFailure(e, null);
-               }
-       }
-
-       public void cancel() {
-               inserter.cancel();
-       }
-
-       public void onSuccess(BaseClientPutter state) {
-               finished = true;
-               FCPMessage msg = new PutSuccessfulMessage(identifier, 
state.getURI());
-               handler.outputHandler.queue(msg);
-       }
-
-       public void onFailure(InserterException e, BaseClientPutter state) {
-               finished = true;
-               FCPMessage msg = new PutFailedMessage(e, identifier);
-               handler.outputHandler.queue(msg);
-       }
-
-       public void onGeneratedURI(FreenetURI uri, BaseClientPutter state) {
-               FCPMessage msg = new URIGeneratedMessage(uri, identifier);
-               handler.outputHandler.queue(msg);
-       }
-
-       public void onSuccess(FetchResult result, ClientGetter state) {
-               // ignore
-       }
-
-       public void onFailure(FetchException e, ClientGetter state) {
-               // ignore
-       }
-
-       public void receive(ClientEvent ce) {
-               if(finished) return;
-               if(!(((verbosity & VERBOSITY_SPLITFILE_PROGRESS) == 
VERBOSITY_SPLITFILE_PROGRESS) &&
-                               (ce instanceof SplitfileProgressEvent)))
-                       return;
-               SimpleProgressMessage progress = 
-                       new SimpleProgressMessage(identifier, 
(SplitfileProgressEvent)ce);
-               handler.outputHandler.queue(progress);
-       }
-
-}

Copied: branches/freenet-freejvms/src/freenet/node/fcp/ClientPut.java (from rev 
7998, trunk/freenet/src/freenet/node/fcp/ClientPut.java)

Deleted: branches/freenet-freejvms/src/freenet/node/fcp/ClientPutMessage.java
===================================================================
--- trunk/freenet/src/freenet/node/fcp/ClientPutMessage.java    2006-02-03 
22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/freenet/node/fcp/ClientPutMessage.java        
2006-02-03 22:55:27 UTC (rev 7999)
@@ -1,143 +0,0 @@
-package freenet.node.fcp;
-
-import java.io.File;
-import java.net.MalformedURLException;
-
-import freenet.keys.FreenetURI;
-import freenet.node.Node;
-import freenet.node.RequestStarter;
-import freenet.support.SimpleFieldSet;
-import freenet.support.io.FileBucket;
-
-/**
- * 
- * ClientPut
- * URI=CHK@ // could as easily be an insertable SSK URI
- * Metadata.ContentType=text/html
- * Identifier=Insert-1 // identifier, as always
- * Verbosity=0 // just report when complete
- * MaxRetries=999999 // lots of retries
- * PriorityClass=1 // fproxy priority level
- * 
- * UploadFrom=direct // attached directly to this message
- * DataLength=100 // 100kB
- * or
- * UploadFrom=disk // upload a file from disk
- * Filename=/home/toad/something.html
- * Data
- * 
- * Neither IgnoreDS nor DSOnly make sense for inserts.
- */
-public class ClientPutMessage extends DataCarryingMessage {
-
-       public final static String name = "ClientPut";
-       
-       final FreenetURI uri;
-       final String contentType;
-       final long dataLength;
-       final String identifier;
-       final int verbosity;
-       final int maxRetries;
-       final boolean getCHKOnly;
-       final short priorityClass;
-       final boolean fromDisk;
-       
-       public ClientPutMessage(SimpleFieldSet fs) throws 
MessageInvalidException {
-               try {
-                       String u = fs.get("URI");
-                       if(u == null)
-                               throw new 
MessageInvalidException(ProtocolErrorMessage.MISSING_FIELD, "No URI");
-                       uri = new FreenetURI(fs.get("URI"));
-               } catch (MalformedURLException e) {
-                       throw new 
MessageInvalidException(ProtocolErrorMessage.URI_PARSE_ERROR, e.getMessage());
-               }
-               identifier = fs.get("Identifier");
-               if(identifier == null)
-                       throw new 
MessageInvalidException(ProtocolErrorMessage.MISSING_FIELD, "No Identifier");
-               String verbosityString = fs.get("Verbosity");
-               if(verbosityString == null)
-                       verbosity = 0;
-               else {
-                       try {
-                               verbosity = Integer.parseInt(verbosityString, 
10);
-                       } catch (NumberFormatException e) {
-                               throw new 
MessageInvalidException(ProtocolErrorMessage.ERROR_PARSING_NUMBER, "Error 
parsing Verbosity field: "+e.getMessage());
-                       }
-               }
-               contentType = fs.get("Metadata.ContentType");
-               String maxRetriesString = fs.get("MaxRetries");
-               if(maxRetriesString == null)
-                       // default to 0
-                       maxRetries = 0;
-               else {
-                       try {
-                               maxRetries = Integer.parseInt(maxRetriesString, 
10);
-                       } catch (NumberFormatException e) {
-                               throw new 
MessageInvalidException(ProtocolErrorMessage.ERROR_PARSING_NUMBER, "Error 
parsing MaxSize field: "+e.getMessage());
-                       }
-               }
-               getCHKOnly = Boolean.getBoolean(fs.get("GetCHKOnly"));
-               String priorityString = fs.get("PriorityClass");
-               if(priorityString == null) {
-                       // defaults to the one just below fproxy
-                       priorityClass = 
RequestStarter.IMMEDIATE_SPLITFILE_PRIORITY_CLASS;
-               } else {
-                       try {
-                               priorityClass = 
Short.parseShort(priorityString, 10);
-                               if(priorityClass < 
RequestStarter.MAXIMUM_PRIORITY_CLASS || priorityClass > 
RequestStarter.MINIMUM_PRIORITY_CLASS)
-                                       throw new 
MessageInvalidException(ProtocolErrorMessage.INVALID_FIELD, "Valid priorities 
are from "+RequestStarter.MAXIMUM_PRIORITY_CLASS+" to 
"+RequestStarter.MINIMUM_PRIORITY_CLASS);
-                       } catch (NumberFormatException e) {
-                               throw new 
MessageInvalidException(ProtocolErrorMessage.ERROR_PARSING_NUMBER, "Error 
parsing PriorityClass field: "+e.getMessage());
-                       }
-               }
-               String uploadFrom = fs.get("UploadFrom");
-               if(uploadFrom != null && uploadFrom.equalsIgnoreCase("disk")) {
-                       fromDisk = true;
-                       String filename = fs.get("Filename");
-                       if(filename == null)
-                               throw new 
MessageInvalidException(ProtocolErrorMessage.MISSING_FIELD, "Missing field 
Filename");
-                       File f = new File(filename);
-                       if(!(f.exists() && f.isFile() && f.canRead()))
-                               throw new 
MessageInvalidException(ProtocolErrorMessage.FILE_NOT_FOUND, null);
-                       dataLength = f.length();
-                       FileBucket fileBucket = new FileBucket(f, true, false, 
false);
-                       this.bucket = fileBucket;
-               } else {
-                       fromDisk = false;
-                       String dataLengthString = fs.get("DataLength");
-                       if(dataLengthString == null)
-                               throw new 
MessageInvalidException(ProtocolErrorMessage.MISSING_FIELD, "Need DataLength on 
a ClientPut");
-                       try {
-                               dataLength = Long.parseLong(dataLengthString, 
10);
-                       } catch (NumberFormatException e) {
-                               throw new 
MessageInvalidException(ProtocolErrorMessage.ERROR_PARSING_NUMBER, "Error 
parsing DataLength field: "+e.getMessage());
-                       }
-               }
-       }
-
-       public SimpleFieldSet getFieldSet() {
-               SimpleFieldSet sfs = new SimpleFieldSet();
-               sfs.put("URI", uri.toString());
-               sfs.put("Identifier", identifier);
-               sfs.put("DataLength", Long.toString(dataLength));
-               sfs.put("Verbosity", Integer.toString(verbosity));
-               sfs.put("MaxRetries", Integer.toString(maxRetries));
-               sfs.put("Metadata.ContentType", contentType);
-               return sfs;
-       }
-
-       public String getName() {
-               return name;
-       }
-
-       public void run(FCPConnectionHandler handler, Node node)
-                       throws MessageInvalidException {
-               handler.startClientPut(this);
-       }
-
-       long dataLength() {
-               if(fromDisk) return 0;
-               return dataLength;
-       }
-
-}

Copied: branches/freenet-freejvms/src/freenet/node/fcp/ClientPutMessage.java 
(from rev 7998, trunk/freenet/src/freenet/node/fcp/ClientPutMessage.java)

Deleted: branches/freenet-freejvms/src/freenet/node/fcp/ClientRequest.java
===================================================================
--- trunk/freenet/src/freenet/node/fcp/ClientRequest.java       2006-02-03 
22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/freenet/node/fcp/ClientRequest.java   
2006-02-03 22:55:27 UTC (rev 7999)
@@ -1,12 +0,0 @@
-package freenet.node.fcp;
-
-/**
- * A request process carried out by the node for an FCP client.
- * Examples: ClientGet, ClientPut, MultiGet.
- */
-public abstract class ClientRequest {
-
-       /** Cancel */
-       public abstract void cancel();
-
-}

Copied: branches/freenet-freejvms/src/freenet/node/fcp/ClientRequest.java (from 
rev 7998, trunk/freenet/src/freenet/node/fcp/ClientRequest.java)

Deleted: branches/freenet-freejvms/src/freenet/node/fcp/DataCarryingMessage.java
===================================================================
--- trunk/freenet/src/freenet/node/fcp/DataCarryingMessage.java 2006-02-03 
22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/freenet/node/fcp/DataCarryingMessage.java     
2006-02-03 22:55:27 UTC (rev 7999)
@@ -1,37 +0,0 @@
-package freenet.node.fcp;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-
-import freenet.support.Bucket;
-import freenet.support.BucketFactory;
-import freenet.support.BucketTools;
-
-
-public abstract class DataCarryingMessage extends FCPMessage {
-
-       protected Bucket bucket;
-       
-       abstract long dataLength();
-
-       public void readFrom(InputStream is, BucketFactory bf) throws 
IOException {
-               long len = dataLength();
-               if(len < 0)
-                       throw new IllegalArgumentException("Invalid length: 
"+len);
-               if(len == 0) return;
-               Bucket bucket = bf.makeBucket(len);
-               BucketTools.copyFrom(bucket, is, len);
-               this.bucket = bucket;
-       }
-       
-       public void send(OutputStream os) throws IOException {
-               super.send(os);
-               BucketTools.copyTo(bucket, os, dataLength());
-       }
-
-       String getEndString() {
-               return "Data";
-       }
-       
-}

Copied: branches/freenet-freejvms/src/freenet/node/fcp/DataCarryingMessage.java 
(from rev 7998, trunk/freenet/src/freenet/node/fcp/DataCarryingMessage.java)
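
DataCarryingMessage is what lets ClientPut (with UploadFrom=direct) and AllData
carry a payload: the field set ends with "Data" rather than "EndMessage", and
exactly dataLength() bytes follow it. Building on the example in the
ClientPutMessage javadoc above, a direct insert would look roughly like this
(URI, identifier and length are illustrative):

    ClientPut
    URI=CHK@
    Metadata.ContentType=text/html
    Identifier=Insert-1
    Verbosity=0
    MaxRetries=999999
    PriorityClass=1
    UploadFrom=direct
    DataLength=2048
    Data
    <2048 bytes of the document follow>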

Deleted: branches/freenet-freejvms/src/freenet/node/fcp/DataFoundMessage.java
===================================================================
--- trunk/freenet/src/freenet/node/fcp/DataFoundMessage.java    2006-02-03 
22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/freenet/node/fcp/DataFoundMessage.java        
2006-02-03 22:55:27 UTC (rev 7999)
@@ -1,35 +0,0 @@
-package freenet.node.fcp;
-
-import freenet.client.FetchResult;
-import freenet.node.Node;
-import freenet.support.SimpleFieldSet;
-
-public class DataFoundMessage extends FCPMessage {
-
-       final String identifier;
-       final String mimeType;
-       final long dataLength;
-       
-       public DataFoundMessage(FCPConnectionHandler handler, FetchResult fr, 
String identifier) {
-               this.identifier = identifier;
-               this.mimeType = fr.getMimeType();
-               this.dataLength = fr.size();
-       }
-
-       public SimpleFieldSet getFieldSet() {
-               SimpleFieldSet fs = new SimpleFieldSet();
-               fs.put("Identifier", identifier);
-               fs.put("Metadata.ContentType", mimeType);
-               fs.put("DataLength", Long.toString(dataLength));
-               return fs;
-       }
-
-       public String getName() {
-               return "DataFound";
-       }
-
-       public void run(FCPConnectionHandler handler, Node node) throws 
MessageInvalidException {
-               throw new 
MessageInvalidException(ProtocolErrorMessage.INVALID_MESSAGE, "DataFound goes 
from server to client not the other way around");
-       }
-
-}

Copied: branches/freenet-freejvms/src/freenet/node/fcp/DataFoundMessage.java 
(from rev 7998, trunk/freenet/src/freenet/node/fcp/DataFoundMessage.java)

Deleted: 
branches/freenet-freejvms/src/freenet/node/fcp/FCPConnectionHandler.java
===================================================================
--- trunk/freenet/src/freenet/node/fcp/FCPConnectionHandler.java        
2006-02-03 22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/freenet/node/fcp/FCPConnectionHandler.java    
2006-02-03 22:55:27 UTC (rev 7999)
@@ -1,130 +0,0 @@
-package freenet.node.fcp;
-
-import java.io.IOException;
-import java.net.Socket;
-import java.util.HashMap;
-
-import freenet.client.FetcherContext;
-import freenet.client.HighLevelSimpleClient;
-import freenet.client.InserterContext;
-import freenet.node.Node;
-import freenet.support.BucketFactory;
-import freenet.support.Logger;
-
-public class FCPConnectionHandler {
-
-       final Socket sock;
-       final FCPConnectionInputHandler inputHandler;
-       final FCPConnectionOutputHandler outputHandler;
-       final Node node;
-       private boolean isClosed;
-       private boolean inputClosed;
-       private boolean outputClosed;
-       private String clientName;
-       final BucketFactory bf;
-       final HashMap requestsByIdentifier;
-       final FetcherContext defaultFetchContext;
-       public InserterContext defaultInsertContext;
-       
-       public FCPConnectionHandler(Socket s, Node node) {
-               this.sock = s;
-               this.node = node;
-               isClosed = false;
-               this.bf = node.tempBucketFactory;
-               requestsByIdentifier = new HashMap();
-               HighLevelSimpleClient client = node.makeClient((short)0);
-               defaultFetchContext = client.getFetcherContext();
-               defaultInsertContext = client.getInserterContext();
-               this.inputHandler = new FCPConnectionInputHandler(this);
-               this.outputHandler = new FCPConnectionOutputHandler(this);
-               inputHandler.start();
-       }
-       
-       public void close() {
-               ClientRequest[] requests;
-               synchronized(this) {
-                       isClosed = true;
-                       requests = new 
ClientRequest[requestsByIdentifier.size()];
-                       requests = (ClientRequest[]) 
requestsByIdentifier.values().toArray(requests);
-               }
-               for(int i=0;i<requests.length;i++)
-                       requests[i].cancel();
-       }
-       
-       public boolean isClosed() {
-               return isClosed;
-       }
-       
-       public void closedInput() {
-               try {
-                       sock.shutdownInput();
-               } catch (IOException e) {
-                       // Ignore
-               }
-               synchronized(this) {
-                       inputClosed = true;
-                       if(!outputClosed) return;
-               }
-               try {
-                       sock.close();
-               } catch (IOException e) {
-                       // Ignore
-               }
-       }
-       
-       public void closedOutput() {
-               try {
-                       sock.shutdownOutput();
-               } catch (IOException e) {
-                       // Ignore
-               }
-               synchronized(this) {
-                       outputClosed = true;
-                       if(!inputClosed) return;
-               }
-               try {
-                       sock.close();
-               } catch (IOException e) {
-                       // Ignore
-               }
-       }
-
-       public void setClientName(String name) {
-               this.clientName = name;
-       }
-       
-       public String getClientName() {
-               return clientName;
-       }
-
-       public void startClientGet(ClientGetMessage message) {
-               String id = message.identifier;
-               if(requestsByIdentifier.containsKey(id)) {
-                       Logger.normal(this, "Identifier collision on "+this);
-                       FCPMessage msg = new IdentifierCollisionMessage(id);
-                       outputHandler.queue(msg);
-                       return;
-               }
-               synchronized(this) {
-                       if(isClosed) return;
-                       ClientGet cg = new ClientGet(this, message);
-               }
-       }
-
-       public void startClientPut(ClientPutMessage message) {
-               String id = message.identifier;
-               if(requestsByIdentifier.containsKey(id)) {
-                       Logger.normal(this, "Identifier collision on "+this);
-                       FCPMessage msg = new IdentifierCollisionMessage(id);
-                       outputHandler.queue(msg);
-                       return;
-               }
-               synchronized(this) {
-                       if(isClosed) return;
-                       ClientPut cg = new ClientPut(this, message);
-               }
-               
-               // TODO Auto-generated method stub
-               
-       }
-}

Copied: 
branches/freenet-freejvms/src/freenet/node/fcp/FCPConnectionHandler.java (from 
rev 7998, trunk/freenet/src/freenet/node/fcp/FCPConnectionHandler.java)

Deleted: 
branches/freenet-freejvms/src/freenet/node/fcp/FCPConnectionInputHandler.java
===================================================================
--- trunk/freenet/src/freenet/node/fcp/FCPConnectionInputHandler.java   
2006-02-03 22:35:15 UTC (rev 7998)
+++ 
branches/freenet-freejvms/src/freenet/node/fcp/FCPConnectionInputHandler.java   
    2006-02-03 22:55:27 UTC (rev 7999)
@@ -1,82 +0,0 @@
-package freenet.node.fcp;
-
-import java.io.IOException;
-import java.io.InputStream;
-
-import freenet.support.Logger;
-import freenet.support.SimpleFieldSet;
-import freenet.support.io.LineReadingInputStream;
-
-public class FCPConnectionInputHandler implements Runnable {
-
-       final FCPConnectionHandler handler;
-       
-       FCPConnectionInputHandler(FCPConnectionHandler handler) {
-               this.handler = handler;
-       }
-
-       void start() {
-               Thread t = new Thread(this, "FCP input handler for 
"+handler.sock.getRemoteSocketAddress()+":"+handler.sock.getPort());
-               t.setDaemon(true);
-               t.start();
-       }
-       
-       public void run() {
-               try {
-                       realRun();
-               } catch (IOException e) {
-                       Logger.minor(this, "Caught "+e, e);
-               } catch (Throwable t) {
-                       Logger.error(this, "Caught "+t, t);
-               }
-               handler.close();
-               handler.closedInput();
-       }
-       
-       public void realRun() throws IOException {
-               InputStream is = handler.sock.getInputStream();
-               LineReadingInputStream lis = new LineReadingInputStream(is);
-               
-               boolean firstMessage = true;
-               
-               while(true) {
-                       SimpleFieldSet fs;
-                       // Read a message
-                       String messageType = lis.readLine(64, 64);
-                       if(messageType.equals("")) continue;
-                       fs = new SimpleFieldSet(lis, 4096, 128);
-                       FCPMessage msg;
-                       try {
-                               msg = FCPMessage.create(messageType, fs);
-                               if(msg == null) continue;
-                       } catch (MessageInvalidException e) {
-                               FCPMessage err = new 
ProtocolErrorMessage(e.protocolCode, false, e.getMessage());
-                               handler.outputHandler.queue(err);
-                               continue;
-                       }
-                       if(firstMessage && !(msg instanceof 
ClientHelloMessage)) {
-                               FCPMessage err = new 
ProtocolErrorMessage(ProtocolErrorMessage.CLIENT_HELLO_MUST_BE_FIRST_MESSAGE, 
true, null);
-                               handler.outputHandler.queue(err);
-                               handler.close();
-                               continue;
-                       }
-                       if(msg instanceof DataCarryingMessage) {
-                               ((DataCarryingMessage)msg).readFrom(lis, 
handler.bf);
-                       }
-                       if((!firstMessage) && msg instanceof 
ClientHelloMessage) {
-                               FCPMessage err = new 
ProtocolErrorMessage(ProtocolErrorMessage.NO_LATE_CLIENT_HELLOS, false, null);
-                               handler.outputHandler.queue(err);
-                               continue;
-                       }
-                       try {
-                               msg.run(handler, handler.node);
-                       } catch (MessageInvalidException e) {
-                               FCPMessage err = new 
ProtocolErrorMessage(e.protocolCode, false, e.getMessage());
-                               handler.outputHandler.queue(err);
-                               continue;
-                       }
-                       firstMessage = false;
-                       if(handler.isClosed()) return;
-               }
-       }
-}

Copied: 
branches/freenet-freejvms/src/freenet/node/fcp/FCPConnectionInputHandler.java 
(from rev 7998, 
trunk/freenet/src/freenet/node/fcp/FCPConnectionInputHandler.java)

Deleted: 
branches/freenet-freejvms/src/freenet/node/fcp/FCPConnectionOutputHandler.java
===================================================================
--- trunk/freenet/src/freenet/node/fcp/FCPConnectionOutputHandler.java  
2006-02-03 22:35:15 UTC (rev 7998)
+++ 
branches/freenet-freejvms/src/freenet/node/fcp/FCPConnectionOutputHandler.java  
    2006-02-03 22:55:27 UTC (rev 7999)
@@ -1,65 +0,0 @@
-package freenet.node.fcp;
-
-import java.io.IOException;
-import java.io.OutputStream;
-import java.util.LinkedList;
-
-import freenet.support.Logger;
-
-public class FCPConnectionOutputHandler implements Runnable {
-
-       final FCPConnectionHandler handler;
-       final LinkedList outQueue;
-       
-       public FCPConnectionOutputHandler(FCPConnectionHandler handler) {
-               this.handler = handler;
-               this.outQueue = new LinkedList();
-               Thread t = new Thread(this, "FCP output handler for 
"+handler.sock.getRemoteSocketAddress()+":"+handler.sock.getPort());
-               t.setDaemon(true);
-               t.start();
-       }
-
-       public void run() {
-               try {
-                       realRun();
-               } catch (IOException e) {
-                       Logger.minor(this, "Caught "+e, e);
-               } catch (Throwable t) {
-                       Logger.minor(this, "Caught "+t, t);
-               }
-               handler.close();
-               handler.closedOutput();
-       }
-
-       private void realRun() throws IOException {
-               OutputStream os = handler.sock.getOutputStream();
-               while(true) {
-                       FCPMessage msg;
-                       synchronized(outQueue) {
-                               while(true) {
-                                       if(outQueue.isEmpty()) {
-                                               if(handler.isClosed()) return;
-                                               try {
-                                                       outQueue.wait(10000);
-                                               } catch (InterruptedException 
e) {
-                                                       // Ignore
-                                               }
-                                               continue;
-                                       }
-                                       msg = (FCPMessage) 
outQueue.removeFirst();
-                                       break;
-                               }
-                       }
-                       msg.send(os);
-                       if(handler.isClosed()) return;
-               }
-       }
-
-       public void queue(FCPMessage msg) {
-               synchronized(outQueue) {
-                       outQueue.add(msg);
-                       outQueue.notifyAll();
-               }
-       }
-
-}

Copied: 
branches/freenet-freejvms/src/freenet/node/fcp/FCPConnectionOutputHandler.java 
(from rev 7998, 
trunk/freenet/src/freenet/node/fcp/FCPConnectionOutputHandler.java)
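
The output handler above is a hand-rolled producer/consumer queue: queue() appends under the outQueue lock and notifies, while realRun() waits up to ten seconds at a time so it can notice a closed connection even when idle. Purely for comparison, here is a minimal standalone sketch of the same pattern using java.util.concurrent (a hypothetical class, not part of this commit; the wait/notify version above has the advantage of also running on pre-1.5 JVMs):

    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.TimeUnit;

    public class OutputQueueSketch implements Runnable {

        private final LinkedBlockingQueue<String> queue = new LinkedBlockingQueue<String>();
        private volatile boolean closed = false;

        public void enqueue(String msg) {
            queue.offer(msg);                    // corresponds to outQueue.add() + notifyAll()
        }

        public void close() {
            closed = true;
        }

        public void run() {
            try {
                while (!closed) {
                    // Poll with a timeout, mirroring outQueue.wait(10000) above,
                    // so the thread can notice close() even with nothing queued.
                    String msg = queue.poll(10, TimeUnit.SECONDS);
                    if (msg != null)
                        System.out.println(msg); // stand-in for msg.send(os)
                }
            } catch (InterruptedException e) {
                // give up, as the real handler does on any error
            }
        }

        public static void main(String[] args) throws Exception {
            OutputQueueSketch out = new OutputQueueSketch();
            Thread t = new Thread(out, "sketch output handler");
            t.setDaemon(true);                   // the real handler's thread is a daemon too
            t.start();
            out.enqueue("example message");
            Thread.sleep(100);                   // let the consumer print it
            out.close();
        }
    }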

Deleted: branches/freenet-freejvms/src/freenet/node/fcp/FCPMessage.java
===================================================================
--- trunk/freenet/src/freenet/node/fcp/FCPMessage.java  2006-02-03 22:35:15 UTC 
(rev 7998)
+++ branches/freenet-freejvms/src/freenet/node/fcp/FCPMessage.java      
2006-02-03 22:55:27 UTC (rev 7999)
@@ -1,48 +0,0 @@
-package freenet.node.fcp;
-
-import java.io.IOException;
-import java.io.OutputStream;
-
-import freenet.node.Node;
-import freenet.support.SimpleFieldSet;
-
-public abstract class FCPMessage {
-
-       public void send(OutputStream os) throws IOException {
-               SimpleFieldSet sfs = getFieldSet();
-               sfs.setEndMarker(getEndString());
-               String msg = sfs.toString();
-               os.write((getName()+"\n").getBytes("UTF-8"));
-               os.write(msg.getBytes("UTF-8"));
-       }
-
-       String getEndString() {
-               return "EndMessage";
-       }
-       
-       public abstract SimpleFieldSet getFieldSet();
-
-       public abstract String getName();
-       
-       public static FCPMessage create(String name, SimpleFieldSet fs) throws 
MessageInvalidException {
-               if(name.equals(ClientHelloMessage.name))
-                       return new ClientHelloMessage(fs);
-               if(name.equals(ClientGetMessage.name))
-                       return new ClientGetMessage(fs);
-               if(name.equals(ClientPutMessage.name))
-                       return new ClientPutMessage(fs);
-               if(name.equals(GenerateSSKMessage.name))
-                       return new GenerateSSKMessage();
-               if(name.equals("Void"))
-                       return null;
-//             if(name.equals("ClientPut"))
-//                     return new ClientPutFCPMessage(fs);
-               // TODO Auto-generated method stub
-               return null;
-       }
-
-       /** Do whatever it is that we do with this type of message. 
-        * @throws MessageInvalidException */
-       public abstract void run(FCPConnectionHandler handler, Node node) 
throws MessageInvalidException;
-
-}

Copied: branches/freenet-freejvms/src/freenet/node/fcp/FCPMessage.java (from 
rev 7998, trunk/freenet/src/freenet/node/fcp/FCPMessage.java)
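
FCPMessage.send() above frames every message the same way: the message name on its own line, the SimpleFieldSet rendered as Name=Value lines, and an end marker ("EndMessage" unless getEndString() is overridden), all encoded as UTF-8. A self-contained sketch of that framing (a hypothetical helper; the example field name and value are illustrative and not taken from this commit):

    import java.io.ByteArrayOutputStream;
    import java.io.IOException;
    import java.io.OutputStream;
    import java.util.LinkedHashMap;
    import java.util.Map;

    public class FcpFramingSketch {

        // Name line, then Name=Value fields, then the end marker, as UTF-8 bytes.
        static void writeMessage(OutputStream os, String name,
                Map<String, String> fields, String endMarker) throws IOException {
            StringBuilder sb = new StringBuilder();
            sb.append(name).append('\n');
            for (Map.Entry<String, String> e : fields.entrySet())
                sb.append(e.getKey()).append('=').append(e.getValue()).append('\n');
            sb.append(endMarker).append('\n');
            os.write(sb.toString().getBytes("UTF-8"));
        }

        public static void main(String[] args) throws IOException {
            Map<String, String> fields = new LinkedHashMap<String, String>();
            fields.put("ExampleField", "ExampleValue");   // illustrative only
            ByteArrayOutputStream buf = new ByteArrayOutputStream();
            writeMessage(buf, "ClientHello", fields, "EndMessage");
            System.out.print(buf.toString("UTF-8"));
        }
    }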

Deleted: branches/freenet-freejvms/src/freenet/node/fcp/FCPServer.java
===================================================================
--- trunk/freenet/src/freenet/node/fcp/FCPServer.java   2006-02-03 22:35:15 UTC 
(rev 7998)
+++ branches/freenet-freejvms/src/freenet/node/fcp/FCPServer.java       
2006-02-03 22:55:27 UTC (rev 7999)
@@ -1,45 +0,0 @@
-package freenet.node.fcp;
-
-import java.io.IOException;
-import java.net.InetAddress;
-import java.net.ServerSocket;
-import java.net.Socket;
-
-import freenet.node.Node;
-import freenet.support.Logger;
-
-/**
- * FCP server process.
- */
-public class FCPServer implements Runnable {
-
-       final ServerSocket sock;
-       final Node node;
-       
-       public FCPServer(int port, Node node) throws IOException {
-               this.sock = new ServerSocket(port, 0, 
InetAddress.getByName("127.0.0.1"));
-               this.node = node;
-               Thread t = new Thread(this, "FCP server");
-               t.setDaemon(true);
-               t.start();
-       }
-       
-       public void run() {
-               while(true) {
-                       try {
-                               realRun();
-                       } catch (IOException e) {
-                               Logger.minor(this, "Caught "+e, e);
-                       } catch (Throwable t) {
-                               Logger.error(this, "Caught "+t, t);
-                       }
-               }
-       }
-
-       private void realRun() throws IOException {
-               // Accept a connection
-               Socket s = sock.accept();
-               FCPConnectionHandler handler = new FCPConnectionHandler(s, 
node);
-       }
-
-}

Copied: branches/freenet-freejvms/src/freenet/node/fcp/FCPServer.java (from rev 
7998, trunk/freenet/src/freenet/node/fcp/FCPServer.java)

Deleted: branches/freenet-freejvms/src/freenet/node/fcp/GenerateSSKMessage.java
===================================================================
--- trunk/freenet/src/freenet/node/fcp/GenerateSSKMessage.java  2006-02-03 
22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/freenet/node/fcp/GenerateSSKMessage.java      
2006-02-03 22:55:27 UTC (rev 7999)
@@ -1,29 +0,0 @@
-package freenet.node.fcp;
-
-import freenet.keys.FreenetURI;
-import freenet.keys.InsertableClientSSK;
-import freenet.node.Node;
-import freenet.support.SimpleFieldSet;
-
-public class GenerateSSKMessage extends FCPMessage {
-
-       static final String name = "GenerateSSK";
-       
-       public SimpleFieldSet getFieldSet() {
-               return new SimpleFieldSet();
-       }
-
-       public String getName() {
-               return name;
-       }
-
-       public void run(FCPConnectionHandler handler, Node node)
-                       throws MessageInvalidException {
-       InsertableClientSSK key = InsertableClientSSK.createRandom(node.random);
-       FreenetURI insertURI = key.getInsertURI();
-       FreenetURI requestURI = key.getURI();
-       SSKKeypairMessage msg = new SSKKeypairMessage(insertURI, requestURI);
-       handler.outputHandler.queue(msg);
-       }
-
-}

Copied: branches/freenet-freejvms/src/freenet/node/fcp/GenerateSSKMessage.java 
(from rev 7998, trunk/freenet/src/freenet/node/fcp/GenerateSSKMessage.java)
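
GenerateSSK carries no fields of its own (getFieldSet() above returns an empty set), and the node answers by queuing an SSKKeypair message built from the freshly generated key. Roughly, the exchange looks like this (URI values depend on the generated key and are elided here):

    GenerateSSK
    EndMessage

    SSKKeypair
    InsertURI=<insert URI of the generated key>
    RequestURI=<request URI of the generated key>
    EndMessage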

Deleted: branches/freenet-freejvms/src/freenet/node/fcp/GetFailedMessage.java
===================================================================
--- trunk/freenet/src/freenet/node/fcp/GetFailedMessage.java    2006-02-03 
22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/freenet/node/fcp/GetFailedMessage.java        
2006-02-03 22:55:27 UTC (rev 7999)
@@ -1,51 +0,0 @@
-package freenet.node.fcp;
-
-import freenet.client.FailureCodeTracker;
-import freenet.client.FetchException;
-import freenet.node.Node;
-import freenet.support.SimpleFieldSet;
-
-public class GetFailedMessage extends FCPMessage {
-
-       final int code;
-       final String codeDescription;
-       final String shortCodeDescription;
-       final String extraDescription;
-       final FailureCodeTracker tracker;
-       final boolean isFatal;
-       final String identifier;
-       
-       public GetFailedMessage(FCPConnectionHandler handler, FetchException e, 
String identifier) {
-               this.tracker = e.errorCodes;
-               this.code = e.mode;
-               this.codeDescription = FetchException.getMessage(code);
-               this.extraDescription = e.extraMessage;
-               this.shortCodeDescription = 
FetchException.getShortMessage(code);
-               this.isFatal = e.isFatal();
-               this.identifier = identifier;
-       }
-
-       public SimpleFieldSet getFieldSet() {
-               SimpleFieldSet sfs = new SimpleFieldSet();
-               sfs.put("Code", Integer.toString(code));
-               sfs.put("CodeDescription", codeDescription);
-               if(extraDescription != null)
-                       sfs.put("ExtraDescription", extraDescription);
-               sfs.put("Fatal", Boolean.toString(isFatal));
-               if(tracker != null) {
-                       tracker.copyToFieldSet(sfs, "Errors.");
-               }
-               sfs.put("ShortCodeDescription", shortCodeDescription);
-               sfs.put("Identifier", identifier);
-               return sfs;
-       }
-
-       public String getName() {
-               return "GetFailed";
-       }
-
-       public void run(FCPConnectionHandler handler, Node node) throws 
MessageInvalidException {
-               throw new 
MessageInvalidException(ProtocolErrorMessage.INVALID_MESSAGE, "FetchError goes 
from server to client not the other way around");
-       }
-
-}

Copied: branches/freenet-freejvms/src/freenet/node/fcp/GetFailedMessage.java 
(from rev 7998, trunk/freenet/src/freenet/node/fcp/GetFailedMessage.java)

Deleted: 
branches/freenet-freejvms/src/freenet/node/fcp/IdentifierCollisionMessage.java
===================================================================
--- trunk/freenet/src/freenet/node/fcp/IdentifierCollisionMessage.java  
2006-02-03 22:35:15 UTC (rev 7998)
+++ 
branches/freenet-freejvms/src/freenet/node/fcp/IdentifierCollisionMessage.java  
    2006-02-03 22:55:27 UTC (rev 7999)
@@ -1,29 +0,0 @@
-package freenet.node.fcp;
-
-import freenet.node.Node;
-import freenet.support.SimpleFieldSet;
-
-public class IdentifierCollisionMessage extends FCPMessage {
-
-       final String identifier;
-       
-       public IdentifierCollisionMessage(String id) {
-               this.identifier = id;
-       }
-
-       public SimpleFieldSet getFieldSet() {
-               SimpleFieldSet sfs = new SimpleFieldSet();
-               sfs.put("Identifier", identifier);
-               return sfs;
-       }
-
-       public String getName() {
-               return "IdentifierCollision";
-       }
-
-       public void run(FCPConnectionHandler handler, Node node)
-                       throws MessageInvalidException {
-               throw new 
MessageInvalidException(ProtocolErrorMessage.INVALID_MESSAGE, 
"IdentifierCollision goes from server to client not the other way around");
-       }
-
-}

Copied: 
branches/freenet-freejvms/src/freenet/node/fcp/IdentifierCollisionMessage.java 
(from rev 7998, 
trunk/freenet/src/freenet/node/fcp/IdentifierCollisionMessage.java)

Deleted: 
branches/freenet-freejvms/src/freenet/node/fcp/MessageInvalidException.java
===================================================================
--- trunk/freenet/src/freenet/node/fcp/MessageInvalidException.java     
2006-02-03 22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/freenet/node/fcp/MessageInvalidException.java 
2006-02-03 22:55:27 UTC (rev 7999)
@@ -1,17 +0,0 @@
-package freenet.node.fcp;
-
-/**
- * Thrown when an FCP message is invalid. This is after we have a
- * SimpleFieldSet; one example is if the fields necessary do not exist.
- * This is a catch-all error; it corresponds to MESSAGE_PARSE_ERROR on
- * ProtocolError.
- */
-public class MessageInvalidException extends Exception {
-
-       int protocolCode;
-       
-       public MessageInvalidException(int protocolCode, String extra) {
-               super(extra);
-               this.protocolCode = protocolCode;
-       }
-
-}

Copied: 
branches/freenet-freejvms/src/freenet/node/fcp/MessageInvalidException.java 
(from rev 7998, trunk/freenet/src/freenet/node/fcp/MessageInvalidException.java)

Deleted: branches/freenet-freejvms/src/freenet/node/fcp/NodeHelloMessage.java
===================================================================
--- trunk/freenet/src/freenet/node/fcp/NodeHelloMessage.java    2006-02-03 
22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/freenet/node/fcp/NodeHelloMessage.java        
2006-02-03 22:55:27 UTC (rev 7999)
@@ -1,43 +0,0 @@
-package freenet.node.fcp;
-
-import freenet.node.Node;
-import freenet.node.Version;
-import freenet.support.SimpleFieldSet;
-
-/**
- * NodeHello
- *
- * NodeHello
- * FCPVersion=<protocol version>
- * Node=Fred
- * Version=0.7.0,401
- * EndMessage
- */
-public class NodeHelloMessage extends FCPMessage {
-
-       private final Node node;
-       
-       public NodeHelloMessage(Node node) {
-               this.node = node;
-       }
-       
-       public SimpleFieldSet getFieldSet() {
-               SimpleFieldSet sfs = new SimpleFieldSet();
-               // FIXME
-               sfs.put("FCPVersion", "2.0");
-               sfs.put("Node", "Fred");
-               sfs.put("Version", Version.getVersionString());
-               sfs.put("Testnet", Boolean.toString(node.isTestnetEnabled()));
-               return sfs;
-       }
-
-       public String getName() {
-               return "NodeHello";
-       }
-
-       public void run(FCPConnectionHandler handler, Node node) {
-               throw new UnsupportedOperationException();
-               // Client should not be sending this!
-       }
-
-}

Copied: branches/freenet-freejvms/src/freenet/node/fcp/NodeHelloMessage.java 
(from rev 7998, trunk/freenet/src/freenet/node/fcp/NodeHelloMessage.java)
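
Note that getFieldSet() above also emits a Testnet field that the class comment does not list. Serialized by FCPMessage.send(), a NodeHello therefore looks roughly like this (the version string and testnet flag shown are illustrative):

    NodeHello
    FCPVersion=2.0
    Node=Fred
    Version=0.7.0,401
    Testnet=false
    EndMessage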

Deleted: 
branches/freenet-freejvms/src/freenet/node/fcp/ProtocolErrorMessage.java
===================================================================
--- trunk/freenet/src/freenet/node/fcp/ProtocolErrorMessage.java        
2006-02-03 22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/freenet/node/fcp/ProtocolErrorMessage.java    
2006-02-03 22:55:27 UTC (rev 7999)
@@ -1,84 +0,0 @@
-package freenet.node.fcp;
-
-import freenet.node.Node;
-import freenet.support.Logger;
-import freenet.support.SimpleFieldSet;
-
-/**
- * ProtocolError (some problem parsing the other side's FCP messages)
- * 
- * ProtocolError
- * Code=1
- * CodeDescription=ClientHello must be first message
- * ExtraDescription=Duh
- * Fatal=false // means the connection stays open
- * EndMessage
- */
-public class ProtocolErrorMessage extends FCPMessage {
-
-       static final int CLIENT_HELLO_MUST_BE_FIRST_MESSAGE = 1;
-       static final int NO_LATE_CLIENT_HELLOS = 2;
-       static final int MESSAGE_PARSE_ERROR = 3;
-       static final int URI_PARSE_ERROR = 4;
-       static final int MISSING_FIELD = 5;
-       static final int ERROR_PARSING_NUMBER = 6;
-       static final int INVALID_MESSAGE = 7;
-       static final int INVALID_FIELD = 8;
-       static final int FILE_NOT_FOUND = 9;
-       
-       final int code;
-       final String extra;
-       final boolean fatal;
-       
-       private String codeDescription() {
-               switch(code) {
-               case CLIENT_HELLO_MUST_BE_FIRST_MESSAGE:
-                       return "ClientHello must be first message";
-               case NO_LATE_CLIENT_HELLOS:
-                       return "No late ClientHello's accepted";
-               case MESSAGE_PARSE_ERROR:
-                       return "Unknown message parsing error";
-               case URI_PARSE_ERROR:
-                       return "Error parsing URI";
-               case MISSING_FIELD:
-                       return "Missing field";
-               case ERROR_PARSING_NUMBER:
-                       return "Error parsing a numeric field";
-               case INVALID_MESSAGE:
-                       return "Don't know what to do with message";
-               case INVALID_FIELD:
-                       return "Invalid field value";
-               case FILE_NOT_FOUND:
-                       return "File not found, not a file or not readable";
-               default:
-                       Logger.error(this, "Unknown error code: "+code, new 
Exception("debug"));
-               return "(Unknown)";
-               }
-       }
-
-       public ProtocolErrorMessage(int code, boolean fatal, String extra) {
-               this.code = code;
-               this.extra = extra;
-               this.fatal = fatal;
-       }
-
-       public SimpleFieldSet getFieldSet() {
-               SimpleFieldSet sfs = new SimpleFieldSet();
-               sfs.put("Code", Integer.toString(code));
-               sfs.put("CodeDescription", codeDescription());
-               if(extra != null)
-                       sfs.put("ExtraDescription", extra);
-               sfs.put("Fatal", Boolean.toString(fatal));
-               return sfs;
-       }
-
-       public void run(FCPConnectionHandler handler, Node node) {
-               Logger.error(this, "Client reported protocol error");
-               if(fatal) handler.close();
-       }
-
-       public String getName() {
-               return "ProtocolError";
-       }
-
-}

Copied: 
branches/freenet-freejvms/src/freenet/node/fcp/ProtocolErrorMessage.java (from 
rev 7998, trunk/freenet/src/freenet/node/fcp/ProtocolErrorMessage.java)

Deleted: branches/freenet-freejvms/src/freenet/node/fcp/PutFailedMessage.java
===================================================================
--- trunk/freenet/src/freenet/node/fcp/PutFailedMessage.java    2006-02-03 
22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/freenet/node/fcp/PutFailedMessage.java        
2006-02-03 22:55:27 UTC (rev 7999)
@@ -1,57 +0,0 @@
-package freenet.node.fcp;
-
-import freenet.client.FailureCodeTracker;
-import freenet.client.InserterException;
-import freenet.keys.FreenetURI;
-import freenet.node.Node;
-import freenet.support.SimpleFieldSet;
-
-public class PutFailedMessage extends FCPMessage {
-
-       final int code;
-       final String codeDescription;
-       final String extraDescription;
-       final String codeShortDescription;
-       final FailureCodeTracker tracker;
-       final FreenetURI expectedURI;
-       final String identifier;
-       final boolean isFatal;
-       
-       public PutFailedMessage(InserterException e, String identifier) {
-               this.code = e.getMode();
-               this.codeDescription = InserterException.getMessage(code);
-               this.codeShortDescription = 
InserterException.getShortMessage(code);
-               this.extraDescription = e.extra;
-               this.tracker = e.errorCodes;
-               this.expectedURI = e.uri;
-               this.identifier = identifier;
-               this.isFatal = e.isFatal();
-       }
-
-       public SimpleFieldSet getFieldSet() {
-               SimpleFieldSet fs = new SimpleFieldSet();
-               fs.put("Identifier", identifier);
-               fs.put("Code", Integer.toString(code));
-               fs.put("CodeDescription", codeDescription);
-               if(extraDescription != null)
-                       fs.put("ExtraDescription", extraDescription);
-               if(tracker != null) {
-                       tracker.copyToFieldSet(fs, "Errors.");
-               }
-               fs.put("Fatal", Boolean.toString(isFatal));
-               fs.put("ShortCodeDescription", codeShortDescription);
-               if(expectedURI != null)
-                       fs.put("ExpectedURI", expectedURI.toString());
-               return fs;
-       }
-
-       public String getName() {
-               return "PutFailed";
-       }
-
-       public void run(FCPConnectionHandler handler, Node node)
-                       throws MessageInvalidException {
-               throw new 
MessageInvalidException(ProtocolErrorMessage.INVALID_MESSAGE, "PutFailed goes 
from server to client not the other way around");
-       }
-
-}

Copied: branches/freenet-freejvms/src/freenet/node/fcp/PutFailedMessage.java 
(from rev 7998, trunk/freenet/src/freenet/node/fcp/PutFailedMessage.java)

Deleted: 
branches/freenet-freejvms/src/freenet/node/fcp/PutSuccessfulMessage.java
===================================================================
--- trunk/freenet/src/freenet/node/fcp/PutSuccessfulMessage.java        
2006-02-03 22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/freenet/node/fcp/PutSuccessfulMessage.java    
2006-02-03 22:55:27 UTC (rev 7999)
@@ -1,33 +0,0 @@
-package freenet.node.fcp;
-
-import freenet.keys.FreenetURI;
-import freenet.node.Node;
-import freenet.support.SimpleFieldSet;
-
-public class PutSuccessfulMessage extends FCPMessage {
-
-       public final String identifier;
-       public final FreenetURI uri;
-       
-       public PutSuccessfulMessage(String identifier, FreenetURI uri) {
-               this.identifier = identifier;
-               this.uri = uri;
-       }
-
-       public SimpleFieldSet getFieldSet() {
-               SimpleFieldSet fs = new SimpleFieldSet();
-               fs.put("Identifier", identifier);
-               fs.put("URI", uri.toString());
-               return fs;
-       }
-
-       public String getName() {
-               return "PutSuccessful";
-       }
-
-       public void run(FCPConnectionHandler handler, Node node)
-                       throws MessageInvalidException {
-               throw new 
MessageInvalidException(ProtocolErrorMessage.INVALID_MESSAGE, "InsertSuccessful 
goes from server to client not the other way around");
-       }
-
-}

Copied: 
branches/freenet-freejvms/src/freenet/node/fcp/PutSuccessfulMessage.java (from 
rev 7998, trunk/freenet/src/freenet/node/fcp/PutSuccessfulMessage.java)

Deleted: branches/freenet-freejvms/src/freenet/node/fcp/SSKKeypairMessage.java
===================================================================
--- trunk/freenet/src/freenet/node/fcp/SSKKeypairMessage.java   2006-02-03 
22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/freenet/node/fcp/SSKKeypairMessage.java       
2006-02-03 22:55:27 UTC (rev 7999)
@@ -1,34 +0,0 @@
-package freenet.node.fcp;
-
-import freenet.keys.FreenetURI;
-import freenet.node.Node;
-import freenet.support.SimpleFieldSet;
-
-public class SSKKeypairMessage extends FCPMessage {
-
-       private final FreenetURI insertURI;
-       private final FreenetURI requestURI;
-       
-       public SSKKeypairMessage(FreenetURI insertURI, FreenetURI requestURI) {
-               this.insertURI = insertURI;
-               this.requestURI = requestURI;
-       }
-
-       public SimpleFieldSet getFieldSet() {
-               SimpleFieldSet sfs = new SimpleFieldSet();
-               sfs.put("InsertURI", insertURI.toString());
-               sfs.put("RequestURI", requestURI.toString());
-               return sfs;
-       }
-
-       public String getName() {
-               return "SSKKeypair";
-       }
-
-       public void run(FCPConnectionHandler handler, Node node) throws 
MessageInvalidException {
-               throw new 
MessageInvalidException(ProtocolErrorMessage.INVALID_MESSAGE, "SSKKeypair goes 
from server to client not the other way around");
-       }
-       
-       
-
-}

Copied: branches/freenet-freejvms/src/freenet/node/fcp/SSKKeypairMessage.java 
(from rev 7998, trunk/freenet/src/freenet/node/fcp/SSKKeypairMessage.java)

Deleted: 
branches/freenet-freejvms/src/freenet/node/fcp/SimpleProgressMessage.java
===================================================================
--- trunk/freenet/src/freenet/node/fcp/SimpleProgressMessage.java       
2006-02-03 22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/freenet/node/fcp/SimpleProgressMessage.java   
2006-02-03 22:55:27 UTC (rev 7999)
@@ -1,37 +0,0 @@
-package freenet.node.fcp;
-
-import freenet.client.events.SplitfileProgressEvent;
-import freenet.node.Node;
-import freenet.support.SimpleFieldSet;
-
-public class SimpleProgressMessage extends FCPMessage {
-
-       private final String ident;
-       private final SplitfileProgressEvent event;
-       
-       public SimpleProgressMessage(String identifier, SplitfileProgressEvent 
event) {
-               this.ident = identifier;
-               this.event = event;
-       }
-
-       public SimpleFieldSet getFieldSet() {
-               SimpleFieldSet fs = new SimpleFieldSet();
-               fs.put("Total", Integer.toString(event.totalBlocks));
-               fs.put("Required", Integer.toString(event.minSuccessfulBlocks));
-               fs.put("Failed", Integer.toString(event.failedBlocks));
-               fs.put("FatallyFailed", 
Integer.toString(event.fatallyFailedBlocks));
-               fs.put("Succeeded",Integer.toString(event.fetchedBlocks));
-               fs.put("FinalizedTotal", 
Boolean.toString(event.finalizedTotal));
-               fs.put("Identifier", ident);
-               return fs;
-       }
-
-       public String getName() {
-               return "SimpleProgress";
-       }
-
-       public void run(FCPConnectionHandler handler, Node node) throws 
MessageInvalidException {
-               throw new 
MessageInvalidException(ProtocolErrorMessage.INVALID_MESSAGE, "SimpleProgress 
goes from server to client not the other way around");
-       }
-
-}

Copied: 
branches/freenet-freejvms/src/freenet/node/fcp/SimpleProgressMessage.java (from 
rev 7998, trunk/freenet/src/freenet/node/fcp/SimpleProgressMessage.java)
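
SimpleProgress reports splitfile progress as plain counters. Using the field names from getFieldSet() above, with made-up numbers and identifier for illustration, one such message would read:

    SimpleProgress
    Total=128
    Required=64
    Failed=0
    FatallyFailed=0
    Succeeded=53
    FinalizedTotal=true
    Identifier=ExampleRequest-1
    EndMessage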

Deleted: branches/freenet-freejvms/src/freenet/node/fcp/URIGeneratedMessage.java
===================================================================
--- trunk/freenet/src/freenet/node/fcp/URIGeneratedMessage.java 2006-02-03 
22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/freenet/node/fcp/URIGeneratedMessage.java     
2006-02-03 22:55:27 UTC (rev 7999)
@@ -1,33 +0,0 @@
-package freenet.node.fcp;
-
-import freenet.keys.FreenetURI;
-import freenet.node.Node;
-import freenet.support.SimpleFieldSet;
-
-public class URIGeneratedMessage extends FCPMessage {
-
-       private final FreenetURI uri;
-       private final String identifier;
-       
-       public URIGeneratedMessage(FreenetURI uri, String identifier) {
-               this.uri = uri;
-               this.identifier = identifier;
-       }
-
-       public SimpleFieldSet getFieldSet() {
-               SimpleFieldSet fs = new SimpleFieldSet();
-               fs.put("URI", uri.toString());
-               fs.put("Identifier", identifier);
-               return fs;
-       }
-
-       public String getName() {
-               return "URIGenerated";
-       }
-
-       public void run(FCPConnectionHandler handler, Node node)
-                       throws MessageInvalidException {
-               throw new 
MessageInvalidException(ProtocolErrorMessage.INVALID_MESSAGE, "URIGenerated 
goes from server to client not the other way around");
-       }
-
-}

Copied: branches/freenet-freejvms/src/freenet/node/fcp/URIGeneratedMessage.java 
(from rev 7998, trunk/freenet/src/freenet/node/fcp/URIGeneratedMessage.java)

Deleted: branches/freenet-freejvms/src/freenet/store/BaseFreenetStore.java
===================================================================
--- branches/freenet-freejvms/src/freenet/store/BaseFreenetStore.java   
2006-02-03 22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/freenet/store/BaseFreenetStore.java   
2006-02-03 22:55:27 UTC (rev 7999)
@@ -1,114 +0,0 @@
-package freenet.store;
-
-import java.io.File;
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.io.RandomAccessFile;
-
-import freenet.keys.CHKBlock;
-import freenet.keys.CHKVerifyException;
-import freenet.keys.NodeCHK;
-import freenet.support.Fields;
-import freenet.support.Logger;
-
-/**
- * @author amphibian
- * 
- * Freenet datastore.
- */
-public class BaseFreenetStore implements FreenetStore {
-
-    final DataStore dataStore;
-    final DataStore headersStore;
-    
-    static final int DATA_BLOCK_SIZE = 32*1024;
-    static final int HEADER_BLOCK_SIZE = 512;
-    
-    public BaseFreenetStore(String filename, long maxBlocks) throws 
FileNotFoundException, Exception {
-        this(new RandomAccessFile(filename+"-store","rw"), new 
RandomAccessFile(filename+"-storeindex","rw"),
-                new RandomAccessFile(filename+"-header","rw"), new 
RandomAccessFile(filename+"-headerindex","rw"), maxBlocks);
-    }
-    
-    public BaseFreenetStore(RandomAccessFile storeFile, RandomAccessFile 
storeIndexFile, 
-            RandomAccessFile headerStoreFile, RandomAccessFile 
headerStoreIndexFile, long maxBlocks) throws Exception {
-        dataStore = new DataStore(storeIndexFile, storeFile, DATA_BLOCK_SIZE, 
maxBlocks);
-        headersStore = new DataStore(headerStoreIndexFile, headerStoreFile, 
HEADER_BLOCK_SIZE, maxBlocks);
-    }
-    
-    /**
-     * @param storeFilename The name of the file containing the store.
-     * @param headerStoreFilename The name of the file containing the headers 
store.
-     * @param maxBlocks The maximum number of chunks stored in this store.
-     */
-    public BaseFreenetStore(String storeFilename, String headerStoreFilename, 
long maxBlocks) throws Exception {
-        dataStore = new DataStore(new File(storeFilename), new 
File(storeFilename+".index"), DATA_BLOCK_SIZE, maxBlocks);
-        // FIXME: What's the right size? 512 is probably enough for SSKs?
-        headersStore = new DataStore(new File(headerStoreFilename), new 
File(headerStoreFilename+".index"), HEADER_BLOCK_SIZE, maxBlocks);
-    }
-
-    /**
-     * Retrieve a block.
-     * @return null if there is no such block stored, otherwise the block.
-     */
-    public synchronized CHKBlock fetch(NodeCHK chk, boolean dontPromote) 
throws IOException {
-        byte[] data = dataStore.getDataForBlock(chk, dontPromote);
-        if(data == null) {
-            if(headersStore.getDataForBlock(chk, true) != null) {
-                Logger.normal(this, "Deleting: "+chk+" headers, no data");
-                headersStore.delete(chk);
-            }
-            return null;
-        }
-        byte[] headers = headersStore.getDataForBlock(chk, dontPromote);
-        if(headers == null) {
-            // No headers, delete
-            Logger.normal(this, "Deleting: "+chk+" data, no headers");
-            dataStore.delete(chk);
-            return null;
-        }
-        // Decode
-        int headerLen = ((headers[0] & 0xff) << 8) + (headers[1] & 0xff);
-        if(headerLen > HEADER_BLOCK_SIZE-2) {
-            Logger.normal(this, "Invalid header data on "+chk+", deleting");
-            dataStore.delete(chk);
-            headersStore.delete(chk);
-            return null;
-        }
-        byte[] buf = new byte[headerLen];
-        System.arraycopy(headers, 2, buf, 0, headerLen);
-        Logger.minor(this, "Get key: "+chk);
-        Logger.minor(this, "Raw headers: "+headers.length+" bytes, hash 
"+Fields.hashCode(headers));
-        Logger.minor(this, "Headers: "+headerLen+" bytes, hash 
"+Fields.hashCode(buf));
-        Logger.minor(this, "Data: "+data.length+" bytes, hash 
"+Fields.hashCode(data));
-        try {
-            return new CHKBlock(data, buf, chk);
-        } catch (CHKVerifyException e) {
-            Logger.normal(this, "Does not verify, deleting: "+chk);
-            dataStore.delete(chk);
-            headersStore.delete(chk);
-            return null;
-        }
-    }
-
-    /**
-     * Store a block.
-     */
-    public synchronized void put(CHKBlock block) throws IOException {
-        byte[] data = block.getData();
-        byte[] headers = block.getHeader();
-        int hlen = headers.length;
-        if(data.length != DATA_BLOCK_SIZE || hlen > HEADER_BLOCK_SIZE-2)
-            throw new IllegalArgumentException("Too big - data: 
"+data.length+" should be "+
-                    DATA_BLOCK_SIZE+", headers: "+hlen+" - should be 
"+(HEADER_BLOCK_SIZE-2));
-        byte[] hbuf = new byte[HEADER_BLOCK_SIZE];
-        hbuf[0] = (byte)(hlen >> 8);
-        hbuf[1] = (byte)(hlen & 0xff);
-        System.arraycopy(headers, 0, hbuf, 2, hlen);
-        Logger.minor(this, "Put key: "+block.getKey());
-        Logger.minor(this, "Raw headers: "+hbuf.length+" bytes, hash 
"+Fields.hashCode(hbuf));
-        Logger.minor(this, "Headers: "+hlen+" bytes, hash 
"+Fields.hashCode(headers));
-        Logger.minor(this, "Data: "+data.length+" bytes, hash 
"+Fields.hashCode(data));
-        dataStore.addDataAsBlock(block.getKey(), data);
-        headersStore.addDataAsBlock(block.getKey(), hbuf);
-    }
-}
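
The deleted BaseFreenetStore kept CHK headers in fixed 512-byte blocks, prefixing the real header with its length as two big-endian bytes; put() encodes that prefix and fetch() decodes it, deleting any entry whose length field is out of range. A self-contained sketch of just that codec (a hypothetical class using the same arithmetic as the code above):

    public class HeaderBlockCodec {

        static final int HEADER_BLOCK_SIZE = 512;

        // Mirrors put(): 2-byte big-endian length, then the header, zero-padded.
        static byte[] encode(byte[] headers) {
            int hlen = headers.length;
            if (hlen > HEADER_BLOCK_SIZE - 2)
                throw new IllegalArgumentException("header too long: " + hlen);
            byte[] hbuf = new byte[HEADER_BLOCK_SIZE];
            hbuf[0] = (byte) (hlen >> 8);
            hbuf[1] = (byte) (hlen & 0xff);
            System.arraycopy(headers, 0, hbuf, 2, hlen);
            return hbuf;
        }

        // Mirrors fetch(): read the length prefix, reject impossible values.
        static byte[] decode(byte[] hbuf) {
            int headerLen = ((hbuf[0] & 0xff) << 8) + (hbuf[1] & 0xff);
            if (headerLen > HEADER_BLOCK_SIZE - 2)
                return null;                     // invalid; the store deletes such entries
            byte[] headers = new byte[headerLen];
            System.arraycopy(hbuf, 2, headers, 0, headerLen);
            return headers;
        }

        public static void main(String[] args) {
            byte[] stored = encode(new byte[] { 1, 2, 3, 4, 5 });
            System.out.println(decode(stored).length);   // prints 5
        }
    }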

Modified: 
branches/freenet-freejvms/src/freenet/store/BerkeleyDBFreenetStore.java
===================================================================
--- branches/freenet-freejvms/src/freenet/store/BerkeleyDBFreenetStore.java     
2006-02-03 22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/freenet/store/BerkeleyDBFreenetStore.java     
2006-02-03 22:55:27 UTC (rev 7999)
@@ -4,6 +4,7 @@
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.RandomAccessFile;
+import java.util.Arrays;

 import com.sleepycat.bind.tuple.TupleBinding;
 import com.sleepycat.bind.tuple.TupleInput;
@@ -23,10 +24,16 @@
 import com.sleepycat.je.SecondaryKeyCreator;
 import com.sleepycat.je.Transaction;

+import freenet.crypt.DSAPublicKey;
 import freenet.keys.CHKBlock;
 import freenet.keys.CHKVerifyException;
+import freenet.keys.KeyBlock;
 import freenet.keys.NodeCHK;
+import freenet.keys.NodeSSK;
+import freenet.keys.SSKBlock;
+import freenet.keys.SSKVerifyException;
 import freenet.support.Fields;
+import freenet.support.HexUtil;
 import freenet.support.Logger;

 /** 
@@ -39,8 +46,8 @@
  */
 public class BerkeleyDBFreenetStore implements FreenetStore {

-    static final int CHK_DATA_BLOCK_SIZE = 32*1024;
-    static final int CHK_HEADER_BLOCK_SIZE = 36;
+    final int dataBlockSize;
+    final int headerBlockSize;

        private final Environment environment;
        private final TupleBinding storeBlockTupleBinding;
@@ -61,8 +68,9 @@
      * @param the directory where the store is located
      * @throws FileNotFoundException if the dir does not exist and could not 
be created
      */
-       public BerkeleyDBFreenetStore(String storeDir,int maxChkBlocks) throws 
Exception
-       {
+       public BerkeleyDBFreenetStore(String storeDir, int maxChkBlocks, int 
blockSize, int headerSize) throws Exception {
+               this.dataBlockSize = blockSize;
+               this.headerBlockSize = headerSize;
                // Percentage of the database that must contain useful data
                // decrease to increase performance, increase to save disk space
                System.setProperty("je.cleaner.minUtilization","98");
@@ -135,11 +143,20 @@
        DatabaseEntry routingkeyDBE = new DatabaseEntry(routingkey);
        DatabaseEntry blockDBE = new DatabaseEntry();
        Cursor c = null;
+       Transaction t = null;
        try{
-               Transaction t = environment.beginTransaction(null,null);
+               t = environment.beginTransaction(null,null);
                c = chkDB.openCursor(t,null);
-               
-               if(c.getSearchKey(routingkeyDBE,blockDBE,LockMode.DEFAULT)
+
+               /**
+                * We will have to write, unless both dontPromote and the key 
is valid.
+                * The lock only applies to this record, so it's not a big 
problem for our use.
+                * What *IS* a big problem is that if we take a 
LockMode.DEFAULT, and two threads
+                * access the same key, they will both take the read lock, and 
then both try to
+                * take the write lock. Neither can relinquish the read in 
order for the other to
+                * take the write, so we're screwed.
+                */
+               if(c.getSearchKey(routingkeyDBE,blockDBE,LockMode.RMW)
                                !=OperationStatus.SUCCESS) {
                        c.close();
                        t.abort();
@@ -150,12 +167,12 @@

                CHKBlock block = null;
                try{
-                       byte[] header = new byte[CHK_HEADER_BLOCK_SIZE];
-                       byte[] data = new byte[CHK_DATA_BLOCK_SIZE];
+                       byte[] header = new byte[headerBlockSize];
+                       byte[] data = new byte[dataBlockSize];
                        synchronized(chkStore) {
-                               
chkStore.seek(storeBlock.offset*(long)(CHK_DATA_BLOCK_SIZE+CHK_HEADER_BLOCK_SIZE));
-                               chkStore.read(header);
-                               chkStore.read(data);
+                               
chkStore.seek(storeBlock.offset*(long)(dataBlockSize+headerBlockSize));
+                               chkStore.readFully(header);
+                               chkStore.readFully(data);
                        }


@@ -179,7 +196,7 @@
                    Logger.minor(this, "Data: "+data.length+" bytes, hash 
"+data);

                }catch(CHKVerifyException ex){
-                       Logger.normal(this, "Does not verify, setting 
accessTime to 0 for : "+chk);
+                       Logger.normal(this, "Does not verify ("+ex+"), setting 
accessTime to 0 for : "+chk);
                        storeBlock.setRecentlyUsedToZero();
                        DatabaseEntry updateDBE = new DatabaseEntry();
                        storeBlockTupleBinding.objectToEntry(storeBlock, 
updateDBE);
@@ -189,10 +206,12 @@
                    return null;
                }
                return block;
-       }catch(Exception ex) {  // FIXME: ugly  
+       }catch(Throwable ex) {  // FIXME: ugly  
                if(c!=null) {
                        try{c.close();}catch(DatabaseException ex2){}
                }
+               if(t!=null)
+                       try{t.abort();}catch(DatabaseException ex2){}
                Logger.error(this, "Caught "+ex, ex);
                ex.printStackTrace();
                throw new IOException(ex.getMessage());
@@ -201,24 +220,199 @@
 //     return null;
     }

+       /**
+     * Retrieve a block.
+     * @param dontPromote If true, don't promote data if fetched.
+     * @return null if there is no such block stored, otherwise the block.
+     */
+    public SSKBlock fetch(NodeSSK chk, boolean dontPromote) throws IOException
+    {
+       if(closed)
+               return null;
+       
+       byte[] routingkey = chk.getRoutingKey();
+       DatabaseEntry routingkeyDBE = new DatabaseEntry(routingkey);
+       DatabaseEntry blockDBE = new DatabaseEntry();
+       Cursor c = null;
+       Transaction t = null;
+       try{
+               t = environment.beginTransaction(null,null);
+               c = chkDB.openCursor(t,null);
+               
+               if(c.getSearchKey(routingkeyDBE,blockDBE,LockMode.RMW)
+                               !=OperationStatus.SUCCESS) {
+                       c.close();
+                       t.abort();
+                       return null;
+               }
+
+               StoreBlock storeBlock = (StoreBlock) 
storeBlockTupleBinding.entryToObject(blockDBE);
+                               
+               SSKBlock block = null;
+               try{
+                       byte[] header = new byte[headerBlockSize];
+                       byte[] data = new byte[dataBlockSize];
+                       synchronized(chkStore) {
+                               
chkStore.seek(storeBlock.offset*(long)(dataBlockSize+headerBlockSize));
+                               chkStore.readFully(header);
+                               chkStore.readFully(data);
+                       }
+                       
+                       
+                       block = new SSKBlock(data,header,chk, true);
+                       
+                       if(!dontPromote)
+                       {
+                               storeBlock.updateRecentlyUsed();
+                               DatabaseEntry updateDBE = new DatabaseEntry();
+                               
storeBlockTupleBinding.objectToEntry(storeBlock, updateDBE);
+                               c.putCurrent(updateDBE);
+                               c.close();
+                               t.commit();
+                       }else{
+                               c.close();
+                               t.abort();
+                       }
+                       
+                       Logger.minor(this, "Get key: "+chk);
+                   Logger.minor(this, "Headers: "+header.length+" bytes, hash 
"+header);
+                   Logger.minor(this, "Data: "+data.length+" bytes, hash 
"+data);
+                       
+               }catch(SSKVerifyException ex){
+                       Logger.normal(this, "Does not verify, setting 
accessTime to 0 for : "+chk, ex);
+                       storeBlock.setRecentlyUsedToZero();
+                       DatabaseEntry updateDBE = new DatabaseEntry();
+                       storeBlockTupleBinding.objectToEntry(storeBlock, 
updateDBE);
+                       c.putCurrent(updateDBE);
+                       c.close();
+                       t.commit();
+                   return null;
+               }
+               return block;
+       }catch(Throwable ex) {  // FIXME: ugly  
+               if(c!=null) {
+                       try{c.close();}catch(DatabaseException ex2){}
+               }
+               if(t!=null) {
+                       try{t.abort();}catch(DatabaseException ex2){}
+               }
+               Logger.error(this, "Caught "+ex, ex);
+               ex.printStackTrace();
+               throw new IOException(ex.getMessage());
+        }
+       
+//     return null;
+    }
+
+    // FIXME do this with interfaces etc.
+    
+       /**
+     * Retrieve a block.
+     * @param dontPromote If true, don't promote data if fetched.
+     * @return null if there is no such block stored, otherwise the block.
+     */
+    public DSAPublicKey fetchPubKey(byte[] hash, boolean dontPromote) throws 
IOException
+    {
+       if(closed)
+               return null;
+       
+       DatabaseEntry routingkeyDBE = new DatabaseEntry(hash);
+       DatabaseEntry blockDBE = new DatabaseEntry();
+       Cursor c = null;
+       Transaction t = null;
+       try{
+               t = environment.beginTransaction(null,null);
+               c = chkDB.openCursor(t,null);
+               
+               if(c.getSearchKey(routingkeyDBE,blockDBE,LockMode.RMW)
+                               !=OperationStatus.SUCCESS) {
+                       c.close();
+                       t.abort();
+                       return null;
+               }
+
+               StoreBlock storeBlock = (StoreBlock) 
storeBlockTupleBinding.entryToObject(blockDBE);
+                               
+               DSAPublicKey block = null;
+               
+                       byte[] data = new byte[dataBlockSize];
+                       Logger.minor(this, "Reading from store... 
"+storeBlock.offset);
+                       synchronized(chkStore) {
+                               
chkStore.seek(storeBlock.offset*(long)(dataBlockSize+headerBlockSize));
+                               chkStore.readFully(data);
+                       }
+                       Logger.minor(this, "Read");
+                       
+                       try {
+                               block = new DSAPublicKey(data);
+                       } catch (IOException e) {
+                               Logger.error(this, "Could not read key");
+                               c.close();
+                               t.abort();
+                               return null;
+                       }
+                       
+                       if(!Arrays.equals(block.asBytesHash(), hash)) {
+                               Logger.normal(this, "Does not verify, setting 
accessTime to 0 for : "+HexUtil.bytesToHex(hash));
+                               storeBlock.setRecentlyUsedToZero();
+                               DatabaseEntry updateDBE = new DatabaseEntry();
+                               
storeBlockTupleBinding.objectToEntry(storeBlock, updateDBE);
+                               c.putCurrent(updateDBE);
+                               c.close();
+                               t.commit();
+                           return null;
+                       }
+                       
+                       if(!dontPromote)
+                       {
+                               storeBlock.updateRecentlyUsed();
+                               DatabaseEntry updateDBE = new DatabaseEntry();
+                               
storeBlockTupleBinding.objectToEntry(storeBlock, updateDBE);
+                               c.putCurrent(updateDBE);
+                               c.close();
+                               t.commit();
+                       }else{
+                               c.close();
+                               t.abort();
+                       }
+                       
+                       Logger.minor(this, "Get key: 
"+HexUtil.bytesToHex(hash));
+                   Logger.minor(this, "Data: "+data.length+" bytes, hash 
"+data);
+                       
+               return block;
+       }catch(Throwable ex) {  // FIXME: ugly  
+               if(c!=null) {
+                       try{c.close();}catch(DatabaseException ex2){}
+               }
+               if(t!=null) {
+                       try{t.abort();}catch(DatabaseException ex2){}
+               }
+               Logger.error(this, "Caught "+ex, ex);
+               ex.printStackTrace();
+               throw new IOException(ex.getMessage());
+        }
+       
+//     return null;
+    }
+
     /**
      * Store a block.
      */
-    public void put(CHKBlock block) throws IOException
+    public void put(KeyBlock block) throws IOException
     {          
        if(closed)
                return;

-       byte[] routingkey = ((NodeCHK)block.getKey()).getRoutingKey();
-        byte[] data = block.getData();
-        byte[] header = block.getHeader();
+       byte[] routingkey = block.getKey().getRoutingKey();
+        byte[] data = block.getRawData();
+        byte[] header = block.getRawHeaders();

-        if(data.length!=CHK_DATA_BLOCK_SIZE) {
-               Logger.minor(this, "This data is "+data.length+" bytes. Should 
be "+CHK_DATA_BLOCK_SIZE);
+        if(data.length!=dataBlockSize) {
+               Logger.error(this, "This data is "+data.length+" bytes. Should 
be "+dataBlockSize);
                return;
         }
-        if(header.length!=CHK_HEADER_BLOCK_SIZE) {
-               Logger.minor(this, "This header is "+data.length+" bytes. 
Should be "+CHK_HEADER_BLOCK_SIZE);
+        if(header.length!=headerBlockSize) {
+               Logger.error(this, "This header is "+header.length+" bytes. 
Should be "+headerBlockSize);
                return;
         }

@@ -231,7 +425,7 @@
                synchronized(chkStore) {
                        if(chkBlocksInStore<maxChkBlocks) {
                                // Expand the store file
-                               int byteOffset = 
chkBlocksInStore*(CHK_DATA_BLOCK_SIZE+CHK_HEADER_BLOCK_SIZE);
+                               int byteOffset = 
chkBlocksInStore*(dataBlockSize+headerBlockSize);
                                StoreBlock storeBlock = new 
StoreBlock(chkBlocksInStore);
                                DatabaseEntry blockDBE = new DatabaseEntry();
                        storeBlockTupleBinding.objectToEntry(storeBlock, 
blockDBE);
@@ -254,7 +448,7 @@
                                DatabaseEntry blockDBE = new DatabaseEntry();
                                
storeBlockTupleBinding.objectToEntry(storeBlock, blockDBE);
                                chkDB.put(t,routingkeyDBE,blockDBE);
-                       
chkStore.seek(storeBlock.getOffset()*(long)(CHK_DATA_BLOCK_SIZE+CHK_HEADER_BLOCK_SIZE));
+                       
chkStore.seek(storeBlock.getOffset()*(long)(dataBlockSize+headerBlockSize));
                        chkStore.write(header);
                        chkStore.write(data);
                                t.commit();
@@ -265,7 +459,7 @@
                Logger.minor(this, "Headers: "+header.length+" bytes, hash 
"+Fields.hashCode(header));
                Logger.minor(this, "Data: "+data.length+" bytes, hash 
"+Fields.hashCode(data));

-        }catch(Exception ex) {  // FIXME: ugly  
+        }catch(Throwable ex) {  // FIXME: ugly  
                if(t!=null){
                        try{t.abort();}catch(DatabaseException ex2){};
                }
@@ -275,6 +469,72 @@
         }
     }

+    /**
+     * Store a block.
+     */
+    public void put(byte[] hash, DSAPublicKey key) throws IOException
+    {          
+       if(closed)
+               return;
+               
+       byte[] routingkey = hash;
+        byte[] data = key.asPaddedBytes();
+        
+        if(data.length!=dataBlockSize) {
+               Logger.error(this, "This data is "+data.length+" bytes. Should 
be "+dataBlockSize);
+               return;
+        }
+        
+        Transaction t = null;
+        
+        try{
+               t = environment.beginTransaction(null,null);
+               DatabaseEntry routingkeyDBE = new DatabaseEntry(routingkey);
+               
+               synchronized(chkStore) {
+                       if(chkBlocksInStore<maxChkBlocks) {
+                               // Expand the store file
+                               int byteOffset = 
chkBlocksInStore*(dataBlockSize+headerBlockSize);
+                               StoreBlock storeBlock = new 
StoreBlock(chkBlocksInStore);
+                               DatabaseEntry blockDBE = new DatabaseEntry();
+                       storeBlockTupleBinding.objectToEntry(storeBlock, 
blockDBE);
+                       chkDB.put(t,routingkeyDBE,blockDBE);
+                       chkStore.seek(byteOffset);
+                       chkStore.write(data);
+                       t.commit();
+                       chkBlocksInStore++;
+                       }else{
+                               // Overwrite another block
+                               Cursor c = chkDB_accessTime.openCursor(t,null);
+                               DatabaseEntry keyDBE = new DatabaseEntry();
+                               DatabaseEntry dataDBE = new DatabaseEntry();
+                               c.getFirst(keyDBE,dataDBE,null);
+                               StoreBlock oldStoreBlock = (StoreBlock) 
storeBlockTupleBinding.entryToObject(dataDBE);
+                               c.delete();
+                               c.close();
+                               StoreBlock storeBlock = new 
StoreBlock(oldStoreBlock.getOffset());
+                               DatabaseEntry blockDBE = new DatabaseEntry();
+                               
storeBlockTupleBinding.objectToEntry(storeBlock, blockDBE);
+                               chkDB.put(t,routingkeyDBE,blockDBE);
+                       
chkStore.seek(storeBlock.getOffset()*(long)(dataBlockSize+headerBlockSize));
+                       chkStore.write(data);
+                               t.commit();
+                       }
+               }
+               
+               Logger.minor(this, "Put key: "+HexUtil.bytesToHex(hash));
+               Logger.minor(this, "Data: "+data.length+" bytes, hash 
"+Fields.hashCode(data));
+                
+        }catch(Throwable ex) {  // FIXME: ugly  
+               if(t!=null){
+                       try{t.abort();}catch(DatabaseException ex2){};
+               }
+               Logger.error(this, "Caught "+ex, ex);
+               ex.printStackTrace();
+               throw new IOException(ex.getMessage());
+        }
+    }
+    
     private class StoreBlock
     {
        private long recentlyUsed;
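
In the CHK, SSK and public-key fetch/put paths above, a record's position in the flat store file is offset * (dataBlockSize + headerBlockSize), header first and data second, and reads now use readFully() so a short read can no longer hand back a partially filled buffer. A minimal sketch of that layout (a hypothetical class; the real store takes the block sizes as constructor parameters, and 36/32768 are the old CHK sizes used here only for illustration):

    import java.io.File;
    import java.io.IOException;
    import java.io.RandomAccessFile;

    public class FlatStoreLayoutSketch {

        final RandomAccessFile file;
        final int headerBlockSize;
        final int dataBlockSize;

        FlatStoreLayoutSketch(RandomAccessFile file, int headerBlockSize, int dataBlockSize) {
            this.file = file;
            this.headerBlockSize = headerBlockSize;
            this.dataBlockSize = dataBlockSize;
        }

        void write(long offset, byte[] header, byte[] data) throws IOException {
            // The (long) cast keeps the byte offset in 64-bit arithmetic for large stores.
            file.seek(offset * (long) (headerBlockSize + dataBlockSize));
            file.write(header);
            file.write(data);
        }

        byte[][] read(long offset) throws IOException {
            byte[] header = new byte[headerBlockSize];
            byte[] data = new byte[dataBlockSize];
            file.seek(offset * (long) (headerBlockSize + dataBlockSize));
            file.readFully(header);              // readFully, as in the new code above
            file.readFully(data);
            return new byte[][] { header, data };
        }

        public static void main(String[] args) throws IOException {
            File f = File.createTempFile("flatstore", ".tmp");
            f.deleteOnExit();
            FlatStoreLayoutSketch store = new FlatStoreLayoutSketch(
                    new RandomAccessFile(f, "rw"), 36, 32 * 1024);
            store.write(3, new byte[36], new byte[32 * 1024]);
            System.out.println(store.read(3)[1].length);   // prints 32768
        }
    }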

Deleted: branches/freenet-freejvms/src/freenet/store/DataStore.java
===================================================================
--- branches/freenet-freejvms/src/freenet/store/DataStore.java  2006-02-03 
22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/freenet/store/DataStore.java  2006-02-03 
22:55:27 UTC (rev 7999)
@@ -1,243 +0,0 @@
-/*
- * Dijjer - A Peer to Peer HTTP Cache
- * Copyright (C) 2004,2005 Change.Tv, Inc
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- * 
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- */
-
-package freenet.store;
-
-import freenet.keys.Key;
-import freenet.support.Logger;
-
-import java.io.*;
-import java.util.*;
-
-public class DataStore extends Store {
-
-    public class ATimeComparator implements Comparator {
-
-               public int compare(Object arg0, Object arg1) {
-                       DataBlock db0 = (DataBlock) arg0;
-                       DataBlock db1 = (DataBlock) arg1;
-                       long a0 = db0.getLastAccessTime();
-                       long a1 = db1.getLastAccessTime();
-                       if(a0 < a1) return -1;
-                       if(a0 > a1) return 1;
-                       return 0;
-               }
-
-       }
-
-       public static final String VERSION = "$Id: DataStore.java,v 1.5 
2005/08/20 21:21:21 amphibian Exp $";    
-
-       private RandomAccessFile _index;
-       private final int blockSize;
-
-       public DataStore(RandomAccessFile indexFile, RandomAccessFile dataFile, 
int blockSize, long maxBlocks) throws Exception {
-           super(dataFile, maxBlocks);
-           _index = indexFile;
-           this.blockSize = blockSize;
-           readStore();
-       }
-       
-       public DataStore(File index, File data, int blockSize, long maxBlocks) 
throws Exception {
-               super(data, maxBlocks);
-               _index = new RandomAccessFile(index, "rw");
-               this.blockSize = blockSize;
-               readStore();
-       }
-
-       public synchronized void shutdown(boolean exit) throws IOException {
-               super.shutdown(exit);
-               _index.close();
-       }
-       
-       protected synchronized void readStore() throws IOException {
-               _index.seek(0);
-               int recordNum = 0;
-
-               Vector v = new Vector();
-               
-               try {
-               while (_index.getFilePointer() < _index.length()) {
-
-                   Key k = Key.read(_index);
-                   long atime = _index.readLong();
-                       DataBlock dataBlock = new DataBlock(recordNum,
-                           k, atime);
-
-                       getKeyMap().put(dataBlock.getKey(), dataBlock);
-                       getRecordNumberList().add(recordNum, dataBlock);
-                       v.add(dataBlock);
-                       recordNum++;
-               }
-               } catch (EOFException e) {
-                       // Chopped off in the middle of a key
-                       Logger.normal(this, "Store index truncated");
-                       return;
-               } finally {
-                       DataBlock[] blocks = (DataBlock[]) v.toArray(new 
DataBlock[v.size()]);
-                       Arrays.sort(blocks, new ATimeComparator());
-                       for(int i=0;i<blocks.length;i++) {
-                               updateLastAccess(blocks[i]);
-                       }
-               }
-       }
-
-       protected void deleteBlock(Block block, boolean wipeFromDisk) throws 
IOException {
-               DataBlock dataBlock = (DataBlock) block;
-               getKeyMap().remove(dataBlock.getKey());
-               getAccessTimeList().remove(dataBlock);
-               if (wipeFromDisk) {
-                       DataBlock lastDataBlock = getLastBlock();
-                       setRecordNumber(dataBlock.getRecordNumber(), 
lastDataBlock);
-                       _index.setLength(_index.length() - 
DataBlock.SIZE_ON_DISK);
-                       getBlockStore().setLength(getBlockStore().length() - 
blockSize);
-               } else {
-                       
getRecordNumberList().remove(dataBlock.getRecordNumber());
-               }
-       }
-
-       public synchronized void addDataAsBlock(Key key, byte[] data) throws 
IOException {
-               if (getKeyMap().containsKey(key)) {
-                   Logger.minor(this, "Already have key: "+key);
-                       return;
-               }
-
-               if (_index.length() / DataBlock.SIZE_ON_DISK < getMaxBlocks()) {
-                       int recnum = (int) (_index.length() / 
DataBlock.SIZE_ON_DISK);
-                       createAndOverwrite(recnum, key, data);
-               } else {
-                       DataBlock oldest = (DataBlock) 
getAccessTimeList().getFirst();
-                       deleteBlock(oldest, false);
-
-                       int recNo = oldest.getRecordNumber();
-                       createAndOverwrite(recNo, key, data);
-               }
-       }
-
-       /**
-        * Moves this record to a new position, overwriting whatever on-disk 
data was there previously, but not deleting the
-        * old on-disk data for this record.
-        *
-        */
-       protected void setRecordNumber(int newRecNo, DataBlock dataBlock) 
throws IOException {
-               if (newRecNo == dataBlock.getRecordNumber()) {
-                       return;
-               }
-               _index.seek((long)newRecNo * DataBlock.SIZE_ON_DISK);
-               dataBlock.getKey().write(_index);
-               _index.writeLong(dataBlock.getLastAccessTime());
-
-               byte[] ba = new byte[blockSize];
-               getBlockStore().seek(dataBlock.positionInDataFile());
-               getBlockStore().readFully(ba);
-               getBlockStore().seek((long)newRecNo * blockSize);
-               getBlockStore().write(ba);
-
-               getRecordNumberList().remove(dataBlock.getRecordNumber());
-               dataBlock.setRecordNumber(newRecNo);
-               getRecordNumberList().add(newRecNo, dataBlock);
-
-
-       }
-
-       /**
-        * Creates a new block, overwriting the data on disk for an existing 
block
-        * (but *not* deleting that block from RAM)
-        */
-       private void createAndOverwrite(int recnum, Key key, byte[] data) 
throws IOException {
-           Logger.minor(this, "createAndOverwrite("+recnum+","+key+")");
-               DataBlock b = new DataBlock(recnum, key, 
System.currentTimeMillis());
-               _index.seek((long)recnum * DataBlock.SIZE_ON_DISK);
-
-               key.write(_index);
-               getKeyMap().put(key, b);
-
-               _index.writeLong(b.getLastAccessTime());
-               getAccessTimeList().addLast(b);
-
-               getBlockStore().seek((long)recnum * blockSize);
-               getBlockStore().write(data);
-               getRecordNumberList().add(recnum, b);
-       }
-
-       public synchronized byte[] getDataForBlock(Key key, boolean 
dontPromote) throws IOException {
-               DataBlock b = getBlockByKey(key);
-               if (b == null) {
-                       return null;
-               } else {
-                   Logger.minor(this, "Reading block: "+b.getRecordNumber());
-                       return readData(b, dontPromote);
-               }
-       }
-
-       public Set getAllKeys() { 
-               return ((Map)getKeyMap().clone()).keySet();
-       }
-       
-       private byte[] readData(DataBlock dataBlock, boolean dontPromote) 
throws IOException {
-               byte[] ba = new byte[blockSize];
-               getBlockStore().seek(dataBlock.positionInDataFile());
-               getBlockStore().readFully(ba);
-               dataBlock.setLastAccessTime(System.currentTimeMillis()) ;
-
-               if(!dontPromote) {
-                       getAccessTimeList().remove(dataBlock);
-                       getAccessTimeList().addLast(dataBlock);
-                       _index.seek(dataBlock.positionInIndexFile() + 
DataBlock.KEY_SIZE);
-                       _index.writeLong(dataBlock.getLastAccessTime());
-               }
-               return ba;
-
-       }
-
-       
-       private DataBlock getBlockByKey(Key key) {
-               return (DataBlock) getKeyMap().get(key);
-       }
-
-       public DataBlock getLastBlock() {
-               return (DataBlock) getRecordNumberList().lastElement();
-       }
-
-       public int getCacheSize() {
-               return getAccessTimeList().size();
-       }
-
-       class DataBlock extends Block {
-
-           public static final String VERSION = "$Id: DataStore.java,v 1.5 
2005/08/20 21:21:21 amphibian Exp $";
-
-               private static final short KEY_SIZE = Key.KEY_SIZE_ON_DISK;
-               private static final short ACCESS_TIME_SIZE = 8;
-               private static final short SIZE_ON_DISK = KEY_SIZE + 
ACCESS_TIME_SIZE;
-
-               public DataBlock(int recordNum, Key key, long accessTime) {
-                       super(recordNum, key, accessTime);
-               }
-
-               public long positionInIndexFile() {
-                       /* key + 8 byte last access time */
-                       return getRecordNumber() * SIZE_ON_DISK;
-               }
-
-               public long positionInDataFile() {
-                       return getRecordNumber() * blockSize;
-               }
-       }
-}
-

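For reference, the DataStore code removed above kept two parallel files addressed by
record number: a fixed-size index record (the key bytes plus an 8-byte access time)
and a fixed-size data block. When the store was full, addDataAsBlock evicted the
least-recently-used record and reused its record number; deleteBlock with wipeFromDisk
moved the last record into the freed slot and truncated both files. A minimal sketch
of the offset arithmetic this relied on; KEY_SIZE here is an assumption standing in
for Key.KEY_SIZE_ON_DISK:

    /** Sketch of the on-disk layout used by the removed DataStore: an index file of
     *  fixed-size records (key + 8-byte access time) and a data file of fixed-size
     *  blocks, both addressed by record number. KEY_SIZE is illustrative only. */
    public class StoreLayoutSketch {
        static final int KEY_SIZE = 32;                        // assumption, not Key.KEY_SIZE_ON_DISK
        static final int INDEX_RECORD_SIZE = KEY_SIZE + 8;     // key + access-time long

        static long positionInIndexFile(int recordNumber) {
            return (long) recordNumber * INDEX_RECORD_SIZE;    // long math avoids int overflow
        }

        static long positionInDataFile(int recordNumber, int blockSize) {
            return (long) recordNumber * blockSize;
        }

        public static void main(String[] args) {
            System.out.println(positionInIndexFile(3));        // 120
            System.out.println(positionInDataFile(3, 32768));  // 98304
        }
    }
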
Modified: branches/freenet-freejvms/src/freenet/store/FreenetStore.java
===================================================================
--- branches/freenet-freejvms/src/freenet/store/FreenetStore.java       
2006-02-03 22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/freenet/store/FreenetStore.java       
2006-02-03 22:55:27 UTC (rev 7999)
@@ -2,8 +2,13 @@

 import java.io.IOException;

+import freenet.crypt.DSAPublicKey;
 import freenet.keys.CHKBlock;
+import freenet.keys.Key;
+import freenet.keys.KeyBlock;
 import freenet.keys.NodeCHK;
+import freenet.keys.NodeSSK;
+import freenet.keys.SSKBlock;

 /**
  * Datastore interface
@@ -15,10 +20,27 @@
      * @param dontPromote If true, don't promote data if fetched.
      * @return null if there is no such block stored, otherwise the block.
      */
-    public CHKBlock fetch(NodeCHK chk, boolean dontPromote) throws IOException;
+    public CHKBlock fetch(NodeCHK key, boolean dontPromote) throws IOException;

     /**
+     * Retrieve a block.
+     * @param dontPromote If true, don't promote data if fetched.
+     * @return null if there is no such block stored, otherwise the block.
+     */
+    public SSKBlock fetch(NodeSSK key, boolean dontPromote) throws IOException;
+
+    /**
+     * Fetch a public key.
+     */
+    public DSAPublicKey fetchPubKey(byte[] hash, boolean dontPromote) throws 
IOException;
+    
+    /**
      * Store a block.
      */
-    public void put(CHKBlock block) throws IOException;
-}
\ No newline at end of file
+    public void put(KeyBlock block) throws IOException;
+    
+    /**
+     * Store a public key.
+     */
+    public void put(byte[] hash, DSAPublicKey key) throws IOException;
+}

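The FreenetStore interface above now serves three kinds of lookups: CHK blocks by
NodeCHK, SSK blocks by NodeSSK, and DSA public keys by hash, with put(KeyBlock) as the
single entry point for storing blocks. A minimal in-memory sketch of that contract
follows; it is not the real on-disk store, and everything beyond the method shapes in
the hunk is illustrative (byte arrays stand in for the block and key classes):

    import java.util.Arrays;
    import java.util.HashMap;
    import java.util.Map;

    /** In-memory sketch of the extended store contract: per-type block tables plus a
     *  public-key table keyed by hash. Illustrative only, not FreenetStore itself. */
    public class InMemoryStoreSketch {
        /** Wrapper so byte[] keys get value equality in a HashMap. */
        private static final class HashKey {
            private final byte[] bytes;
            HashKey(byte[] b) { bytes = (byte[]) b.clone(); }
            public boolean equals(Object o) {
                return o instanceof HashKey && Arrays.equals(bytes, ((HashKey) o).bytes);
            }
            public int hashCode() { return Arrays.hashCode(bytes); }
        }

        private final Map chkBlocks = new HashMap(); // routing key -> CHK block bytes
        private final Map sskBlocks = new HashMap(); // routing key -> SSK block bytes
        private final Map pubKeys = new HashMap();   // key hash    -> encoded DSA key

        public synchronized void putCHK(byte[] routingKey, byte[] block) {
            chkBlocks.put(new HashKey(routingKey), block);
        }
        public synchronized void putSSK(byte[] routingKey, byte[] block) {
            sskBlocks.put(new HashKey(routingKey), block);
        }
        public synchronized void putPubKey(byte[] hash, byte[] encodedKey) {
            pubKeys.put(new HashKey(hash), encodedKey);
        }
        public synchronized byte[] fetchCHK(byte[] routingKey) {
            return (byte[]) chkBlocks.get(new HashKey(routingKey));
        }
        public synchronized byte[] fetchSSK(byte[] routingKey) {
            return (byte[]) sskBlocks.get(new HashKey(routingKey));
        }
        public synchronized byte[] fetchPubKey(byte[] hash) {
            return (byte[]) pubKeys.get(new HashKey(hash));
        }
    }
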
Modified: branches/freenet-freejvms/src/freenet/support/BucketTools.java
===================================================================
--- branches/freenet-freejvms/src/freenet/support/BucketTools.java      
2006-02-03 22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/freenet/support/BucketTools.java      
2006-02-03 22:55:27 UTC (rev 7999)
@@ -25,6 +25,7 @@
 import java.security.NoSuchAlgorithmException;
 import gnu.crypto.Registry;

+import freenet.support.io.FileBucket;

 /**
  * Helper functions for working with Buckets.
@@ -410,6 +411,32 @@
                }
        }

+       /** Copy data from an InputStream into a Bucket. */
+       public static void copyFrom(Bucket bucket, InputStream is, long 
truncateLength) throws IOException {
+               OutputStream os = bucket.getOutputStream();
+               byte[] buf = new byte[4096];
+               if(truncateLength < 0) truncateLength = Long.MAX_VALUE;
+               try {
+                       long moved = 0;
+                       while(moved < truncateLength) {
+                               // DO NOT move the (int) inside the Math.min()! 
big numbers truncate to negative numbers.
+                               int bytes = (int) Math.min(buf.length, 
truncateLength - moved);
+                               if(bytes <= 0)
+                                       throw new 
IllegalStateException("bytes="+bytes+", truncateLength="+truncateLength+", 
moved="+moved);
+                               bytes = is.read(buf, 0, bytes);
+                               if(bytes <= 0) {
+                                       if(truncateLength == Long.MAX_VALUE)
+                                               break;
+                                       throw new IOException("Could not move 
required quantity of data: "+bytes+" (moved "+moved+" of "+truncateLength+")");
+                               }
+                               os.write(buf, 0, bytes);
+                               moved += bytes;
+                       }
+               } finally {
+                       os.close();
+               }
+       }
+
        /**
         * Split the data into a series of read-only Bucket's.
         * @param origData The original data Bucket.
@@ -424,6 +451,9 @@
         * the provided bucket, or writing to created buckets.
         */
        public static Bucket[] split(Bucket origData, int splitSize, 
BucketFactory bf) throws IOException {
+               if(origData instanceof FileBucket) {
+                       return ((FileBucket)origData).split(splitSize);
+               }
                long length = origData.size();
                if(length > ((long)Integer.MAX_VALUE) * splitSize)
                        throw new IllegalArgumentException("Way too big!: 
"+length+" for "+splitSize);

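The new BucketTools.copyFrom() above copies at most truncateLength bytes from a stream
into a bucket and fails if the stream ends early (unless no limit was given), and
split() now delegates to FileBucket so file-backed data can be sliced without copying.
A stand-alone sketch of the same copy loop, using plain java.io streams instead of
Buckets and keeping the cast-outside-Math.min caveat the comment in the hunk warns
about:

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.IOException;
    import java.io.InputStream;
    import java.io.OutputStream;

    /** Stand-alone sketch of the copy-with-truncation loop (streams instead of Buckets). */
    public class CopyWithTruncateSketch {
        static long copy(InputStream is, OutputStream os, long truncateLength) throws IOException {
            if (truncateLength < 0) truncateLength = Long.MAX_VALUE; // negative means "no limit"
            byte[] buf = new byte[4096];
            long moved = 0;
            while (moved < truncateLength) {
                // Keep the (int) cast outside Math.min(): the long difference may not fit in an int.
                int wanted = (int) Math.min(buf.length, truncateLength - moved);
                int read = is.read(buf, 0, wanted);
                if (read <= 0) {
                    if (truncateLength == Long.MAX_VALUE) break;     // unlimited copy: EOF is fine
                    throw new IOException("Stream ended after " + moved + " of " + truncateLength);
                }
                os.write(buf, 0, read);
                moved += read;
            }
            return moved;
        }

        public static void main(String[] args) throws IOException {
            ByteArrayOutputStream out = new ByteArrayOutputStream();
            long n = copy(new ByteArrayInputStream(new byte[10000]), out, 8192);
            System.out.println(n + " bytes copied");                 // 8192 bytes copied
        }
    }
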
Modified: branches/freenet-freejvms/src/freenet/support/FileLoggerHook.java
===================================================================
--- branches/freenet-freejvms/src/freenet/support/FileLoggerHook.java   
2006-02-03 22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/freenet/support/FileLoggerHook.java   
2006-02-03 22:55:27 UTC (rev 7999)
@@ -158,7 +158,7 @@
        protected String getHourLogName(Calendar c, boolean compressed) {
                StringBuffer buf = new StringBuffer(50);
                buf.append(baseFilename).append('-');
-               buf.append(Version.buildNumber);
+               buf.append(Version.buildNumber());
                buf.append('-');
                buf.append(c.get(Calendar.YEAR)).append('-');
                pad2digits(buf, c.get(Calendar.MONTH) + 1);
@@ -501,8 +501,8 @@
                                        }
                                }
                                // First field: version
-                               if(nums[0] != Version.buildNumber) {
-                                       Logger.minor(this, "Deleting old log 
from build "+nums[0]+", current="+Version.buildNumber);
+                               if(nums[0] != Version.buildNumber()) {
+                                       Logger.minor(this, "Deleting old log 
from build "+nums[0]+", current="+Version.buildNumber());
                                        // Logs that old are useless
                                        f.delete();
                                        continue;

Copied: branches/freenet-freejvms/src/freenet/support/HTMLDecoder.java (from 
rev 7998, trunk/freenet/src/freenet/support/HTMLDecoder.java)

Copied: branches/freenet-freejvms/src/freenet/support/HTMLEncoder.java (from 
rev 7998, trunk/freenet/src/freenet/support/HTMLEncoder.java)

Copied: 
branches/freenet-freejvms/src/freenet/support/ImmutableByteArrayWrapper.java 
(from rev 7998, 
trunk/freenet/src/freenet/support/ImmutableByteArrayWrapper.java)

Copied: branches/freenet-freejvms/src/freenet/support/IntNumberedItem.java 
(from rev 7998, trunk/freenet/src/freenet/support/IntNumberedItem.java)

Copied: branches/freenet-freejvms/src/freenet/support/LimitedEnumeration.java 
(from rev 7998, trunk/freenet/src/freenet/support/LimitedEnumeration.java)

Copied: branches/freenet-freejvms/src/freenet/support/MultiValueTable.java 
(from rev 7998, trunk/freenet/src/freenet/support/MultiValueTable.java)

Copied: 
branches/freenet-freejvms/src/freenet/support/NumberedItemComparator.java (from 
rev 7998, trunk/freenet/src/freenet/support/NumberedItemComparator.java)

Modified: branches/freenet-freejvms/src/freenet/support/NumberedRecentItems.java
===================================================================
--- branches/freenet-freejvms/src/freenet/support/NumberedRecentItems.java      
2006-02-03 22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/freenet/support/NumberedRecentItems.java      
2006-02-03 22:55:27 UTC (rev 7999)
@@ -28,56 +28,7 @@
         items = new NumberedItem[maxSize];
         count = 0;
         wrapAround = wrap;
-        myComparator = 
-            new Comparator() {
-
-            public int compare(Object o1, Object o2) {
-                int x = ocompare(o1, o2);
-                Logger.minor(this, "compare("+o1+","+o2+") = "+x);
-                return x;
-            }
-            
-            public int ocompare(Object o1, Object o2) {
-                // Nulls at the end of the list
-                if(o1 == null && o2 == null)
-                    return 0; // null == null
-                if(o1 != null && o2 == null)
-                    return 1; // anything > null
-                if(o2 != null && o1 == null)
-                    return -1;
-                long i1, i2;
-                if(o1 instanceof NumberedItem)
-                    i1 = ((NumberedItem)o1).getNumber();
-                else if(o1 instanceof Long)
-                    i1 = ((Long)o1).longValue();
-                else throw new ClassCastException(o1.toString());
-                if(o2 instanceof NumberedItem)
-                    i2 = ((NumberedItem)o2).getNumber();
-                else if(o2 instanceof Long)
-                    i2 = ((Long)o2).longValue();
-                else throw new ClassCastException(o2.toString());
-                if(i1 == i2) return 0;
-                if(!wrapAround) {
-                    if(i1 > i2) return 1;
-                    else return -1;
-                } else {
-                    long firstDistance, secondDistance;
-                    if(i1 > i2) {
-                        firstDistance = i1 - i2; // smaller => i1 > i2
-                        secondDistance = i2 + Long.MAX_VALUE - i1; // smaller 
=> i2 > i1
-                    } else {
-                        secondDistance = i2 - i1; // smaller => i2 > i1
-                        firstDistance = i1 + Long.MAX_VALUE - i2; // smaller 
=> i1 > i2
-                    }
-                    if(Math.abs(firstDistance) < Math.abs(secondDistance)) {
-                        return 1; // i1>i2
-                    } else //if(Math.abs(secondDistance) < 
Math.abs(firstDistance)) {
-                        return -1; // i2>i1
-                    // REDFLAG: base must be odd, so we never get ==
-                }
-            }
-            
-        };
+        myComparator = new NumberedItemComparator(wrap);
     }

     public synchronized NumberedItem get(int num) {

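The anonymous Comparator removed above has moved into the new NumberedItemComparator
class copied in by this revision. Its essential trick is the wrap-aware ordering: with
wrapAround set, whichever number is the shorter distance "ahead" of the other modulo
Long.MAX_VALUE is treated as greater. A simplified stand-alone version of that
ordering (longs only, without the null and NumberedItem handling of the original), as
a sketch rather than the actual class:

    import java.util.Comparator;

    /** Simplified sketch of the wrap-aware ordering; not the real NumberedItemComparator. */
    public class WrapAwareComparatorSketch implements Comparator {
        private final boolean wrapAround;

        public WrapAwareComparatorSketch(boolean wrap) { wrapAround = wrap; }

        public int compare(Object o1, Object o2) {
            long i1 = ((Long) o1).longValue();
            long i2 = ((Long) o2).longValue();
            if (i1 == i2) return 0;
            if (!wrapAround) return i1 > i2 ? 1 : -1;
            long firstDistance, secondDistance;
            if (i1 > i2) {
                firstDistance = i1 - i2;                   // small => i1 is just ahead of i2
                secondDistance = i2 + Long.MAX_VALUE - i1; // small => i2 is just ahead of i1
            } else {
                secondDistance = i2 - i1;
                firstDistance = i1 + Long.MAX_VALUE - i2;
            }
            return firstDistance < secondDistance ? 1 : -1;
        }

        public static void main(String[] args) {
            WrapAwareComparatorSketch c = new WrapAwareComparatorSketch(true);
            // 3 counts as "ahead of" Long.MAX_VALUE - 2 once the counter has wrapped:
            System.out.println(c.compare(new Long(3), new Long(Long.MAX_VALUE - 2))); // 1
        }
    }
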
Modified: 
branches/freenet-freejvms/src/freenet/support/PaddedEphemerallyEncryptedBucket.java
===================================================================
--- 
branches/freenet-freejvms/src/freenet/support/PaddedEphemerallyEncryptedBucket.java
 2006-02-03 22:35:15 UTC (rev 7998)
+++ 
branches/freenet-freejvms/src/freenet/support/PaddedEphemerallyEncryptedBucket.java
 2006-02-03 22:55:27 UTC (rev 7999)
@@ -20,7 +20,7 @@

        private final Bucket bucket;
        private final int minPaddedSize;
-       private final MersenneTwister paddingSource;
+       private final RandomSource origRandom;
        private final Rijndael aes;
        private long dataLength;
        private boolean readOnly;
@@ -35,6 +35,7 @@
         * @throws UnsupportedCipherException 
         */
        public PaddedEphemerallyEncryptedBucket(Bucket bucket, int minSize, 
RandomSource origRandom) throws UnsupportedCipherException {
+               this.origRandom = origRandom;
                this.bucket = bucket;
                if(bucket.size() != 0) throw new 
IllegalArgumentException("Bucket must be empty");
                aes = new Rijndael(256, 256);
@@ -44,7 +45,6 @@
                // Might as well blank it
                for(int i=0;i<key.length;i++) key[i] = 0;
                this.minPaddedSize = minSize;
-               paddingSource = new MersenneTwister(origRandom.nextLong());
                readOnly = false;
                lastOutputStream = 0;
        }
@@ -107,6 +107,7 @@
                                        return;
                                }
                                
synchronized(PaddedEphemerallyEncryptedBucket.this) {
+                                       MersenneTwister paddingSource = new 
MersenneTwister(origRandom.nextLong());
                                        long finalLength = paddedLength();
                                        long padding = finalLength - dataLength;
                                        byte[] buf = new byte[4096];

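The change above stops PaddedEphemerallyEncryptedBucket from holding a MersenneTwister
for its whole lifetime; the padding PRNG is now created from origRandom only at the
point where padding is actually written. A rough sketch of that padding step, with
java.util.Random standing in for RandomSource/MersenneTwister and a caller-supplied
final length standing in for paddedLength():

    import java.io.ByteArrayOutputStream;
    import java.io.IOException;
    import java.io.OutputStream;
    import java.util.Random;

    /** Sketch of the deferred-PRNG padding step; Random stands in for MersenneTwister. */
    public class PaddingSketch {
        static void pad(OutputStream os, long dataLength, long finalLength, Random masterSource)
                throws IOException {
            Random paddingSource = new Random(masterSource.nextLong()); // fresh PRNG per close
            long padding = finalLength - dataLength;
            byte[] buf = new byte[4096];
            while (padding > 0) {
                int chunk = (int) Math.min(buf.length, padding);
                paddingSource.nextBytes(buf);
                os.write(buf, 0, chunk);
                padding -= chunk;
            }
        }

        public static void main(String[] args) throws IOException {
            ByteArrayOutputStream bos = new ByteArrayOutputStream();
            pad(bos, 1000, 4096, new Random(42));
            System.out.println(bos.size()); // 3096
        }
    }
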
Copied: branches/freenet-freejvms/src/freenet/support/RandomGrabArray.java 
(from rev 7998, trunk/freenet/src/freenet/support/RandomGrabArray.java)

Copied: branches/freenet-freejvms/src/freenet/support/RandomGrabArrayItem.java 
(from rev 7998, trunk/freenet/src/freenet/support/RandomGrabArrayItem.java)

Copied: 
branches/freenet-freejvms/src/freenet/support/RandomGrabArrayWithClient.java 
(from rev 7998, 
trunk/freenet/src/freenet/support/RandomGrabArrayWithClient.java)

Copied: 
branches/freenet-freejvms/src/freenet/support/RandomGrabArrayWithInt.java (from 
rev 7998, trunk/freenet/src/freenet/support/RandomGrabArrayWithInt.java)

Copied: 
branches/freenet-freejvms/src/freenet/support/ReadOnlyFileSliceBucket.java 
(from rev 7998, trunk/freenet/src/freenet/support/ReadOnlyFileSliceBucket.java)

Copied: 
branches/freenet-freejvms/src/freenet/support/SectoredRandomGrabArray.java 
(from rev 7998, trunk/freenet/src/freenet/support/SectoredRandomGrabArray.java)

Copied: 
branches/freenet-freejvms/src/freenet/support/SectoredRandomGrabArrayWithInt.java
 (from rev 7998, 
trunk/freenet/src/freenet/support/SectoredRandomGrabArrayWithInt.java)

Modified: branches/freenet-freejvms/src/freenet/support/Serializer.java
===================================================================
--- branches/freenet-freejvms/src/freenet/support/Serializer.java       
2006-02-03 22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/freenet/support/Serializer.java       
2006-02-03 22:55:27 UTC (rev 7999)
@@ -29,6 +29,8 @@
 import freenet.io.WritableToDataOutputStream;
 import freenet.io.comm.Peer;
 import freenet.keys.Key;
+import freenet.keys.NodeCHK;
+import freenet.keys.NodeSSK;

 /**
  * @author ian
@@ -79,6 +81,12 @@
                        return new Peer(dis);
                } else if (type.equals(BitArray.class)) {
                        return new BitArray(dis);
+               } else if (type.equals(NodeCHK.class)) {
+                       // Use Key.read(...) because write(...) writes the TYPE 
field.
+                       return (NodeCHK) Key.read(dis);
+               } else if (type.equals(NodeSSK.class)) {
+                       // Use Key.read(...) because write(...) writes the TYPE 
field.
+                       return (NodeSSK) Key.read(dis);
                } else if (type.equals(Key.class)) {
                    return Key.read(dis);
                } else {

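The Serializer cases added above rely on the convention the comments mention: every
Key writes a TYPE field first, so the generic Key.read() can reconstruct the right
subclass (NodeCHK or NodeSSK) on the way back in. A toy sketch of that tagged
read/write round trip; the type codes and record layout here are assumptions, not
Freenet's actual wire format:

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;

    /** Toy sketch of type-tagged key serialization; codes and layout are illustrative. */
    public class TaggedKeySketch {
        static final byte TYPE_CHK = 1;
        static final byte TYPE_SSK = 2;

        static void writeKey(DataOutputStream dos, byte type, byte[] routingKey) throws IOException {
            dos.writeByte(type);               // the TYPE field the comments above refer to
            dos.writeShort(routingKey.length);
            dos.write(routingKey);
        }

        static byte[] readKey(DataInputStream dis) throws IOException {
            byte type = dis.readByte();
            if (type != TYPE_CHK && type != TYPE_SSK)
                throw new IOException("Unknown key type: " + type);
            byte[] routingKey = new byte[dis.readShort()];
            dis.readFully(routingKey);
            return routingKey;                 // a real implementation would build NodeCHK or NodeSSK here
        }

        public static void main(String[] args) throws IOException {
            ByteArrayOutputStream bos = new ByteArrayOutputStream();
            writeKey(new DataOutputStream(bos), TYPE_CHK, new byte[32]);
            byte[] back = readKey(new DataInputStream(new ByteArrayInputStream(bos.toByteArray())));
            System.out.println(back.length);   // 32
        }
    }
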
Modified: branches/freenet-freejvms/src/freenet/support/SimpleFieldSet.java
===================================================================
--- branches/freenet-freejvms/src/freenet/support/SimpleFieldSet.java   
2006-02-03 22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/freenet/support/SimpleFieldSet.java   
2006-02-03 22:55:27 UTC (rev 7999)
@@ -11,6 +11,10 @@
 import java.util.Map;
 import java.util.Set;

+import java.util.Vector;
+
+import freenet.support.io.LineReader;
+
 /**
  * @author amphibian
  * 
@@ -20,12 +24,18 @@
 public class SimpleFieldSet {

     final Map map;
+    String endMarker;

     public SimpleFieldSet(BufferedReader br) throws IOException {
         map = new HashMap();
         read(br);
     }

+    public SimpleFieldSet(LineReader lis, int maxLineLength, int 
lineBufferSize) throws IOException {
+       map = new HashMap();
+       read(lis, maxLineLength, lineBufferSize);
+    }
+    
     /**
      * Empty constructor
      */
@@ -45,6 +55,20 @@
     }

     /**
+     * Construct from a string[].
+     * @throws IOException if the string is too short or invalid.
+     */
+    public SimpleFieldSet(String[] content) throws IOException {
+        map = new HashMap();
+        // Rebuild the lines so read() can parse them. Note String.concat() returns a
+        // new String and does not modify its receiver, so the result must be kept.
+        StringBuffer content2 = new StringBuffer();
+        for(int i=0;i<content.length;i++)
+            content2.append(content[i]).append('\n');
+        StringReader sr = new StringReader(content2.toString());
+        BufferedReader br = new BufferedReader(sr);
+        read(br);
+    }
+    
+    /**
      * Read from disk
      * Format:
      * blah=blah
@@ -67,19 +91,73 @@
                 String after = line.substring(index+1);
                 map.put(before, after);
             } else {
-                if(line.equals("End")) return;
-                throw new IOException("Unknown end-marker: \""+line+"\"");
+               endMarker = line;
+               return;
             }

         }
     }

+    /**
+     * Read from disk
+     * Format:
+     * blah=blah
+     * blah=blah
+     * End
+     */
+    private void read(LineReader br, int maxLength, int bufferSize) throws 
IOException {
+        boolean firstLine = true;
+        while(true) {
+            String line = br.readLine(maxLength, bufferSize);
+            if(line == null) {
+                if(firstLine) throw new EOFException();
+                throw new IOException("Stream ended before the end marker");
+            }
+            firstLine = false;
+            int index = line.indexOf('=');
+            if(index >= 0) {
+                // Mapping
+                String before = line.substring(0, index);
+                String after = line.substring(index+1);
+                map.put(before, after);
+            } else {
+               endMarker = line;
+               return;
+            }
+            
+        }
+    }
+
+    
     public String get(String key) {
         return (String) map.get(key);
     }
+    
+    public String[] getAll(String key) {
+       // put() below stores repeated values for one key as a single ';'-joined
+       // string, so look the key up and split the stored value back apart.
+       String value = (String) map.get(key);
+       if(value == null) return null;
+       Vector v = new Vector();
+       int index;
+       while((index = value.indexOf(';')) != -1) {
+           v.addElement(value.substring(0, index));
+           value = value.substring(index+1);
+       }
+       v.addElement(value);
+       // A plain toArray() returns Object[], which cannot be cast to String[].
+       return (String[]) v.toArray(new String[v.size()]);
+    }

     public void put(String key, String value) {
-        map.put(key, value);
+       String x = (String) map.get(key);
+       
+       if(x == null) {
+               map.put(key, value);
+       } else {
+               map.put(key, ((String)map.get(key))+";"+value);
+       }
     }

     /**
@@ -95,7 +173,10 @@
             String value = (String) entry.getValue();
             w.write(key+"="+value+"\n");
         }
-        w.write("End\n");
+        if(endMarker != null)
+               w.write(endMarker+"\n");
+        else
+               w.write("End\n");
     }

     public String toString() {
@@ -107,4 +188,12 @@
         }
         return sw.toString();
     }
+    
+    public String getEndMarker() {
+       return endMarker;
+    }
+    
+    public void setEndMarker(String s) {
+       endMarker = s;
+    }
 }

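With the changes above, SimpleFieldSet stores repeated put()s for one key as a single
';'-joined string and getAll() splits it back apart, so ';' can no longer appear
inside a value without being misread; the end-marker line ("End", or whatever closed
the input) is now remembered and written back out instead of being rejected. A short
round-trip illustration of the multi-value semantics (illustrative class, not the real
SimpleFieldSet):

    import java.util.HashMap;
    import java.util.Map;
    import java.util.Vector;

    /** Round-trip illustration of the ';'-joined multi-value convention above. */
    public class MultiValueSketch {
        private final Map map = new HashMap();

        public void put(String key, String value) {
            String existing = (String) map.get(key);
            map.put(key, existing == null ? value : existing + ";" + value);
        }

        public String[] getAll(String key) {
            String value = (String) map.get(key);
            if (value == null) return null;
            Vector v = new Vector();
            int index;
            while ((index = value.indexOf(';')) != -1) {
                v.addElement(value.substring(0, index));
                value = value.substring(index + 1);
            }
            v.addElement(value);
            return (String[]) v.toArray(new String[v.size()]);
        }

        public static void main(String[] args) {
            MultiValueSketch fs = new MultiValueSketch();
            fs.put("Files", "a.txt");
            fs.put("Files", "b.txt");
            String[] all = fs.getAll("Files");
            System.out.println(all.length + ": " + all[0] + ", " + all[1]); // 2: a.txt, b.txt
        }
    }
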
Copied: 
branches/freenet-freejvms/src/freenet/support/SimpleIntNumberedItemComparator.java
 (from rev 7998, 
trunk/freenet/src/freenet/support/SimpleIntNumberedItemComparator.java)

Copied: branches/freenet-freejvms/src/freenet/support/SortedVectorByNumber.java 
(from rev 7998, trunk/freenet/src/freenet/support/SortedVectorByNumber.java)

Copied: branches/freenet-freejvms/src/freenet/support/URLDecoder.java (from rev 
7998, trunk/freenet/src/freenet/support/URLDecoder.java)

Copied: 
branches/freenet-freejvms/src/freenet/support/URLEncodedFormatException.java 
(from rev 7998, 
trunk/freenet/src/freenet/support/URLEncodedFormatException.java)

Copied: branches/freenet-freejvms/src/freenet/support/URLEncoder.java (from rev 
7998, trunk/freenet/src/freenet/support/URLEncoder.java)

Modified: 
branches/freenet-freejvms/src/freenet/support/UpdatableSortedLinkedList.java
===================================================================
--- 
branches/freenet-freejvms/src/freenet/support/UpdatableSortedLinkedList.java    
    2006-02-03 22:35:15 UTC (rev 7998)
+++ 
branches/freenet-freejvms/src/freenet/support/UpdatableSortedLinkedList.java    
    2006-02-03 22:55:27 UTC (rev 7999)
@@ -2,8 +2,6 @@

 import java.util.Enumeration;

-import freenet.node.RequestStarterClient;
-
 /**
  * @author amphibian
  * 

Modified: branches/freenet-freejvms/src/freenet/support/io/FileBucket.java
===================================================================
--- branches/freenet-freejvms/src/freenet/support/io/FileBucket.java    
2006-02-03 22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/freenet/support/io/FileBucket.java    
2006-02-03 22:55:27 UTC (rev 7999)
@@ -11,6 +11,7 @@
 import freenet.crypt.RandomSource;
 import freenet.support.Bucket;
 import freenet.support.Logger;
+import freenet.support.ReadOnlyFileSliceBucket;

 /**
  * A file Bucket is an implementation of Bucket that writes to a file.
@@ -299,4 +300,19 @@
        public void dontDeleteOnFinalize() {
                deleteOnFinalize = false;
        }
+
+       public Bucket[] split(int splitSize) {
+               if(length > ((long)Integer.MAX_VALUE) * splitSize)
+                       throw new IllegalArgumentException("Way too big!: 
"+length+" for "+splitSize);
+               int bucketCount = (int) (length / splitSize);
+               if(length % splitSize > 0) bucketCount++;
+               Bucket[] buckets = new Bucket[bucketCount];
+               for(int i=0;i<buckets.length;i++) {
+                       // Multiply in long: i * splitSize overflows int for files over 2GB.
+                       long startAt = (long)i * splitSize;
+                       long endAt = Math.min(((long)i+1) * splitSize, length);
+                       long len = endAt - startAt;
+                       buckets[i] = new ReadOnlyFileSliceBucket(file, startAt, 
len);
+               }
+               return buckets;
+       }
 }

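FileBucket.split() above carves one file into ReadOnlyFileSliceBucket views instead of
copying the data into new buckets, so the only real work is the slice arithmetic, and
that has to be done in long to survive files over 2 GB (note the casts in the hunk). A
small sketch of just that arithmetic:

    /** Sketch of the slice arithmetic used when splitting a file-backed bucket. */
    public class SplitBoundsSketch {
        /** Returns {startAt, sliceLength} for slice i of a file of the given length. */
        static long[] sliceBounds(long length, int splitSize, int i) {
            long startAt = (long) i * splitSize;               // long math: int product overflows >2GB
            long endAt = Math.min(((long) i + 1) * splitSize, length);
            return new long[] { startAt, endAt - startAt };
        }

        public static void main(String[] args) {
            long length = 3L * 1024 * 1024 * 1024;             // a 3 GB file
            int splitSize = 32768;
            int buckets = (int) (length / splitSize) + (length % splitSize > 0 ? 1 : 0);
            long[] last = sliceBounds(length, splitSize, buckets - 1);
            System.out.println(buckets + " slices, last starts at " + last[0] + ", length " + last[1]);
        }
    }
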
Copied: branches/freenet-freejvms/src/freenet/support/io/LineReader.java (from 
rev 7998, trunk/freenet/src/freenet/support/io/LineReader.java)

Copied: 
branches/freenet-freejvms/src/freenet/support/io/LineReadingInputStream.java 
(from rev 7998, 
trunk/freenet/src/freenet/support/io/LineReadingInputStream.java)

Copied: branches/freenet-freejvms/src/freenet/support/io/TooLongException.java 
(from rev 7998, trunk/freenet/src/freenet/support/io/TooLongException.java)

Modified: branches/freenet-freejvms/src/net/i2p/util/NativeBigInteger.java
===================================================================
--- branches/freenet-freejvms/src/net/i2p/util/NativeBigInteger.java    
2006-02-03 22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/net/i2p/util/NativeBigInteger.java    
2006-02-03 22:55:27 UTC (rev 7999)
@@ -98,15 +98,17 @@
      */
     private static final boolean _doLog = true;

-    private final static String JBIGI_OPTIMIZATION_K6         = "k6";
-    private final static String JBIGI_OPTIMIZATION_K6_2       = "k62";
-    private final static String JBIGI_OPTIMIZATION_K6_3       = "k63";
-    private final static String JBIGI_OPTIMIZATION_ATHLON   = "athlon";
-    private final static String JBIGI_OPTIMIZATION_PENTIUM    = "pentium";
-    private final static String JBIGI_OPTIMIZATION_PENTIUMMMX = "pentiummmx";
-    private final static String JBIGI_OPTIMIZATION_PENTIUM2 = "pentium2";
-    private final static String JBIGI_OPTIMIZATION_PENTIUM3 = "pentium3";
-    private final static String JBIGI_OPTIMIZATION_PENTIUM4 = "pentium4";
+    private final static String JBIGI_OPTIMIZATION_K6          = "k6";
+    private final static String JBIGI_OPTIMIZATION_K6_2        = "k62";
+    private final static String JBIGI_OPTIMIZATION_K6_3                = "k63";
+    private final static String JBIGI_OPTIMIZATION_ATHLON      = "athlon";
+    private final static String JBIGI_OPTIMIZATION_X86_64      = "x86_64";
+    private final static String JBIGI_OPTIMIZATION_PENTIUM     = "pentium";
+    private final static String JBIGI_OPTIMIZATION_PENTIUMMMX  = "pentiummmx";
+    private final static String JBIGI_OPTIMIZATION_PENTIUM2    = "pentium2";
+    private final static String JBIGI_OPTIMIZATION_PENTIUM3    = "pentium3";
+    private final static String JBIGI_OPTIMIZATION_PENTIUM4    = "pentium4";
+    private final static String JBIGI_OPTIMIZATION_PPC                 = "osx";

     private final static String sCPUType; //The CPU Type to optimize for (one 
of the above strings)

@@ -406,7 +408,7 @@
         URL resource = 
NativeBigInteger.class.getClassLoader().getResource(resourceName);
         if (resource == null) {
             if (_doLog)
-                System.err.println("NOTICE: Resource name [" + resourceName + 
"] was not found");
+                System.err.println("NOTICE: Resource name [" + 
getResourceName(true) + "] was not found");
             return false;
         }

@@ -469,20 +471,26 @@
        boolean isWindows 
=(System.getProperty("os.name").toLowerCase().indexOf("windows") != -1);
        boolean isLinux 
=(System.getProperty("os.name").toLowerCase().indexOf("linux") != -1);
        boolean isFreebsd 
=(System.getProperty("os.name").toLowerCase().indexOf("freebsd") != -1);
+       boolean isMacOS 
=(System.getProperty("os.name").toLowerCase().indexOf("mac os x") != -1);
        if(isWindows)
                        return "jbigi-windows"+sAppend; // The convention on 
Windows
                if(isLinux)
                        return "jbigi-linux"+sAppend; // The convention on 
linux...
                if(isFreebsd)
                        return "jbigi-freebsd"+sAppend; // The convention on 
freebsd...
+               if(isMacOS)
+                       return "jbigi-osx"+sAppend; // The convention on Mac OS X...
                throw new RuntimeException("Dont know jbigi library name for os 
type '"+System.getProperty("os.name")+"'");
     }
     private static final String getLibrarySuffix()
     {
        boolean isWindows 
=System.getProperty("os.name").toLowerCase().indexOf("windows") != -1;
+       boolean isMacOS 
=(System.getProperty("os.name").toLowerCase().indexOf("mac os x") != -1);
        if(isWindows)
                return "dll";
-       else
+       else if(isMacOS)
+               return "jnilib";
+       else
                return "so";
     }
     private static final String getLibraryPrefix()

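The NativeBigInteger changes above add x86_64 and PPC/OS X optimization targets and
teach the loader the Mac OS X conventions (a "jbigi-osx" resource name and a .jnilib
suffix). A compact sketch of the resulting os.name-to-library-name mapping; the exact
concatenation of prefix, CPU string and suffix here is an assumption, simplified into
a single helper:

    /** Sketch of the jbigi library-name mapping after the change above (simplified). */
    public class JbigiNameSketch {
        static String libraryName(String osName, String cpu) {
            String os = osName.toLowerCase();
            String prefix;
            String suffix;
            if (os.indexOf("windows") != -1)       { prefix = "jbigi-windows-"; suffix = ".dll"; }
            else if (os.indexOf("mac os x") != -1) { prefix = "jbigi-osx-";     suffix = ".jnilib"; }
            else if (os.indexOf("freebsd") != -1)  { prefix = "jbigi-freebsd-"; suffix = ".so"; }
            else if (os.indexOf("linux") != -1)    { prefix = "jbigi-linux-";   suffix = ".so"; }
            else throw new RuntimeException("Don't know jbigi library name for os type '" + osName + "'");
            return prefix + cpu + suffix;
        }

        public static void main(String[] args) {
            System.out.println(libraryName("Mac OS X", "osx"));  // jbigi-osx-osx.jnilib
            System.out.println(libraryName("Linux", "x86_64"));  // jbigi-linux-x86_64.so
        }
    }
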
Copied: branches/freenet-freejvms/src/snmplib (from rev 7998, 
trunk/freenet/src/snmplib)

Deleted: branches/freenet-freejvms/src/snmplib/BERDecoder.java
===================================================================
--- trunk/freenet/src/snmplib/BERDecoder.java   2006-02-03 22:35:15 UTC (rev 
7998)
+++ branches/freenet-freejvms/src/snmplib/BERDecoder.java       2006-02-03 
22:55:27 UTC (rev 7999)
@@ -1,120 +0,0 @@
-package snmplib;
-
-import java.util.Stack;
-
-public class BERDecoder {
-       private byte[] buf;
-       private int ptr = 0;
-       private Stack seqStack;
-       
-       public BERDecoder(byte[] buf) {
-               this.buf = buf;
-               seqStack = new Stack();
-       }
-       
-       public void startSequence() throws BadFormatException {
-               startSequence((byte)0x30);
-       }
-       
-       public void startSequence(byte id) throws BadFormatException {
-               if (buf[ptr] != id)
-                       throw new BadFormatException("Unknown Sequence");
-               ptr++;
-               int len = readBERInt();
-               seqStack.push(new Integer(ptr + len));
-               seqStack.push(new Integer(len));
-       }
-       
-       public void endSequence() throws BadFormatException {
-               int length = ((Integer)seqStack.pop()).intValue();
-               int pos = ((Integer)seqStack.pop()).intValue();
-               if (pos != ptr)
-                       throw new BadFormatException("Wrong length of field " + 
-                                       length + ":" + pos + ":" + ptr);
-       }
-       
-       public byte peekRaw() {
-               return buf[ptr];
-       }
-       
-       public long[] fetchOID() throws BadFormatException {
-               startSequence((byte)0x06);
-               long[] ret = readOID();
-               endSequence();
-               return ret;
-       }
-       
-       private long[] readOID() throws BadFormatException {
-               if (buf[ptr] != 0x2b)
-                       throw new BadFormatException("Bad start of OID");
-               int inptr = ptr;
-               ptr++;
-               int length = ((Integer)seqStack.peek()).intValue();
-               if (length < 2)
-                       return new long[0];
-               long ret[] = new long[length]; // it won't getlonger then this
-               int i;
-               for(i = 0; i < length ; i++) {
-                       ret[i] = readBERInt();
-                       if ((ptr - inptr) >= length)
-                               break;
-               }
-               
-               if (i < length) { // Bring out the scissors
-                       long ret2[] = (long[])ret.clone();
-                       ret = new long[i + 1];
-                       for ( ; i >= 0 ; i--)
-                               ret[i] = ret2[i];
-               }
-               return ret;
-       }
-       
-       
-       public byte[] fetchOctetString() throws BadFormatException {
-               startSequence((byte)0x04);
-               byte[] ret = readOctetString();
-               endSequence();
-               return ret;
-       }
-       
-       private byte[] readOctetString() {
-               int length = ((Integer)seqStack.peek()).intValue();
-               byte ret[] = new byte[length];
-               for(int i = 0; i < length ; i++) {
-                       ret[i] = buf[ptr++];
-               }
-               return ret;
-       }
-
-       
-       public int fetchInt() throws BadFormatException {
-               startSequence((byte)0x02);
-               int ret = readInt();
-               endSequence();
-               return ret;
-       }
-       
-       private int readInt() {
-               int length = ((Integer)seqStack.peek()).intValue();
-               int ret = 0;
-               for ( ; length > 0 ; length--) {
-                       ret = ret * 256;
-                       ret = ret + ((((int)buf[ptr])+256)%256);
-                       ptr++;
-               }
-               return ret;
-       }
-       
-       
-       
-       private int readBERInt() {
-               int ret = 0;
-               do {
-                       ret = ret * 128;
-                       ret = ret + ((((int)buf[ptr])+128)%128);
-                       ptr++;
-               } while (buf[ptr-1] < 0);
-               return ret;
-       }
-
-}

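The snmplib classes replaced here (each delete is immediately re-added as a copy from
trunk) hand-roll BER's base-128 integer coding: readBERInt() in BERDecoder and
intToBERBytes() in BEREncoder both use seven payload bits per byte with the high bit
marking continuation. A stand-alone sketch of that coding, written forwards for
clarity (the encoder above builds its buffer backwards):

    /** Stand-alone sketch of base-128 ("BER") integer coding: 7 payload bits per byte,
     *  high bit set on every byte except the last. Illustration, not the removed code. */
    public class Base128Sketch {
        static int encode(long value, byte[] out, int offset) {
            // Count the 7-bit groups needed, most significant first.
            int groups = 1;
            for (long v = value >>> 7; v != 0; v >>>= 7) groups++;
            for (int i = 0; i < groups; i++) {
                int shift = 7 * (groups - 1 - i);
                int bits = (int) ((value >>> shift) & 0x7f);
                out[offset + i] = (byte) (i == groups - 1 ? bits : bits | 0x80);
            }
            return groups;
        }

        static long decode(byte[] in, int offset) {
            long value = 0;
            int i = offset;
            while (true) {
                value = (value << 7) | (in[i] & 0x7f);
                if ((in[i] & 0x80) == 0) return value;
                i++;
            }
        }

        public static void main(String[] args) {
            byte[] buf = new byte[10];
            int len = encode(300, buf, 0);  // 300 -> 0x82 0x2c
            System.out.println(len + " bytes, round-trip = " + decode(buf, 0)); // 2 bytes, round-trip = 300
        }
    }
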
Copied: branches/freenet-freejvms/src/snmplib/BERDecoder.java (from rev 7998, 
trunk/freenet/src/snmplib/BERDecoder.java)

Deleted: branches/freenet-freejvms/src/snmplib/BEREncoder.java
===================================================================
--- trunk/freenet/src/snmplib/BEREncoder.java   2006-02-03 22:35:15 UTC (rev 
7998)
+++ branches/freenet-freejvms/src/snmplib/BEREncoder.java       2006-02-03 
22:55:27 UTC (rev 7999)
@@ -1,181 +0,0 @@
-package snmplib;
-
-import java.util.Stack;
-import java.util.Vector;
-
-public class BEREncoder {
-       private IDVector fields;
-       private Stack fstack;
-       
-       public BEREncoder() {
-               this((byte)0x30);
-       }
-       
-       public BEREncoder(byte id) {
-               fields = new IDVector(id);
-               fstack = new Stack();
-               fstack.add(fields);
-       }
-       
-       
-       public int toBytes(byte[] buf) {
-               while (fields.size() > 1)
-                       endSequence();
-               
-               int len = vecToBytes(fields, buf, 0);
-               byte tmpbt;
-               // Remember.. this function writes backwards first!
-               for (int i = 0 ; i < len/2 ; i++) {
-                       tmpbt = buf[i];
-                       buf[i] = buf[len - 1 - i];
-                       buf[len - 1 - i] = tmpbt;
-               }
-                       
-               return len;
-       }
-       
-       private int vecToBytes(IDVector v, byte[] buf, int offset) {
-               int inoffset = offset;
-               for (int i = v.size() - 1 ; i >= 0 ; i--) {
-                       Object o = v.get(i); 
-//                     if (o instanceof Integer) {
-//                             int dlen = intToBytes(((Integer)o).intValue(), 
buf, offset);
-                       if (o instanceof Long) {
-                               int dlen = intToBytes(((Long)o).longValue(), 
buf, offset);
-                               offset += dlen;
-                               offset += intToBERBytes(dlen, buf, offset);
-                               buf[offset++] = 0x02;
-                       } else if (o instanceof IDVector) {
-                               int dlen = vecToBytes((IDVector)o, buf, offset);
-                               offset += dlen;
-                               offset += intToBERBytes(dlen, buf, offset);
-                               buf[offset++] = ((IDVector)o).getVid();
-                       } else if (o instanceof ByteArrWrapper) {
-                               byte[] barr = ((ByteArrWrapper)o).arr;
-                               for (int j = 0 ; j < barr.length ; j++)
-                                       buf[offset + j] = barr[barr.length - 1 
- j];
-                               offset += barr.length;
-                               offset += intToBERBytes(barr.length, buf, 
offset);
-                               buf[offset++] = ((ByteArrWrapper)o).id;
-                       }
-//                             myoffset += intToBytes(v.get(i), buf, myoffset);
-
-               }
-               
-               return (offset - inoffset);
-       }
-       
-       
-       private int intToBytes(long i, byte[] buf, int offset) {
-               // TODO: handle negative numbers also!!!!
-               int inoffset = offset;
-               if (i == 0) {
-                       buf[offset++] = 0;
-               } else {
-                       for (; i > 0 ; i = i / 256) {
-                               buf[offset] = (byte)(i % 256);
-                               offset++;
-                       }
-               }
-               // make the number unsigned
-               if (buf[offset-1]<0)
-                       buf[offset++] = 0;
-               return (offset - inoffset);
-       }
-       
-       private int intToBERBytes(long i, byte[] buf, int offset) {
-               String bs = Long.toBinaryString(i);
-               int len = (bs.length()%7);
-               bs = ("0000000" + bs).substring(len);
-               char bits[] = bs.toCharArray();
-               int eatenbits = 0;
-               buf[offset] = 0;
-               int inoffset = offset; 
-               //for (int j = bits.length - 1 ; j >= 0 ; j--) {
-               for (int j = 0 ; j < bits.length ; j++) {
-                       if (eatenbits == 7) {
-                               buf[offset] += 128;
-                               offset++;
-                               eatenbits = 0;
-                               buf[offset] = 0;
-                       }
-                       
-                       buf[offset] |= (bits[j]=='1'?1:0) << (6 - eatenbits);
-                       eatenbits++;
-               }
-               offset++;
-               return (offset - inoffset);
-       }
-       
-       
-       
-       /*public void putInteger(int i) {
-               addToTop(new Integer(i));
-       }*/
-       
-       public void putInteger(long i) {
-               addToTop(new Long(i));
-       }
-       
-       public void putOctetString(byte buf[]) {
-               addToTop(new ByteArrWrapper((byte[])buf.clone(), (byte)0x04));
-       }
-
-       public void putOID(long buf[]) {
-               byte bufa[] = new byte[10*buf.length];
-               int offset = 1;
-               bufa[0] = 0x2b;
-               for (int i = 0 ; i < buf.length ; i++) {
-                       offset += intToBERBytes(buf[i], bufa, offset);
-               }
-               byte bufb[] = new byte[offset];
-               for (int i = 0 ; i < bufb.length ; i++)
-                       bufb[i] = bufa[i];
-               
-               addToTop(new ByteArrWrapper(bufb, (byte)0x06));
-       }
-
-       public void startSequence() {
-               startSequence((byte)0x30);
-       }
-       
-       public void startSequence(byte id) {
-               IDVector v = new IDVector(id);
-               addToTop(v);
-               fstack.add(v);
-       }
-       
-       public void endSequence() {
-               fstack.pop();
-       }
-       
-       private void addToTop(Object o) {
-               ((IDVector)fstack.peek()).addElement(o);
-       }
-       
-       private class ByteArrWrapper {
-               public byte arr[];
-               public byte id;
-               
-               public ByteArrWrapper(byte arr[], byte id) {
-                       this.arr = arr;
-                       this.id = id;
-               }
-       }
-       
-       // unpublic
-       private class IDVector extends Vector {
-               private static final long serialVersionUID = 
2689317091785298027L;
-               byte vid = (byte)0x30;
-               
-               public IDVector(byte id) {
-                       super();
-                       vid = id;
-               }
-               
-               public byte getVid() {
-                       return vid;
-               }
-
-       }
-}

Copied: branches/freenet-freejvms/src/snmplib/BEREncoder.java (from rev 7998, 
trunk/freenet/src/snmplib/BEREncoder.java)

Deleted: branches/freenet-freejvms/src/snmplib/BadFormatException.java
===================================================================
--- trunk/freenet/src/snmplib/BadFormatException.java   2006-02-03 22:35:15 UTC 
(rev 7998)
+++ branches/freenet-freejvms/src/snmplib/BadFormatException.java       
2006-02-03 22:55:27 UTC (rev 7999)
@@ -1,7 +0,0 @@
-package snmplib;
-
-public class BadFormatException extends Exception {
-       public BadFormatException(String s) {
-               super(s);
-       }
-}

Copied: branches/freenet-freejvms/src/snmplib/BadFormatException.java (from rev 
7998, trunk/freenet/src/snmplib/BadFormatException.java)

Deleted: branches/freenet-freejvms/src/snmplib/DataConstantInt.java
===================================================================
--- trunk/freenet/src/snmplib/DataConstantInt.java      2006-02-03 22:35:15 UTC 
(rev 7998)
+++ branches/freenet-freejvms/src/snmplib/DataConstantInt.java  2006-02-03 
22:55:27 UTC (rev 7999)
@@ -1,19 +0,0 @@
-package snmplib;
-
-public class DataConstantInt implements DataFetcher {
-       private String OID;
-       private int value;
-       
-       public DataConstantInt(String oID, int value) {
-               this.OID = oID;
-               this.value = value;
-       }
-       
-       public String getSNMPOID() {
-               return OID;
-       }
-
-       public Object getSNMPData() {
-               return new Integer(value);
-       }
-}

Copied: branches/freenet-freejvms/src/snmplib/DataConstantInt.java (from rev 
7998, trunk/freenet/src/snmplib/DataConstantInt.java)

Deleted: branches/freenet-freejvms/src/snmplib/DataConstantString.java
===================================================================
--- trunk/freenet/src/snmplib/DataConstantString.java   2006-02-03 22:35:15 UTC 
(rev 7998)
+++ branches/freenet-freejvms/src/snmplib/DataConstantString.java       
2006-02-03 22:55:27 UTC (rev 7999)
@@ -1,19 +0,0 @@
-package snmplib;
-
-public class DataConstantString implements DataFetcher {
-       private String OID;
-       private String value;
-       
-       public DataConstantString(String oID, String value) {
-               this.OID = oID;
-               this.value = value;
-       }
-       
-       public String getSNMPOID() {
-               return OID;
-       }
-
-       public Object getSNMPData() {
-               return value;
-       }
-}

Copied: branches/freenet-freejvms/src/snmplib/DataConstantString.java (from rev 
7998, trunk/freenet/src/snmplib/DataConstantString.java)

Deleted: branches/freenet-freejvms/src/snmplib/DataFetcher.java
===================================================================
--- trunk/freenet/src/snmplib/DataFetcher.java  2006-02-03 22:35:15 UTC (rev 
7998)
+++ branches/freenet-freejvms/src/snmplib/DataFetcher.java      2006-02-03 
22:55:27 UTC (rev 7999)
@@ -1,10 +0,0 @@
-package snmplib;
-
-public interface DataFetcher {
-       
-       public String getSNMPOID();
-       
-       /* Must return an Integer or a String */
-       public Object getSNMPData();
-
-}

Copied: branches/freenet-freejvms/src/snmplib/DataFetcher.java (from rev 7998, 
trunk/freenet/src/snmplib/DataFetcher.java)

Deleted: branches/freenet-freejvms/src/snmplib/DataStatisticsInfo.java
===================================================================
--- trunk/freenet/src/snmplib/DataStatisticsInfo.java   2006-02-03 22:35:15 UTC 
(rev 7998)
+++ branches/freenet-freejvms/src/snmplib/DataStatisticsInfo.java       
2006-02-03 22:55:27 UTC (rev 7999)
@@ -1,35 +0,0 @@
-package snmplib;
-
-import freenet.io.comm.IOStatisticCollector;
-
-public class DataStatisticsInfo implements DataFetcher {
-       private String OID;
-       int blocks;
-       boolean in;
-       
-       public DataStatisticsInfo(int blocks, boolean in) {
-               this.OID = "1.1." + blocks + "." + (in?"1":"0");
-               //System.err.println("adding: " + this.OID);
-               this.in = in;
-               this.blocks = blocks;
-       }
-       
-       public String getSNMPOID() {
-               //System.err.println("        " + this.OID);
-               return OID;
-       }
-
-       public Object getSNMPData() {
-               if (blocks == 0) {
-                       long io[] = IOStatisticCollector.getTotalIO();
-                       return new Long(io[in?1:0]);
-               }
-               // else sum all fields up to <blocks>
-               int res = 0;
-               int stats[][] = IOStatisticCollector.getTotalStatistics();
-               for (int i = 0 ; i < blocks ; i++)
-                       res += stats[i][in?1:0];
-               
-               return new Long(res);
-       }
-}

Copied: branches/freenet-freejvms/src/snmplib/DataStatisticsInfo.java (from rev 
7998, trunk/freenet/src/snmplib/DataStatisticsInfo.java)

Deleted: branches/freenet-freejvms/src/snmplib/InfoSystem.java
===================================================================
--- trunk/freenet/src/snmplib/InfoSystem.java   2006-02-03 22:35:15 UTC (rev 
7998)
+++ branches/freenet-freejvms/src/snmplib/InfoSystem.java       2006-02-03 
22:55:27 UTC (rev 7999)
@@ -1,37 +0,0 @@
-package snmplib;
-
-public class InfoSystem implements MultiplexedDataFetcher {
-       long created;
-
-       public InfoSystem() {
-               created = System.currentTimeMillis()/1000;
-       }
-       
-       public String getSNMPOID(int index) {
-               switch (index) {
-               case 3: //SNMPv2-MIB::sysUpTime.0
-                       return ".1.3.6.1.2.1.1.3.0";
-               case 0: //UCD-SNMP-MIB::memTotalReal.0
-                       return ".1.3.6.1.4.1.2021.4.5.0";
-               case 1: //UCD-SNMP-MIB::memAvailReal.0
-                       return ".1.3.6.1.4.1.2021.4.6.0";
-               }
-               // default
-               return null;
-       }
-
-       public Object getSNMPData(String oid) {
-               Runtime r = Runtime.getRuntime();
-               int oidhc = oid.hashCode();
-               if (oid.equals(".1.3.6.1.2.1.1.3.0")) //SNMPv2-MIB::sysUpTime.0
-                       return new Long(System.currentTimeMillis()/1000 - 
created);
-               if (oid.equals(".1.3.6.1.4.1.2021.4.5.0")) 
//UCD-SNMP-MIB::memTotalReal.0
-                       return new Long(r.totalMemory());
-               if (oid.equals(".1.3.6.1.4.1.2021.4.6.0")) 
//UCD-SNMP-MIB::memAvailReal.0
-                       return new Long(r.freeMemory());
-               
-               return null;
-       }
-       
-
-}

Copied: branches/freenet-freejvms/src/snmplib/InfoSystem.java (from rev 7998, 
trunk/freenet/src/snmplib/InfoSystem.java)

Deleted: branches/freenet-freejvms/src/snmplib/MultiplexedDataFetcher.java
===================================================================
--- trunk/freenet/src/snmplib/MultiplexedDataFetcher.java       2006-02-03 
22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/snmplib/MultiplexedDataFetcher.java   
2006-02-03 22:55:27 UTC (rev 7999)
@@ -1,11 +0,0 @@
-package snmplib;
-
-public interface MultiplexedDataFetcher {
-       
-       /* Return null when the last OID is reached */
-       public String getSNMPOID(int index);
-       
-       /* Must return an Integer or a String */
-       public Object getSNMPData(String oid);
-
-}

Copied: branches/freenet-freejvms/src/snmplib/MultiplexedDataFetcher.java (from 
rev 7998, trunk/freenet/src/snmplib/MultiplexedDataFetcher.java)

Deleted: branches/freenet-freejvms/src/snmplib/SNMPAgent.java
===================================================================
--- trunk/freenet/src/snmplib/SNMPAgent.java    2006-02-03 22:35:15 UTC (rev 
7998)
+++ branches/freenet-freejvms/src/snmplib/SNMPAgent.java        2006-02-03 
22:55:27 UTC (rev 7999)
@@ -1,310 +0,0 @@
-package snmplib;
-
-import java.io.BufferedReader;
-import java.io.IOException;
-import java.net.DatagramPacket;
-import java.net.DatagramSocket;
-import java.net.InetAddress;
-import java.util.Iterator;
-import java.util.TreeMap;
-
-public class SNMPAgent implements Runnable {
-       private int port = 4445;
-
-       /**
-        * @param args
-        */
-       public static void main(String[] args) throws IOException {
-               SNMPAgent.getSNMPAgent().addFetcher(new 
DataConstantInt("1.1.1", 10));
-               SNMPAgent.getSNMPAgent().addFetcher(new 
DataConstantInt("1.1.2", 20));
-               SNMPAgent.getSNMPAgent().addFetcher(new 
DataConstantInt("1.1.3", 30));
-               SNMPAgent.getSNMPAgent().addFetcher(new 
DataConstantInt("1.1.4", 40));
-               SNMPAgent.getSNMPAgent().addFetcher(new 
DataConstantInt("1.1.5", 50));
-               SNMPAgent.getSNMPAgent().addFetcher(new 
DataConstantString("1.1.0", "Step by 10"));
-               SNMPAgent.getSNMPAgent().addFetcher(new 
DataConstantString("1.2", "Nothing here"));
-       }
-       
-    protected DatagramSocket socket = null;
-    protected BufferedReader in = null;
-    protected boolean moreQuotes = true;
-    private TreeMap alldata;
-    private static SNMPAgent _SNMPAgent = null;
-
-    public static void setSNMPPort(int port) {
-       ensureCreated();
-       _SNMPAgent.port = port;
-       restartSNMPAgent();
-    }
-    
-    public static void restartSNMPAgent() {
-       ensureCreated();
-       _SNMPAgent.stopRunning();
-       new Thread(_SNMPAgent).start();
-    }
-    
-    public static SNMPAgent getSNMPAgent() {
-       ensureCreated();
-       return _SNMPAgent;
-    }
-    
-    private static void ensureCreated() {
-       if (_SNMPAgent == null)
-               _SNMPAgent = new SNMPAgent();
-    }
-    
-    private SNMPAgent() {
-       alldata = new TreeMap();
-    }
-    
-    public void addFetcher(DataFetcher df) {
-       //DataHandler dh = new DataHandler(df);
-       //alldata.put(dh.getStringOID(), dh);
-       alldata.put(df.getSNMPOID().replaceAll("^\\.1\\.3\\.",""), df);
-       //System.err.println("sAdded: " + df.getSNMPOID() + "as" + 
df.getSNMPOID().replaceAll("^\\.1\\.3\\.",""));
-    }
-    
-    public void addFetcher(MultiplexedDataFetcher df) {
-       String oid;
-       for (int i = 0 ; (oid = df.getSNMPOID(i)) != null ; i++) {
-               alldata.put(oid.replaceAll("^\\.1\\.3\\.",""), df);
-       //      System.err.println("mAdded: " + oid + " as: " + 
oid.replaceAll("^\\.1\\.3\\.",""));
-       }
-    }
-
-    public void removeFetcher(String OID) {
-       alldata.remove(((OID.startsWith("\\."))?"":".") + OID);
-    }
-    
-    public void stopRunning() {
-       try {
-               socket.close();
-       } catch (Throwable e) {
-               // prpbably since not running...
-       }
-    }
-
-    public void run() {
-       try {
-               socket = new DatagramSocket(port, 
InetAddress.getByName("localhost"));
-       } catch (IOException e) {
-               e.printStackTrace();
-               return ;
-       }
-       // make smaller.... 0484 enough?
-        byte[] buf = new byte[65535];
-        DatagramPacket packet = new DatagramPacket(buf, buf.length);
-
-        while (socket.isBound()) {
-            try {
-                socket.receive(packet);
-
-                RequestContainer rc = new RequestContainer();
-                
-                parseRequest(buf, rc);
-                
-                int replylength = 0;
-                boolean keyfound = false;
-                //DataHandler dh = null;
-                
-                Iterator it = alldata.keySet().iterator();
-                String key = "";
-                if (rc.OID.length() == 0)
-                       rc.OID = "";
-                
-                while (it.hasNext() && !keyfound) {
-                       key = (String)it.next();
-                       //System.err.println("is '"+ rc.OID + "' in: " + key);
-                       if (key.startsWith(rc.OID))
-                               keyfound = true;
-                }
-
-                // keyfound /\ (equal -> hasnext)
-                //System.err.println("("+keyfound+" && 
(!"+key.equals(rc.OID)+" || "+it.hasNext()+"))");
-                if (keyfound && (!key.equals(rc.OID) || it.hasNext())) {
-                       key = key.equals(rc.OID)?(String)it.next():key;
-                       
-                       Object df = alldata.get(key);
-                       //Object key = null;
-                       Object data = null;
-                       //dh = (DataHandler)alldata.get(key);
-                       //rc.lOID = (long[])dh.lOID.clone();
-                       if (df instanceof DataFetcher) {
-                               data = ((DataFetcher)df).getSNMPData();
-                       } else if (df instanceof MultiplexedDataFetcher) {
-                               data = 
((MultiplexedDataFetcher)df).getSNMPData(key);
-                               if (data == null)
-                                       data = 
((MultiplexedDataFetcher)df).getSNMPData(".1.3."+key);
-                       } else
-                               data = new Integer(0);
-                       
-                       rc.lOID = splitToLong(key);
-                       //System.err.println(key);
-                       //for (int i = 0; i < rc.lOID.length ; i++)
-                               //      System.err.print("." + rc.lOID[i]);
-                       
-
-                       replylength = makeIntReply(buf, rc, data);
-                } else {
-                       if (rc.lOID.length > 0)
-                               rc.lOID[0] = 100;
-                       else {
-                               rc.lOID = new long[1];
-                               rc.lOID[0] = 0;
-                       }
-                       replylength = makeIntReply(buf, rc, new Integer(1));
-                }
-                
-                // send the response to the client at "address" and "port"
-                InetAddress address = packet.getAddress();
-                int port = packet.getPort();
-                packet = new DatagramPacket(buf, replylength, address, port);
-                socket.send(packet);
-                
-            } catch (IOException e) {
-                e.printStackTrace();
-                break;
-            } catch (BadFormatException e) {
-               e.printStackTrace();
-               //System.err.println(e.toString());
-            } catch (ArrayIndexOutOfBoundsException e) {
-               e.printStackTrace();
-               // not much to do.. ignore the request and it'll time out
-            }
-        }
-        socket.close();
-    }
-    
-    private int makeIntReply(byte buf[], RequestContainer rc, Object data) /* 
throws SnmpTooBigException */ {
-       int replyLength = 0;
-       BEREncoder be = new BEREncoder();
-       be.startSequence(); // whole pkg
-       be.putInteger(0); // version
-       be.putOctetString(rc.community); // community
-       be.startSequence((byte)0xa2); // Response
-       be.putInteger(rc.requestID); // RID
-       be.putInteger(0); // error-status
-       be.putInteger(0); // error-index
-       be.startSequence(); // varbind list
-       be.startSequence(); // varbind
-       be.putOID(rc.lOID); // oid
-       
-       if (data instanceof Integer)
-               be.putInteger(((Integer)data).intValue());
-       else if (data instanceof Long)
-               be.putInteger(((Long)data).longValue());
-       else if (data instanceof String) {
-               char[] charr = ((String)data).toCharArray();
-               byte[] byarr = new byte[charr.length];
-               for (int i = 0 ; i < charr.length ; i++)
-                       byarr[i] = (byte)charr[i];
-               be.putOctetString(byarr);
-       }
-       
-       replyLength = be.toBytes(buf);
-       
-       return replyLength;
-    }
-    
-    // http://www.rane.com/note161.html
-    private void parseRequest(byte buf[], RequestContainer rc) throws BadFormatException {
-       int tmpint;
-       
-       BERDecoder bd = new BERDecoder(buf);
-       bd.startSequence();
-       if ((tmpint = bd.fetchInt()) != 0)
-               throw new BadFormatException("Wrong version, expected 0, got "
-                               + tmpint);
-       
-       rc.community = bd.fetchOctetString();
-       if (! rc.setPDU(bd.peekRaw()))
-               throw new BadFormatException("Unknown PDU");
-       bd.startSequence(bd.peekRaw());
-       rc.requestID = bd.fetchInt();
-       
-       // TODO: care about errors eventually?
-       bd.fetchInt();
-       bd.fetchInt();
-       
-       bd.startSequence();
-       bd.startSequence();
-       rc.lOID = bd.fetchOID();
-       rc.OID = (rc.lOID.length == 0)?".":"";
-       for (int i = 0; i < rc.lOID.length ; i++)
-               rc.OID += (i==0?"":".") + rc.lOID[i];
-       
-    }
-    
-    private long[] splitToLong(String list) {
-       String nums[] = list.split("\\.");
-       long ret[] = new long[nums.length];
-       for(int i = 0; i < ret.length ; i++)
-               ret[i] = Long.parseLong(nums[i]);
-       return ret;
-    }
-    /*
-       private class DataHandler {
-       //public Integer data;
-       public long lOID[] = null;
-       DataFetcher df;
-       
-       public DataHandler(DataFetcher df) {
-               lOID = splitToLong(df.getSNMPOID());
-               this.df = df;
-       }
-       
-        private long[] splitToLong(String list) {
-               String nums[] = list.split("\\.");
-               long ret[] = new long[nums.length];
-               for(int i = 0; i < ret.length ; i++)
-                       ret[i] = Long.parseLong(nums[i]);
-               return ret;
-        }
-
-        public Object getData() {
-               return df.getSNMPData(); 
-        }
-       
-       public String getStringOID() {
-               String ret = "";
-                       for (int i = 0; i < lOID.length ; i++)
-                               ret += "." + lOID[i];
-               return ret;
-       }
-    }
-    */
-    
-    
-    
-
-    private class RequestContainer {
-       public long lOID[] = null;
-       public byte community[] = null;
-       public int pdutype = 0;
-       public static final int PDU_GET_NEXT = 2;
-       public static final int PDU_GET_THIS = 1;
-       public int requestID = 0;
-       public String OID = null;
-       
-       public boolean setPDU(byte id) {
-               switch(id) {
-               case (byte)0xA0:
-                       pdutype = PDU_GET_THIS;
-               break;
-               
-               case (byte)0xA1:
-                       pdutype = PDU_GET_NEXT;
-               break;
-               
-               default:
-                       //System.err.println("Unknown PDU: 0x" + Integer.toHexString((id + 256)%256));
-                       return false;
-               }
-               return true;
-       }
-       
-       public String toString() {
-               return ("Community: " + new String(community) +
-                               ", PDU: " + pdutype + ", OID: " + OID);
-       }
-    }
-}
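
For context, the walk logic near the top of this hunk resolves a GET-NEXT request by scanning the registered OID keys for the first one that starts with the requested OID and, on an exact match, stepping to the following key. Below is a minimal sketch of that lookup, assuming the agent's alldata map iterates its keys in lexicographic OID order (the map's declaration is not part of this hunk); the OID values are hypothetical and purely illustrative.

    import java.util.TreeMap;

    // Sketch of the GET-NEXT style lookup used by SNMPAgent above.
    // Assumption: keys iterate in lexicographic order (here a TreeMap);
    // the real container behind "alldata" is declared outside this hunk.
    public class NextOidSketch {

        // Returns the first registered OID under the requested prefix,
        // advancing past an exact match, or null when the walk runs off the end.
        static String nextOid(TreeMap<String, Object> alldata, String requested) {
            String match = null;
            for (String key : alldata.keySet()) {
                if (key.startsWith(requested)) {
                    match = key;
                    break;
                }
            }
            if (match == null)
                return null;                    // nothing registered under this prefix
            if (!match.equals(requested))
                return match;                   // prefix hit: first key in the subtree
            return alldata.higherKey(match);    // exact hit: take the next key, like it.next()
        }

        public static void main(String[] args) {
            TreeMap<String, Object> alldata = new TreeMap<String, Object>();
            alldata.put("1.3.6.1.4.1.1.1", Integer.valueOf(1));   // hypothetical OIDs
            alldata.put("1.3.6.1.4.1.1.2", Integer.valueOf(2));
            System.out.println(nextOid(alldata, "1.3.6.1.4.1"));     // 1.3.6.1.4.1.1.1
            System.out.println(nextOid(alldata, "1.3.6.1.4.1.1.1")); // 1.3.6.1.4.1.1.2
            System.out.println(nextOid(alldata, "1.3.6.1.4.1.1.2")); // null -> end of walk
        }
    }

When the walk falls off the end, the code above instead rewrites the OID and answers with a constant, which is what lets a client-side snmpwalk terminate cleanly.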

Copied: branches/freenet-freejvms/src/snmplib/SNMPAgent.java (from rev 7998, trunk/freenet/src/snmplib/SNMPAgent.java)

Deleted: branches/freenet-freejvms/src/snmplib/SNMPStarter.java
===================================================================
--- trunk/freenet/src/snmplib/SNMPStarter.java  2006-02-03 22:35:15 UTC (rev 7998)
+++ branches/freenet-freejvms/src/snmplib/SNMPStarter.java      2006-02-03 22:55:27 UTC (rev 7999)
@@ -1,27 +0,0 @@
-package snmplib;
-
-import snmplib.DataStatisticsInfo;
-import freenet.io.comm.IOStatisticCollector;
-
-/**
- * @author cyberdo
- *
- * Creates the SNMP-agent
- */
-public class SNMPStarter {
-
-       private static boolean has_been_runned = false;
-
-       public static void initialize() {
-               //SNMPAgent.setSNMPPort(port);
-               if (has_been_runned) return;
-                       // 0 is total I/O
-               for (int i = 0 ; i < IOStatisticCollector.STATISTICS_ENTRIES ; i++) {
-                       SNMPAgent.getSNMPAgent().addFetcher(new DataStatisticsInfo(i, true));
-                       SNMPAgent.getSNMPAgent().addFetcher(new DataStatisticsInfo(i, false));
-               }
-               SNMPAgent.getSNMPAgent().addFetcher(new InfoSystem());
-               
-               has_been_runned = true;
-       }
-}
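
The initialize() method above registers the statistics fetchers that SNMPAgent serves. Below is a minimal sketch of a custom fetcher, assuming DataFetcher exposes getSNMPOID() and getSNMPData() as the calls in SNMPAgent suggest (the real interface declaration is not part of this diff); the OID and the uptime value are purely illustrative.

    // Assumed shape of DataFetcher, inferred from the calls in SNMPAgent above;
    // the actual snmplib declaration is not shown in this diff.
    interface DataFetcher {
        String getSNMPOID();
        Object getSNMPData();
    }

    // Hypothetical fetcher publishing the agent's uptime in seconds.
    public class UptimeFetcher implements DataFetcher {

        private final long started = System.currentTimeMillis();

        public String getSNMPOID() {
            return "1.3.6.1.4.1.999.1.1";   // illustrative OID, not a registered one
        }

        public Object getSNMPData() {
            // makeIntReply() above encodes Integer, Long and String payloads,
            // so any of those types would be acceptable here.
            return Long.valueOf((System.currentTimeMillis() - started) / 1000);
        }

        public static void main(String[] args) {
            DataFetcher f = new UptimeFetcher();
            System.out.println(f.getSNMPOID() + " = " + f.getSNMPData());
        }
    }

Such a fetcher would presumably be registered the same way initialize() does above, e.g. SNMPAgent.getSNMPAgent().addFetcher(new UptimeFetcher());.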

Copied: branches/freenet-freejvms/src/snmplib/SNMPStarter.java (from rev 7998, trunk/freenet/src/snmplib/SNMPStarter.java)

