Author: toad
Date: 2009-04-01 20:34:09 +0000 (Wed, 01 Apr 2009)
New Revision: 26322
Added:
trunk/freenet/src/freenet/client/ArchiveHandlerImpl.java
trunk/freenet/src/freenet/client/FECCallback.java
trunk/freenet/src/freenet/client/FECQueue.java
trunk/freenet/src/freenet/client/TempFetchResult.java
trunk/freenet/src/freenet/client/async/ChosenBlock.java
trunk/freenet/src/freenet/client/async/ClientContext.java
trunk/freenet/src/freenet/client/async/ClientRequestSchedulerBase.java
trunk/freenet/src/freenet/client/async/ClientRequestSchedulerCore.java
trunk/freenet/src/freenet/client/async/ClientRequestSchedulerNonPersistent.java
trunk/freenet/src/freenet/client/async/CooldownQueue.java
trunk/freenet/src/freenet/client/async/DBJob.java
trunk/freenet/src/freenet/client/async/DBJobRunner.java
trunk/freenet/src/freenet/client/async/DatastoreChecker.java
trunk/freenet/src/freenet/client/async/DatastoreCheckerItem.java
trunk/freenet/src/freenet/client/async/Db4oBugs.java
trunk/freenet/src/freenet/client/async/Encodeable.java
trunk/freenet/src/freenet/client/async/HasKeyListener.java
trunk/freenet/src/freenet/client/async/InsertCompressor.java
trunk/freenet/src/freenet/client/async/KeyListener.java
trunk/freenet/src/freenet/client/async/KeyListenerConstructionException.java
trunk/freenet/src/freenet/client/async/NoValidBlocksException.java
trunk/freenet/src/freenet/client/async/PersistentChosenBlock.java
trunk/freenet/src/freenet/client/async/PersistentChosenRequest.java
trunk/freenet/src/freenet/client/async/PersistentCooldownQueue.java
trunk/freenet/src/freenet/client/async/PersistentCooldownQueueItem.java
trunk/freenet/src/freenet/client/async/PersistentSendableRequestSet.java
trunk/freenet/src/freenet/client/async/RegisterMe.java
trunk/freenet/src/freenet/client/async/SendableRequestSet.java
trunk/freenet/src/freenet/client/async/SingleKeyListener.java
trunk/freenet/src/freenet/client/async/SplitFileFetcherKeyListener.java
trunk/freenet/src/freenet/client/async/TransientChosenBlock.java
trunk/freenet/src/freenet/client/async/TransientSendableRequestSet.java
trunk/freenet/src/freenet/client/async/USKFetcherTag.java
trunk/freenet/src/freenet/client/async/USKManagerPersistent.java
trunk/freenet/src/freenet/config/NullBooleanCallback.java
trunk/freenet/src/freenet/config/NullIntCallback.java
trunk/freenet/src/freenet/config/NullLongCallback.java
trunk/freenet/src/freenet/config/NullShortCallback.java
trunk/freenet/src/freenet/config/NullStringCallback.java
trunk/freenet/src/freenet/node/BulkCallFailureItem.java
trunk/freenet/src/freenet/node/HandlePortTuple.java
trunk/freenet/src/freenet/node/NodeRestartJobsQueue.java
trunk/freenet/src/freenet/node/NullSendableRequestItem.java
trunk/freenet/src/freenet/node/RequestClient.java
trunk/freenet/src/freenet/node/SendableGetRequestSender.java
trunk/freenet/src/freenet/node/SendableRequestItem.java
trunk/freenet/src/freenet/node/SendableRequestSender.java
trunk/freenet/src/freenet/node/SessionKey.java
trunk/freenet/src/freenet/node/SupportsBulkCallFailure.java
trunk/freenet/src/freenet/node/fcp/FCPPersistentRoot.java
trunk/freenet/src/freenet/support/BinaryBloomFilter.java
trunk/freenet/src/freenet/support/BloomFilter.java
trunk/freenet/src/freenet/support/CountingBloomFilter.java
trunk/freenet/src/freenet/support/DebuggingHashMap.java
trunk/freenet/src/freenet/support/NullBloomFilter.java
trunk/freenet/src/freenet/support/NullObject.java
trunk/freenet/src/freenet/support/PrioritizedSerialExecutor.java
trunk/freenet/src/freenet/support/RemoveRandomParent.java
trunk/freenet/src/freenet/support/io/BucketArrayWrapper.java
trunk/freenet/src/freenet/support/io/BucketChainBucketFactory.java
trunk/freenet/src/freenet/support/io/NotPersistentBucket.java
trunk/freenet/src/freenet/support/io/PersistentBlobTempBucket.java
trunk/freenet/src/freenet/support/io/PersistentBlobTempBucketFactory.java
trunk/freenet/src/freenet/support/io/PersistentBlobTempBucketTag.java
trunk/freenet/src/freenet/support/io/SegmentedBucketChainBucket.java
trunk/freenet/src/freenet/support/io/SegmentedBucketChainBucketKillJob.java
trunk/freenet/src/freenet/support/io/SegmentedChainBucketSegment.java
trunk/freenet/src/net/
trunk/freenet/src/net/i2p/
trunk/freenet/src/net/i2p/util/
trunk/freenet/src/net/i2p/util/NativeBigInteger.java
trunk/freenet/test/net/
trunk/freenet/test/net/i2p/
trunk/freenet/test/net/i2p/util/
trunk/freenet/test/net/i2p/util/NativeBigIntegerTest.java
Removed:
trunk/freenet/src/freenet/node/SessionKey.java
trunk/freenet/src/freenet/support/BinaryBloomFilter.java
trunk/freenet/src/freenet/support/BloomFilter.java
trunk/freenet/src/freenet/support/CountingBloomFilter.java
trunk/freenet/src/freenet/support/NullBloomFilter.java
trunk/freenet/src/freenet/support/io/BucketChainBucketFactory.java
trunk/freenet/src/net/
trunk/freenet/src/net/i2p/
trunk/freenet/src/net/i2p/util/
trunk/freenet/src/net/i2p/util/NativeBigInteger.java
trunk/freenet/test/net/
trunk/freenet/test/net/i2p/
trunk/freenet/test/net/i2p/util/
trunk/freenet/test/net/i2p/util/NativeBigIntegerTest.java
Modified:
trunk/freenet/
trunk/freenet/.classpath
trunk/freenet/src/freenet/client/
trunk/freenet/src/freenet/client/ArchiveContext.java
trunk/freenet/src/freenet/client/ArchiveExtractCallback.java
trunk/freenet/src/freenet/client/ArchiveHandler.java
trunk/freenet/src/freenet/client/ArchiveManager.java
trunk/freenet/src/freenet/client/ArchiveStoreContext.java
trunk/freenet/src/freenet/client/ArchiveStoreItem.java
trunk/freenet/src/freenet/client/ClientMetadata.java
trunk/freenet/src/freenet/client/FECCodec.java
trunk/freenet/src/freenet/client/FECJob.java
trunk/freenet/src/freenet/client/FailureCodeTracker.java
trunk/freenet/src/freenet/client/FetchContext.java
trunk/freenet/src/freenet/client/FetchException.java
trunk/freenet/src/freenet/client/FetchWaiter.java
trunk/freenet/src/freenet/client/HighLevelSimpleClient.java
trunk/freenet/src/freenet/client/HighLevelSimpleClientImpl.java
trunk/freenet/src/freenet/client/InsertBlock.java
trunk/freenet/src/freenet/client/InsertContext.java
trunk/freenet/src/freenet/client/InsertException.java
trunk/freenet/src/freenet/client/Metadata.java
trunk/freenet/src/freenet/client/PutWaiter.java
trunk/freenet/src/freenet/client/RealArchiveStoreItem.java
trunk/freenet/src/freenet/client/SplitfileBlock.java
trunk/freenet/src/freenet/client/StandardOnionFECCodec.java
trunk/freenet/src/freenet/client/async/BackgroundBlockEncoder.java
trunk/freenet/src/freenet/client/async/BaseClientGetter.java
trunk/freenet/src/freenet/client/async/BaseClientPutter.java
trunk/freenet/src/freenet/client/async/BaseSingleFileFetcher.java
trunk/freenet/src/freenet/client/async/BinaryBlobInserter.java
trunk/freenet/src/freenet/client/async/BlockSet.java
trunk/freenet/src/freenet/client/async/ClientCallback.java
trunk/freenet/src/freenet/client/async/ClientGetState.java
trunk/freenet/src/freenet/client/async/ClientGetter.java
trunk/freenet/src/freenet/client/async/ClientPutState.java
trunk/freenet/src/freenet/client/async/ClientPutter.java
trunk/freenet/src/freenet/client/async/ClientRequestScheduler.java
trunk/freenet/src/freenet/client/async/ClientRequester.java
trunk/freenet/src/freenet/client/async/GetCompletionCallback.java
trunk/freenet/src/freenet/client/async/HealingQueue.java
trunk/freenet/src/freenet/client/async/ManifestElement.java
trunk/freenet/src/freenet/client/async/MinimalSplitfileBlock.java
trunk/freenet/src/freenet/client/async/MultiPutCompletionCallback.java
trunk/freenet/src/freenet/client/async/OfferedKeysList.java
trunk/freenet/src/freenet/client/async/PutCompletionCallback.java
trunk/freenet/src/freenet/client/async/RequestCooldownQueue.java
trunk/freenet/src/freenet/client/async/SimpleBlockSet.java
trunk/freenet/src/freenet/client/async/SimpleHealingQueue.java
trunk/freenet/src/freenet/client/async/SimpleManifestPutter.java
trunk/freenet/src/freenet/client/async/SimpleSingleFileFetcher.java
trunk/freenet/src/freenet/client/async/SingleBlockInserter.java
trunk/freenet/src/freenet/client/async/SingleFileFetcher.java
trunk/freenet/src/freenet/client/async/SingleFileInserter.java
trunk/freenet/src/freenet/client/async/SplitFileFetcher.java
trunk/freenet/src/freenet/client/async/SplitFileFetcherSegment.java
trunk/freenet/src/freenet/client/async/SplitFileFetcherSubSegment.java
trunk/freenet/src/freenet/client/async/SplitFileInserter.java
trunk/freenet/src/freenet/client/async/SplitFileInserterSegment.java
trunk/freenet/src/freenet/client/async/USKCallback.java
trunk/freenet/src/freenet/client/async/USKChecker.java
trunk/freenet/src/freenet/client/async/USKCheckerCallback.java
trunk/freenet/src/freenet/client/async/USKFetcher.java
trunk/freenet/src/freenet/client/async/USKFetcherCallback.java
trunk/freenet/src/freenet/client/async/USKFetcherWrapper.java
trunk/freenet/src/freenet/client/async/USKInserter.java
trunk/freenet/src/freenet/client/async/USKManager.java
trunk/freenet/src/freenet/client/async/USKProxyCompletionCallback.java
trunk/freenet/src/freenet/client/async/USKRetriever.java
trunk/freenet/src/freenet/client/events/ClientEventListener.java
trunk/freenet/src/freenet/client/events/ClientEventProducer.java
trunk/freenet/src/freenet/client/events/EventDumper.java
trunk/freenet/src/freenet/client/events/EventLogger.java
trunk/freenet/src/freenet/client/events/SimpleEventProducer.java
trunk/freenet/src/freenet/client/events/SplitfileProgressEvent.java
trunk/freenet/src/freenet/clients/http/FProxyToadlet.java
trunk/freenet/src/freenet/clients/http/QueueToadlet.java
trunk/freenet/src/freenet/clients/http/StatisticsToadlet.java
trunk/freenet/src/freenet/clients/http/Toadlet.java
trunk/freenet/src/freenet/clients/http/bookmark/BookmarkManager.java
trunk/freenet/src/freenet/clients/http/filter/CSSReadFilter.java
trunk/freenet/src/freenet/clients/http/filter/HTMLFilter.java
trunk/freenet/src/freenet/clients/http/filter/JPEGFilter.java
trunk/freenet/src/freenet/clients/http/filter/PNGFilter.java
trunk/freenet/src/freenet/config/SubConfig.java
trunk/freenet/src/freenet/crypt/DSAGroup.java
trunk/freenet/src/freenet/crypt/DSAPrivateKey.java
trunk/freenet/src/freenet/crypt/DSAPublicKey.java
trunk/freenet/src/freenet/io/comm/RetrievalException.java
trunk/freenet/src/freenet/keys/CHKBlock.java
trunk/freenet/src/freenet/keys/ClientCHK.java
trunk/freenet/src/freenet/keys/ClientCHKBlock.java
trunk/freenet/src/freenet/keys/ClientKey.java
trunk/freenet/src/freenet/keys/ClientSSK.java
trunk/freenet/src/freenet/keys/ClientSSKBlock.java
trunk/freenet/src/freenet/keys/FreenetURI.java
trunk/freenet/src/freenet/keys/InsertableClientSSK.java
trunk/freenet/src/freenet/keys/InsertableUSK.java
trunk/freenet/src/freenet/keys/Key.java
trunk/freenet/src/freenet/keys/KeyBlock.java
trunk/freenet/src/freenet/keys/NodeCHK.java
trunk/freenet/src/freenet/keys/NodeSSK.java
trunk/freenet/src/freenet/keys/SSKBlock.java
trunk/freenet/src/freenet/keys/USK.java
trunk/freenet/src/freenet/l10n/freenet.l10n.en.properties
trunk/freenet/src/freenet/node/BaseSendableGet.java
trunk/freenet/src/freenet/node/FNPPacketMangler.java
trunk/freenet/src/freenet/node/FailureTable.java
trunk/freenet/src/freenet/node/KeysFetchingLocally.java
trunk/freenet/src/freenet/node/LowLevelGetException.java
trunk/freenet/src/freenet/node/LowLevelPutException.java
trunk/freenet/src/freenet/node/Node.java
trunk/freenet/src/freenet/node/NodeARKInserter.java
trunk/freenet/src/freenet/node/NodeClientCore.java
trunk/freenet/src/freenet/node/NodeCrypto.java
trunk/freenet/src/freenet/node/NodeInitException.java
trunk/freenet/src/freenet/node/PeerNode.java
trunk/freenet/src/freenet/node/RequestScheduler.java
trunk/freenet/src/freenet/node/RequestSender.java
trunk/freenet/src/freenet/node/RequestStarter.java
trunk/freenet/src/freenet/node/RequestStarterGroup.java
trunk/freenet/src/freenet/node/SSKInsertSender.java
trunk/freenet/src/freenet/node/SemiOrderedShutdownHook.java
trunk/freenet/src/freenet/node/SendableGet.java
trunk/freenet/src/freenet/node/SendableInsert.java
trunk/freenet/src/freenet/node/SendableRequest.java
trunk/freenet/src/freenet/node/SimpleSendableInsert.java
trunk/freenet/src/freenet/node/TextModeClientInterface.java
trunk/freenet/src/freenet/node/fcp/AddPeer.java
trunk/freenet/src/freenet/node/fcp/AllDataMessage.java
trunk/freenet/src/freenet/node/fcp/ClientGet.java
trunk/freenet/src/freenet/node/fcp/ClientGetMessage.java
trunk/freenet/src/freenet/node/fcp/ClientHelloMessage.java
trunk/freenet/src/freenet/node/fcp/ClientPut.java
trunk/freenet/src/freenet/node/fcp/ClientPutBase.java
trunk/freenet/src/freenet/node/fcp/ClientPutComplexDirMessage.java
trunk/freenet/src/freenet/node/fcp/ClientPutDir.java
trunk/freenet/src/freenet/node/fcp/ClientPutDiskDirMessage.java
trunk/freenet/src/freenet/node/fcp/ClientPutMessage.java
trunk/freenet/src/freenet/node/fcp/ClientRequest.java
trunk/freenet/src/freenet/node/fcp/CloseConnectionDuplicateClientNameMessage.java
trunk/freenet/src/freenet/node/fcp/ConfigData.java
trunk/freenet/src/freenet/node/fcp/DataCarryingMessage.java
trunk/freenet/src/freenet/node/fcp/DataFoundMessage.java
trunk/freenet/src/freenet/node/fcp/DirPutFile.java
trunk/freenet/src/freenet/node/fcp/DirectDirPutFile.java
trunk/freenet/src/freenet/node/fcp/DiskDirPutFile.java
trunk/freenet/src/freenet/node/fcp/EndListPeerNotesMessage.java
trunk/freenet/src/freenet/node/fcp/EndListPeersMessage.java
trunk/freenet/src/freenet/node/fcp/EndListPersistentRequestsMessage.java
trunk/freenet/src/freenet/node/fcp/FCPClient.java
trunk/freenet/src/freenet/node/fcp/FCPConnectionHandler.java
trunk/freenet/src/freenet/node/fcp/FCPConnectionInputHandler.java
trunk/freenet/src/freenet/node/fcp/FCPConnectionOutputHandler.java
trunk/freenet/src/freenet/node/fcp/FCPMessage.java
trunk/freenet/src/freenet/node/fcp/FCPPluginMessage.java
trunk/freenet/src/freenet/node/fcp/FCPPluginReply.java
trunk/freenet/src/freenet/node/fcp/FCPServer.java
trunk/freenet/src/freenet/node/fcp/FinishedCompressionMessage.java
trunk/freenet/src/freenet/node/fcp/GenerateSSKMessage.java
trunk/freenet/src/freenet/node/fcp/GetConfig.java
trunk/freenet/src/freenet/node/fcp/GetFailedMessage.java
trunk/freenet/src/freenet/node/fcp/GetNode.java
trunk/freenet/src/freenet/node/fcp/GetPluginInfo.java
trunk/freenet/src/freenet/node/fcp/GetRequestStatusMessage.java
trunk/freenet/src/freenet/node/fcp/IdentifierCollisionMessage.java
trunk/freenet/src/freenet/node/fcp/ListPeerMessage.java
trunk/freenet/src/freenet/node/fcp/ListPeerNotesMessage.java
trunk/freenet/src/freenet/node/fcp/ListPeersMessage.java
trunk/freenet/src/freenet/node/fcp/ListPersistentRequestsMessage.java
trunk/freenet/src/freenet/node/fcp/ModifyConfig.java
trunk/freenet/src/freenet/node/fcp/ModifyPeer.java
trunk/freenet/src/freenet/node/fcp/ModifyPeerNote.java
trunk/freenet/src/freenet/node/fcp/ModifyPersistentRequest.java
trunk/freenet/src/freenet/node/fcp/NodeData.java
trunk/freenet/src/freenet/node/fcp/NodeHelloMessage.java
trunk/freenet/src/freenet/node/fcp/PeerMessage.java
trunk/freenet/src/freenet/node/fcp/PeerNote.java
trunk/freenet/src/freenet/node/fcp/PeerRemoved.java
trunk/freenet/src/freenet/node/fcp/PersistentGet.java
trunk/freenet/src/freenet/node/fcp/PersistentPut.java
trunk/freenet/src/freenet/node/fcp/PersistentPutDir.java
trunk/freenet/src/freenet/node/fcp/PersistentRequestModifiedMessage.java
trunk/freenet/src/freenet/node/fcp/PersistentRequestRemovedMessage.java
trunk/freenet/src/freenet/node/fcp/PluginInfoMessage.java
trunk/freenet/src/freenet/node/fcp/ProtocolErrorMessage.java
trunk/freenet/src/freenet/node/fcp/PutFailedMessage.java
trunk/freenet/src/freenet/node/fcp/PutFetchableMessage.java
trunk/freenet/src/freenet/node/fcp/PutSuccessfulMessage.java
trunk/freenet/src/freenet/node/fcp/RedirectDirPutFile.java
trunk/freenet/src/freenet/node/fcp/RemovePeer.java
trunk/freenet/src/freenet/node/fcp/RemovePersistentRequest.java
trunk/freenet/src/freenet/node/fcp/RequestCompletionCallback.java
trunk/freenet/src/freenet/node/fcp/SSKKeypairMessage.java
trunk/freenet/src/freenet/node/fcp/ShutdownMessage.java
trunk/freenet/src/freenet/node/fcp/SimpleProgressMessage.java
trunk/freenet/src/freenet/node/fcp/StartedCompressionMessage.java
trunk/freenet/src/freenet/node/fcp/SubscribeUSK.java
trunk/freenet/src/freenet/node/fcp/SubscribeUSKMessage.java
trunk/freenet/src/freenet/node/fcp/SubscribedUSKMessage.java
trunk/freenet/src/freenet/node/fcp/SubscribedUSKUpdate.java
trunk/freenet/src/freenet/node/fcp/TestDDACompleteMessage.java
trunk/freenet/src/freenet/node/fcp/TestDDAReplyMessage.java
trunk/freenet/src/freenet/node/fcp/TestDDARequestMessage.java
trunk/freenet/src/freenet/node/fcp/TestDDAResponseMessage.java
trunk/freenet/src/freenet/node/fcp/URIGeneratedMessage.java
trunk/freenet/src/freenet/node/fcp/UnknownNodeIdentifierMessage.java
trunk/freenet/src/freenet/node/fcp/UnknownPeerNoteTypeMessage.java
trunk/freenet/src/freenet/node/fcp/WatchGlobal.java
trunk/freenet/src/freenet/node/simulator/RealNodeBusyNetworkTest.java
trunk/freenet/src/freenet/node/updater/NodeUpdateManager.java
trunk/freenet/src/freenet/node/updater/NodeUpdater.java
trunk/freenet/src/freenet/node/updater/RevocationChecker.java
trunk/freenet/src/freenet/node/updater/UpdateOverMandatoryManager.java
trunk/freenet/src/freenet/pluginmanager/PluginReplySender.java
trunk/freenet/src/freenet/pluginmanager/PluginReplySenderFCP.java
trunk/freenet/src/freenet/pluginmanager/PluginTalker.java
trunk/freenet/src/freenet/support/BitArray.java
trunk/freenet/src/freenet/support/RandomGrabArray.java
trunk/freenet/src/freenet/support/RandomGrabArrayItem.java
trunk/freenet/src/freenet/support/RandomGrabArrayItemExclusionList.java
trunk/freenet/src/freenet/support/RandomGrabArrayWithClient.java
trunk/freenet/src/freenet/support/RemoveRandom.java
trunk/freenet/src/freenet/support/RemoveRandomWithObject.java
trunk/freenet/src/freenet/support/SectoredRandomGrabArray.java
trunk/freenet/src/freenet/support/SectoredRandomGrabArrayWithInt.java
trunk/freenet/src/freenet/support/SectoredRandomGrabArrayWithObject.java
trunk/freenet/src/freenet/support/SimpleFieldSet.java
trunk/freenet/src/freenet/support/SimpleReadOnlyArrayBucket.java
trunk/freenet/src/freenet/support/SortedVectorByNumber.java
trunk/freenet/src/freenet/support/TransferThread.java
trunk/freenet/src/freenet/support/api/Bucket.java
trunk/freenet/src/freenet/support/api/BucketFactory.java
trunk/freenet/src/freenet/support/compress/CompressJob.java
trunk/freenet/src/freenet/support/compress/Compressor.java
trunk/freenet/src/freenet/support/compress/GzipCompressor.java
trunk/freenet/src/freenet/support/compress/RealCompressor.java
trunk/freenet/src/freenet/support/io/ArrayBucket.java
trunk/freenet/src/freenet/support/io/BaseFileBucket.java
trunk/freenet/src/freenet/support/io/BucketChainBucket.java
trunk/freenet/src/freenet/support/io/BucketTools.java
trunk/freenet/src/freenet/support/io/DelayedFreeBucket.java
trunk/freenet/src/freenet/support/io/FileBucket.java
trunk/freenet/src/freenet/support/io/FileUtil.java
trunk/freenet/src/freenet/support/io/FilenameGenerator.java
trunk/freenet/src/freenet/support/io/MultiReaderBucket.java
trunk/freenet/src/freenet/support/io/NullBucket.java
trunk/freenet/src/freenet/support/io/NullPersistentFileTracker.java
trunk/freenet/src/freenet/support/io/PaddedEphemerallyEncryptedBucket.java
trunk/freenet/src/freenet/support/io/PersistentFileTracker.java
trunk/freenet/src/freenet/support/io/PersistentTempBucketFactory.java
trunk/freenet/src/freenet/support/io/PersistentTempFileBucket.java
trunk/freenet/src/freenet/support/io/ReadOnlyFileSliceBucket.java
trunk/freenet/src/freenet/support/io/TempBucketFactory.java
trunk/freenet/src/freenet/support/io/TempFileBucket.java
trunk/freenet/test/freenet/support/io/MockInputStream.java
Log:
Merge db4o into trunk (!!!)
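
The merge below moves the client layer's persistent state (the request schedulers, the FCP queue, persistent temp buckets) onto db4o, with the new DBJob/DBJobRunner classes serializing all database work onto a single thread. One idiom recurs throughout the diff; the following sketch isolates it. Only the com.db4o calls are real API here; the tag class is illustrative.

    import com.db4o.ObjectContainer;
    import com.db4o.ObjectSet;
    import com.db4o.query.Predicate;

    class Db4oIdiomSketch {
        static class DemoTag { long nodeDBHandle; } // illustrative persistent record

        static void storeTag(ObjectContainer container, DemoTag tag) {
            container.store(tag); // persist, so the work survives a crash
        }

        // db4o loads object graphs lazily: activate to the depth you will read,
        // and deactivate when finished to keep the heap small.
        static void touch(ObjectContainer container, DemoTag tag) {
            container.activate(tag, 1);
            container.deactivate(tag, 1);
        }

        // Native query, as used by ArchiveHandlerImpl.init() below to find
        // work interrupted by a crash.
        static ObjectSet<DemoTag> findTags(ObjectContainer container, final long nodeDBHandle) {
            return container.query(new Predicate<DemoTag>() {
                @Override
                public boolean match(DemoTag tag) {
                    return tag.nodeDBHandle == nodeDBHandle;
                }
            });
        }

        static void finish(ObjectContainer container, DemoTag tag) {
            container.delete(tag); // remove the record once the work is done
        }
    }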
Property changes on: trunk/freenet
___________________________________________________________________
Modified: svn:mergeinfo
- /branches/db4o:25594
/branches/db4o/freenet:24785,25282,25290,25332,25351-25352,25355-25356,25479,25488,25505,25540,25594,25673,25713-25714,25931,25977
+ /branches/db4o:25594
/branches/db4o/freenet:19964-26320
Modified: trunk/freenet/.classpath
===================================================================
--- trunk/freenet/.classpath 2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/.classpath 2009-04-01 20:34:09 UTC (rev 26322)
@@ -4,6 +4,7 @@
<classpathentry including="freenet/|org/" kind="src" path="test"/>
<classpathentry kind="con" path="org.eclipse.jdt.launching.JRE_CONTAINER"/>
<classpathentry kind="lib" path="/usr/share/java/junit.jar"/>
+ <classpathentry kind="lib" path="lib/db4o-7.2.39.10644-java5.jar"/>
<classpathentry kind="lib" path="lib/freenet-ext.jar"/>
<classpathentry kind="output" path="bin"/>
</classpath>
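
The db4o jar is the only new build dependency. For orientation, opening a database with that API looks roughly like this; the tuning shown is illustrative, and the node's real configuration happens during startup, not here.

    import com.db4o.Db4o;
    import com.db4o.ObjectContainer;
    import com.db4o.config.Configuration;

    class OpenDb4oSketch {
        static ObjectContainer open(String filename) {
            Configuration config = Db4o.newConfiguration();
            config.activationDepth(1); // activate shallowly; code activates explicitly as needed
            return Db4o.openFile(config, filename);
        }
    }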
Property changes on: trunk/freenet/src/freenet/client
___________________________________________________________________
Deleted: svn:mergeinfo
-
/branches/db4o/freenet/src/freenet/client:24785,25282,25290,25332,25351-25353,25355-25356,25479,25488,25505,25540,25594,25673,25713-25714,25931,25977
/branches/db4o/src/freenet/client:25594
Modified: trunk/freenet/src/freenet/client/ArchiveContext.java
===================================================================
--- trunk/freenet/src/freenet/client/ArchiveContext.java 2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/client/ArchiveContext.java 2009-04-01 20:34:09 UTC (rev 26322)
@@ -4,6 +4,9 @@
package freenet.client;
import java.util.HashSet;
+
+import com.db4o.ObjectContainer;
+
import freenet.keys.FreenetURI;
/**
@@ -33,4 +36,9 @@
if(soFar.size() > maxArchiveLevels)
throw new ArchiveFailureException(ArchiveFailureException.TOO_MANY_LEVELS);
}
+
+ public void removeFrom(ObjectContainer container) {
+ container.delete(soFar);
+ container.delete(this);
+ }
}
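
removeFrom(ObjectContainer) is the merge's cascading-delete convention: an object removes what it owns from the container, then itself. It must run on the database thread; a hypothetical caller, using the DBJob interface added by this merge ('actx' and the jobRunner wiring are assumed from surrounding code):

    // Sketch: dropping a persistent ArchiveContext once its request completes.
    context.jobRunner.queue(new DBJob() {
        public void run(ObjectContainer container, ClientContext context) {
            container.activate(actx, 1); // ensure fields are loaded
            actx.removeFrom(container);  // deletes soFar, then the context itself
        }
    }, NativeThread.NORM_PRIORITY, false);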
Modified: trunk/freenet/src/freenet/client/ArchiveExtractCallback.java
===================================================================
--- trunk/freenet/src/freenet/client/ArchiveExtractCallback.java 2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/client/ArchiveExtractCallback.java 2009-04-01 20:34:09 UTC (rev 26322)
@@ -1,15 +1,27 @@
package freenet.client;
+import com.db4o.ObjectContainer;
+
+import freenet.client.async.ClientContext;
import freenet.support.api.Bucket;
/** Called when we have extracted an archive, and a specified file either is
* or isn't in it. */
public interface ArchiveExtractCallback {
- /** Got the data */
- public void gotBucket(Bucket data);
+ /** Got the data.
+ * Note that the bucket will be persistent if the caller asked for an off-thread extraction. */
+ public void gotBucket(Bucket data, ObjectContainer container, ClientContext context);
/** Not in the archive */
- public void notInArchive();
+ public void notInArchive(ObjectContainer container, ClientContext context);
+ /** Failed: restart */
+ public void onFailed(ArchiveRestartException e, ObjectContainer container, ClientContext context);
+
+ /** Failed for some other reason */
+ public void onFailed(ArchiveFailureException e, ObjectContainer container, ClientContext context);
+
+ public void removeFrom(ObjectContainer container);
+
}
Modified: trunk/freenet/src/freenet/client/ArchiveHandler.java
===================================================================
--- trunk/freenet/src/freenet/client/ArchiveHandler.java 2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/client/ArchiveHandler.java 2009-04-01 20:34:09 UTC (rev 26322)
@@ -3,46 +3,99 @@
* http://www.gnu.org/ for further details of the GPL. */
package freenet.client;
+import com.db4o.ObjectContainer;
+
+import freenet.client.ArchiveManager.ARCHIVE_TYPE;
+import freenet.client.async.ClientContext;
+import freenet.keys.FreenetURI;
import freenet.support.api.Bucket;
/**
* @author toad
* The public face (to Fetcher, for example) of ArchiveStoreContext.
- * Just has methods for fetching stuff.
+ * Mostly has methods for fetching stuff, but SingleFileFetcher needs to be able
+ * to download and then ask the ArchiveManager to extract it, so we include that
+ * functionality (extractToCache) too. Because ArchiveManager is not persistent,
+ * we have to pass it in to each method.
*/
public interface ArchiveHandler {
/**
* Get the metadata for this ZIP manifest, as a Bucket.
+ * THE RETURNED BUCKET WILL ALWAYS BE NON-PERSISTENT.
+ * @return The metadata as a Bucket, or null.
+ * @param manager The ArchiveManager.
* @throws FetchException If the container could not be fetched.
* @throws MetadataParseException If there was an error parsing intermediary metadata.
*/
public abstract Bucket getMetadata(ArchiveContext archiveContext,
ClientMetadata dm, int recursionLevel,
- boolean dontEnterImplicitArchives)
+ boolean dontEnterImplicitArchives, ArchiveManager manager)
throws ArchiveFailureException, ArchiveRestartException,
MetadataParseException, FetchException;
/**
* Get a file from this ZIP manifest, as a Bucket.
- * If possible, read it from cache. If necessary, refetch the
- * container and extract it. If that fails, throw.
+ * If possible, read it from cache. If not, return null.
+ * THE RETURNED BUCKET WILL ALWAYS BE NON-PERSISTENT.
* @param inSplitZipManifest If true, indicates that the key points to a splitfile zip manifest,
* which means that we need to pass a flag to the fetcher to tell it to pretend it was a straight
* splitfile.
+ * @param manager The ArchiveManager.
* @throws FetchException
* @throws MetadataParseException
*/
public abstract Bucket get(String internalName,
ArchiveContext archiveContext,
ClientMetadata dm, int recursionLevel,
- boolean dontEnterImplicitArchives)
+ boolean dontEnterImplicitArchives, ArchiveManager manager)
throws ArchiveFailureException, ArchiveRestartException,
MetadataParseException, FetchException;
/**
* Get the archive type.
*/
- public abstract short getArchiveType();
+ public abstract ARCHIVE_TYPE getArchiveType();
+ /**
+ * Get the key.
+ */
+ public abstract FreenetURI getKey();
+
+ /**
+ * Unpack a fetched archive to cache, and call the callback if there is one.
+ * @param bucket The downloaded data for the archive.
+ * @param actx The ArchiveContext.
+ * @param element The single element that the caller is especially interested in.
+ * @param callback Callback to be notified whether the content is available, and if so, fed the data.
+ * @param manager The ArchiveManager.
+ * @throws ArchiveFailureException
+ * @throws ArchiveRestartException
+ */
+ public abstract void extractToCache(Bucket bucket, ArchiveContext actx, String element, ArchiveExtractCallback callback, ArchiveManager manager,
+ ObjectContainer container, ClientContext context) throws ArchiveFailureException, ArchiveRestartException;
+
+ /**
+ * Unpack a fetched archive on a separate thread for a persistent caller.
+ * This involves:
+ * - Add a tag to the database so that it will be restarted on a crash.
+ * - Run the actual unpack on a separate thread.
+ * - Copy the data to a persistent bucket.
+ * - Schedule a database job.
+ * - Call the callback.
+ * @param bucket
+ * @param actx
+ * @param element
+ * @param callback
+ * @param container
+ * @param context
+ */
+ public abstract void extractPersistentOffThread(Bucket bucket, boolean freeBucket, ArchiveContext actx, String element, ArchiveExtractCallback callback, ObjectContainer container, ClientContext context);
+
+ public abstract void activateForExecution(ObjectContainer container);
+
+ public abstract ArchiveHandler cloneHandler();
+
+ public abstract void removeFrom(ObjectContainer container);
+
}
\ No newline at end of file
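
Since ArchiveManager itself is never stored (it registers an objectCanNew() veto; see ArchiveManager.java below), callers must now pass it into every method. A sketch of the cache-only path through the new interface; all variable names are assumed from the caller's scope:

    ArchiveHandler handler = manager.makeHandler(uri, ARCHIVE_TYPE.TAR,
            COMPRESSOR_TYPE.GZIP, false /* forceRefetch */, true /* persistent */);
    Bucket metadata = handler.getMetadata(archiveContext, clientMetadata,
            recursionLevel, false /* dontEnterImplicitArchives */, manager);
    if(metadata == null) {
        // Cache miss: the fetcher must download the container itself, then hand
        // the data to extractToCache() or extractPersistentOffThread().
    }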
Copied: trunk/freenet/src/freenet/client/ArchiveHandlerImpl.java (from rev 26320, branches/db4o/freenet/src/freenet/client/ArchiveHandlerImpl.java)
===================================================================
--- trunk/freenet/src/freenet/client/ArchiveHandlerImpl.java (rev 0)
+++ trunk/freenet/src/freenet/client/ArchiveHandlerImpl.java 2009-04-01 20:34:09 UTC (rev 26322)
@@ -0,0 +1,302 @@
+package freenet.client;
+
+import java.io.IOException;
+
+import com.db4o.ObjectContainer;
+import com.db4o.ObjectSet;
+import com.db4o.query.Predicate;
+
+import freenet.client.ArchiveManager.ARCHIVE_TYPE;
+import freenet.client.async.ClientContext;
+import freenet.client.async.DBJob;
+import freenet.keys.FreenetURI;
+import freenet.support.LogThresholdCallback;
+import freenet.support.Logger;
+import freenet.support.api.Bucket;
+import freenet.support.api.BucketFactory;
+import freenet.support.compress.Compressor.COMPRESSOR_TYPE;
+import freenet.support.io.BucketTools;
+import freenet.support.io.NativeThread;
+
+class ArchiveHandlerImpl implements ArchiveHandler {
+
+ private static volatile boolean logMINOR;
+
+ static {
+ Logger.registerLogThresholdCallback(new LogThresholdCallback() {
+
+ @Override
+ public void shouldUpdate() {
+ logMINOR = Logger.shouldLog(Logger.MINOR, this);
+ }
+ });
+ }
+
+ private final FreenetURI key;
+ private boolean forceRefetchArchive;
+ ARCHIVE_TYPE archiveType;
+ COMPRESSOR_TYPE compressorType;
+
+ ArchiveHandlerImpl(FreenetURI key, ARCHIVE_TYPE archiveType, COMPRESSOR_TYPE ctype, boolean forceRefetchArchive) {
+ this.key = key;
+ this.archiveType = archiveType;
+ this.compressorType = ctype;
+ this.forceRefetchArchive = forceRefetchArchive;
+ }
+
+ public Bucket get(String internalName, ArchiveContext archiveContext,
+ ClientMetadata dm, int recursionLevel,
+ boolean dontEnterImplicitArchives, ArchiveManager manager)
+ throws ArchiveFailureException, ArchiveRestartException,
+ MetadataParseException, FetchException {
+
+ // Do loop detection on the archive that we are about to fetch.
+ archiveContext.doLoopDetection(key);
+
+ if(forceRefetchArchive) return null;
+
+ Bucket data;
+
+ // Fetch from cache
+ if(logMINOR)
+ Logger.minor(this, "Checking cache: "+key+ ' '
+internalName);
+ if((data = manager.getCached(key, internalName)) != null) {
+ return data;
+ }
+
+ return null;
+ }
+
+ public Bucket getMetadata(ArchiveContext archiveContext, ClientMetadata dm,
+ int recursionLevel, boolean dontEnterImplicitArchives,
+ ArchiveManager manager) throws ArchiveFailureException,
+ ArchiveRestartException, MetadataParseException, FetchException {
+ return get(".metadata", archiveContext, dm, recursionLevel, dontEnterImplicitArchives, manager);
+ }
+
+ public void extractToCache(Bucket bucket, ArchiveContext actx,
+ String element, ArchiveExtractCallback callback,
+ ArchiveManager manager, ObjectContainer container, ClientContext context) throws ArchiveFailureException,
+ ArchiveRestartException {
+ forceRefetchArchive = false; // now we don't need to force refetch any more
+ ArchiveStoreContext ctx = manager.makeContext(key, archiveType, compressorType, false);
+ manager.extractToCache(key, archiveType, compressorType, bucket, actx, ctx, element, callback, container, context);
+ }
+
+ public ARCHIVE_TYPE getArchiveType() {
+ return archiveType;
+ }
+
+ public COMPRESSOR_TYPE getCompressorType() {
+ return compressorType;
+ }
+
+ public FreenetURI getKey() {
+ return key;
+ }
+
+ /**
+ * Unpack a fetched archive on a separate thread for a persistent caller.
+ * This involves:
+ * - Add a tag to the database so that it will be restarted on a crash.
+ * - Run the actual unpack on a separate thread.
+ * - Copy the data to a persistent bucket.
+ * - Schedule a database job.
+ * - Call the callback.
+ * @param bucket
+ * @param actx
+ * @param element
+ * @param callback
+ * @param container
+ * @param context
+ */
+ public void extractPersistentOffThread(Bucket bucket, boolean freeBucket, ArchiveContext actx, String element, ArchiveExtractCallback callback, ObjectContainer container, final ClientContext context) {
+ assert(element != null); // no callback would be called...
+ final ArchiveManager manager = context.archiveManager;
+ final ArchiveExtractTag tag = new ArchiveExtractTag(this, bucket, freeBucket, actx, element, callback, context.nodeDBHandle);
+ container.store(tag);
+ runPersistentOffThread(tag, context, manager, context.persistentBucketFactory);
+ }
+
+ private static void runPersistentOffThread(final ArchiveExtractTag tag, final ClientContext context, final ArchiveManager manager, final BucketFactory bf) {
+ final ProxyCallback proxyCallback = new ProxyCallback();
+
+ if(logMINOR)
+ Logger.minor(ArchiveHandlerImpl.class, "Scheduling off-thread extraction: "+tag.data+" for "+tag.handler.key+" element "+tag.element+" for "+tag.callback, new Exception("debug"));
+
+ context.mainExecutor.execute(new Runnable() {
+
+ public void run() {
+ try {
+ if(logMINOR)
+ Logger.minor(this, "Extracting
off-thread: "+tag.data+" for "+tag.handler.key+" element "+tag.element+" for
"+tag.callback);
+ tag.handler.extractToCache(tag.data,
tag.actx, tag.element, proxyCallback, manager, null, context);
+ if(logMINOR)
+ Logger.minor(this, "Extracted");
+ final Bucket data;
+ if(proxyCallback.data == null)
+ data = null;
+ else {
+ try {
+ if(logMINOR)
+
Logger.minor(this, "Copying data...");
+ data =
bf.makeBucket(proxyCallback.data.size());
+
BucketTools.copy(proxyCallback.data, data);
+
proxyCallback.data.free();
+ if(logMINOR)
+
Logger.minor(this, "Copied and freed original");
+ } catch (IOException e) {
+ throw new
ArchiveFailureException("Failure copying data to persistent storage", e);
+ }
+ }
+ context.jobRunner.queue(new DBJob() {
+
+ public void run(ObjectContainer container, ClientContext context) {
+ if(logMINOR)
+ Logger.minor(this, "Calling callback for "+tag.data+" for "+tag.handler.key+" element "+tag.element+" for "+tag.callback);
+ container.activate(tag.callback, 1);
+ if(proxyCallback.data == null)
+ tag.callback.notInArchive(container, context);
+ else
+ tag.callback.gotBucket(data, container, context);
+ tag.callback.removeFrom(container);
+ if(tag.freeBucket) {
+ tag.data.free();
+ tag.data.removeFrom(container);
+ }
+ container.deactivate(tag.callback, 1);
+ container.delete(tag);
+ }
+
+ }, NativeThread.NORM_PRIORITY, false);
+
+ } catch (final ArchiveFailureException e) {
+
+ context.jobRunner.queue(new DBJob() {
+
+ public void run(ObjectContainer container, ClientContext context) {
+ container.activate(tag.callback, 1);
+ tag.callback.onFailed(e, container, context);
+ tag.callback.removeFrom(container);
+ if(tag.freeBucket) {
+ tag.data.free();
+ tag.data.removeFrom(container);
+ }
+ container.delete(tag);
+ }
+
+ }, NativeThread.NORM_PRIORITY, false);
+
+ } catch (final ArchiveRestartException e) {
+
+ context.jobRunner.queue(new DBJob() {
+
+ public void run(ObjectContainer container, ClientContext context) {
+ container.activate(tag.callback, 1);
+ tag.callback.onFailed(e, container, context);
+ tag.callback.removeFrom(container);
+ if(tag.freeBucket) {
+ tag.data.free();
+ tag.data.removeFrom(container);
+ }
+ container.delete(tag);
+ }
+
+ }, NativeThread.NORM_PRIORITY, false);
+
+ }
+ }
+
+ }, "Off-thread extract");
+ }
+
+ /** Called from ArchiveManager.init() */
+ static void init(ObjectContainer container, ClientContext context, final long nodeDBHandle) {
+ ObjectSet<ArchiveExtractTag> set = container.query(new Predicate<ArchiveExtractTag>() {
+ public boolean match(ArchiveExtractTag tag) {
+ return tag.nodeDBHandle == nodeDBHandle;
+ }
+ });
+ while(set.hasNext()) {
+ ArchiveExtractTag tag = set.next();
+ tag.activateForExecution(container);
+ runPersistentOffThread(tag, context, context.archiveManager, context.persistentBucketFactory);
+ }
+ }
+
+ private static class ProxyCallback implements ArchiveExtractCallback {
+
+ Bucket data;
+
+ public void gotBucket(Bucket data, ObjectContainer container, ClientContext context) {
+ this.data = data;
+ }
+
+ public void notInArchive(ObjectContainer container, ClientContext context) {
+ this.data = null;
+ }
+
+ public void onFailed(ArchiveRestartException e, ObjectContainer container, ClientContext context) {
+ // Must not be called.
+ throw new UnsupportedOperationException();
+ }
+
+ public void onFailed(ArchiveFailureException e, ObjectContainer container, ClientContext context) {
+ // Must not be called.
+ throw new UnsupportedOperationException();
+ }
+
+ public void removeFrom(ObjectContainer container) {
+ container.delete(this);
+ }
+
+ }
+
+ public void activateForExecution(ObjectContainer container) {
+ container.activate(this, 1);
+ container.activate(key, 5);
+ }
+
+ public ArchiveHandler cloneHandler() {
+ return new ArchiveHandlerImpl(key.clone(), archiveType, compressorType, forceRefetchArchive);
+ }
+
+ public void removeFrom(ObjectContainer container) {
+ if(key == null) {
+ Logger.error(this, "removeFrom() : key = null for
"+this+" I exist = "+container.ext().isStored(this)+" I am active:
"+container.ext().isActive(this), new Exception("error"));
+ } else
+ key.removeFrom(container);
+ container.delete(this);
+ }
+
+}
+
+class ArchiveExtractTag {
+
+ final ArchiveHandlerImpl handler;
+ final Bucket data;
+ final boolean freeBucket;
+ final ArchiveContext actx;
+ final String element;
+ final ArchiveExtractCallback callback;
+ final long nodeDBHandle;
+
+ ArchiveExtractTag(ArchiveHandlerImpl handler, Bucket data, boolean freeBucket, ArchiveContext actx, String element, ArchiveExtractCallback callback, long nodeDBHandle) {
+ this.handler = handler;
+ this.data = data;
+ this.freeBucket = freeBucket;
+ this.actx = actx;
+ this.element = element;
+ this.callback = callback;
+ this.nodeDBHandle = nodeDBHandle;
+ }
+
+ public void activateForExecution(ObjectContainer container) {
+ container.activate(this, 1);
+ container.activate(data, 5);
+ handler.activateForExecution(container);
+ container.activate(actx, 5);
+ container.activate(callback, 1);
+ }
+
+}
\ No newline at end of file
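
The stored ArchiveExtractTag is what makes extractPersistentOffThread() crash-safe: the tag is written before the worker thread starts and deleted only after the callback has run, so init() can re-query outstanding tags and restart them. The node-side hook is a single call at startup, once the container is open; a sketch, with the argument names assumed from the caller:

    // Resume any archive extractions interrupted by a crash.
    ArchiveManager.init(container, clientContext, nodeDBHandle);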
Modified: trunk/freenet/src/freenet/client/ArchiveManager.java
===================================================================
--- trunk/freenet/src/freenet/client/ArchiveManager.java 2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/client/ArchiveManager.java 2009-04-01 20:34:09 UTC (rev 26322)
@@ -4,14 +4,25 @@
package freenet.client;
import java.io.IOException;
+import java.io.InputStream;
import java.io.OutputStream;
import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
+import java.util.zip.GZIPInputStream;
import java.util.zip.ZipEntry;
import java.util.zip.ZipInputStream;
+import net.contrapunctus.lzma.LzmaInputStream;
+
+import org.apache.tools.bzip2.CBZip2InputStream;
+import org.apache.tools.tar.TarEntry;
+import org.apache.tools.tar.TarInputStream;
+
+import com.db4o.ObjectContainer;
+
+import freenet.client.async.ClientContext;
import freenet.keys.FreenetURI;
import freenet.support.LRUHashtable;
import freenet.support.Logger;
@@ -21,12 +32,6 @@
import freenet.support.compress.Compressor.COMPRESSOR_TYPE;
import freenet.support.io.BucketTools;
import freenet.support.io.Closer;
-import java.io.InputStream;
-import java.util.zip.GZIPInputStream;
-import net.contrapunctus.lzma.LzmaInputStream;
-import org.apache.tools.bzip2.CBZip2InputStream;
-import org.apache.tools.tar.TarEntry;
-import org.apache.tools.tar.TarInputStream;
/**
* Cache of recently decoded archives:
@@ -99,7 +104,7 @@
// ArchiveHandler's
final int maxArchiveHandlers;
- private final LRUHashtable archiveHandlers;
+ private final LRUHashtable<FreenetURI, ArchiveStoreContext> archiveHandlers;
// Data cache
/** Maximum number of cached ArchiveStoreItems */
@@ -109,7 +114,7 @@
/** Currently cached data in bytes */
private long cachedData;
/** Map from ArchiveKey to ArchiveStoreElement */
- private final LRUHashtable storedData;
+ private final LRUHashtable<ArchiveKey, ArchiveStoreItem> storedData;
/** Bucket Factory */
private final BucketFactory tempBucketFactory;
@@ -130,17 +135,17 @@
*/
public ArchiveManager(int maxHandlers, long maxCachedData, long maxArchivedFileSize, int maxCachedElements, BucketFactory tempBucketFactory) {
maxArchiveHandlers = maxHandlers;
- archiveHandlers = new LRUHashtable();
+ archiveHandlers = new LRUHashtable<FreenetURI, ArchiveStoreContext>();
this.maxCachedElements = maxCachedElements;
this.maxCachedData = maxCachedData;
- storedData = new LRUHashtable();
+ storedData = new LRUHashtable<ArchiveKey, ArchiveStoreItem>();
this.maxArchivedFileSize = maxArchivedFileSize;
this.tempBucketFactory = tempBucketFactory;
logMINOR = Logger.shouldLog(Logger.MINOR, this);
}
/** Add an ArchiveHandler by key */
- private synchronized void putCached(FreenetURI key, ArchiveHandler zip) {
+ private synchronized void putCached(FreenetURI key, ArchiveStoreContext zip) {
if(logMINOR) Logger.minor(this, "Put cached AH for "+key+" : "+zip);
archiveHandlers.push(key, zip);
while(archiveHandlers.size() > maxArchiveHandlers)
@@ -148,9 +153,9 @@
}
/** Get an ArchiveHandler by key */
- public ArchiveHandler getCached(FreenetURI key) {
+ ArchiveStoreContext getCached(FreenetURI key) {
if(logMINOR) Logger.minor(this, "Get cached AH for "+key);
- ArchiveHandler handler = (ArchiveHandler) archiveHandlers.get(key);
+ ArchiveStoreContext handler = (ArchiveStoreContext) archiveHandlers.get(key);
if(handler == null) return null;
archiveHandlers.push(key, handler);
return handler;
@@ -165,17 +170,30 @@
* @param archiveType The archive type, defined in Metadata.
* @return An archive handler.
*/
- public synchronized ArchiveHandler makeHandler(FreenetURI key, ARCHIVE_TYPE archiveType, COMPRESSOR_TYPE ctype, boolean returnNullIfNotFound, boolean forceRefetchArchive) {
- ArchiveHandler handler = null;
- if(!forceRefetchArchive) handler = getCached(key);
+ synchronized ArchiveStoreContext makeContext(FreenetURI key, ARCHIVE_TYPE archiveType, COMPRESSOR_TYPE ctype, boolean returnNullIfNotFound) {
+ ArchiveStoreContext handler = null;
+ handler = getCached(key);
if(handler != null) return handler;
if(returnNullIfNotFound) return null;
- handler = new ArchiveStoreContext(this, key, archiveType, ctype, forceRefetchArchive);
+ handler = new ArchiveStoreContext(key, archiveType);
putCached(key, handler);
return handler;
}
/**
+ * Create an archive handler. This does not need to know how to
+ * fetch the key, because the methods called later will ask.
+ * It will try to serve from cache, but if that fails, will
+ * re-fetch.
+ * @param key The key of the archive that we are extracting data from.
+ * @param archiveType The archive type, defined in Metadata.
+ * @return An archive handler.
+ */
+ public ArchiveHandler makeHandler(FreenetURI key, ARCHIVE_TYPE archiveType, COMPRESSOR_TYPE ctype, boolean forceRefetch, boolean persistent) {
+ return new ArchiveHandlerImpl(persistent ? key.clone() : key, archiveType, ctype, forceRefetch);
+ }
+
+ /**
* Get a cached, previously extracted, file from an archive.
* @param key The key used to fetch the archive.
* @param filename The name of the file within the archive.
@@ -209,6 +227,7 @@
// Soft disk space limit = we go over the limit significantly when we
// are overloaded.
cachedData -= size;
+ if(logMINOR) Logger.minor(this, "removeCachedItem: "+item);
item.close();
}
@@ -226,15 +245,14 @@
* @throws ArchiveRestartException If the request needs to be restarted because the archive
* changed.
*/
- public void extractToCache(FreenetURI key, ARCHIVE_TYPE archiveType, COMPRESSOR_TYPE ctype, Bucket data, ArchiveContext archiveContext, ArchiveStoreContext ctx, String element, ArchiveExtractCallback callback) throws ArchiveFailureException, ArchiveRestartException {
+ public void extractToCache(FreenetURI key, ARCHIVE_TYPE archiveType, COMPRESSOR_TYPE ctype, Bucket data, ArchiveContext archiveContext, ArchiveStoreContext ctx, String element, ArchiveExtractCallback callback, ObjectContainer container, ClientContext context) throws ArchiveFailureException, ArchiveRestartException {
logMINOR = Logger.shouldLog(Logger.MINOR, this);
MutableBoolean gotElement = element != null ? new MutableBoolean() : null;
if(logMINOR) Logger.minor(this, "Extracting "+key);
- ctx.onExtract();
- ctx.removeAllCachedItems(); // flush cache anyway
+ ctx.removeAllCachedItems(this); // flush cache anyway
final long expectedSize = ctx.getLastSize();
final long archiveSize = data.size();
/** Set if we need to throw a RestartedException rather than returning success,
@@ -283,9 +301,9 @@
}
if(ARCHIVE_TYPE.ZIP == archiveType)
- handleZIPArchive(ctx, key, is, element, callback, gotElement, throwAtExit);
+ handleZIPArchive(ctx, key, is, element, callback, gotElement, throwAtExit, container, context);
else if(ARCHIVE_TYPE.TAR == archiveType)
- handleTARArchive(ctx, key, is, element, callback, gotElement, throwAtExit);
+ handleTARArchive(ctx, key, is, element, callback, gotElement, throwAtExit, container, context);
else
throw new ArchiveFailureException("Unknown or
unsupported archive algorithm " + archiveType);
} catch (IOException ioe) {
@@ -295,7 +313,7 @@
}
}
- private void handleTARArchive(ArchiveStoreContext ctx, FreenetURI key, InputStream data, String element, ArchiveExtractCallback callback, MutableBoolean gotElement, boolean throwAtExit) throws ArchiveFailureException, ArchiveRestartException {
+ private void handleTARArchive(ArchiveStoreContext ctx, FreenetURI key, InputStream data, String element, ArchiveExtractCallback callback, MutableBoolean gotElement, boolean throwAtExit, ObjectContainer container, ClientContext context) throws ArchiveFailureException, ArchiveRestartException {
if(logMINOR) Logger.minor(this, "Handling a TAR Archive");
TarInputStream tarIS = null;
try {
@@ -305,7 +323,7 @@
TarEntry entry;
byte[] buf = new byte[32768];
- HashSet names = new HashSet();
+ HashSet<String> names = new HashSet<String>();
boolean gotMetadata = false;
outerTAR: while(true) {
@@ -341,7 +359,7 @@
out.close();
if(name.equals(".metadata"))
gotMetadata = true;
- addStoreElement(ctx, key, name, output, gotElement, element, callback);
+ addStoreElement(ctx, key, name, output, gotElement, element, callback, container, context);
names.add(name);
trimStoredData();
}
@@ -349,13 +367,13 @@
// If no metadata, generate some
if(!gotMetadata) {
- generateMetadata(ctx, key, names, gotElement, element, callback);
+ generateMetadata(ctx, key, names, gotElement, element, callback, container, context);
trimStoredData();
}
if(throwAtExit) throw new ArchiveRestartException("Archive changed on re-fetch");
if((!gotElement.value) && element != null)
- callback.notInArchive();
+ callback.notInArchive(container, context);
} catch (IOException e) {
throw new ArchiveFailureException("Error reading
archive: "+e.getMessage(), e);
@@ -364,7 +382,7 @@
}
}
- private void handleZIPArchive(ArchiveStoreContext ctx, FreenetURI key, InputStream data, String element, ArchiveExtractCallback callback, MutableBoolean gotElement, boolean throwAtExit) throws ArchiveFailureException, ArchiveRestartException {
+ private void handleZIPArchive(ArchiveStoreContext ctx, FreenetURI key, InputStream data, String element, ArchiveExtractCallback callback, MutableBoolean gotElement, boolean throwAtExit, ObjectContainer container, ClientContext context) throws ArchiveFailureException, ArchiveRestartException {
if(logMINOR) Logger.minor(this, "Handling a ZIP Archive");
ZipInputStream zis = null;
try {
@@ -374,7 +392,7 @@
ZipEntry entry;
byte[] buf = new byte[32768];
- HashSet names = new HashSet();
+ HashSet<String> names = new HashSet<String>();
boolean gotMetadata = false;
outerZIP: while(true) {
@@ -410,7 +428,7 @@
out.close();
if(name.equals(".metadata"))
gotMetadata = true;
- addStoreElement(ctx, key, name, output, gotElement, element, callback);
+ addStoreElement(ctx, key, name, output, gotElement, element, callback, container, context);
names.add(name);
trimStoredData();
}
@@ -418,13 +436,13 @@
// If no metadata, generate some
if(!gotMetadata) {
- generateMetadata(ctx, key, names, gotElement, element, callback);
+ generateMetadata(ctx, key, names, gotElement, element, callback, container, context);
trimStoredData();
}
if(throwAtExit) throw new ArchiveRestartException("Archive changed on re-fetch");
if((!gotElement.value) && element != null)
- callback.notInArchive();
+ callback.notInArchive(container, context);
} catch (IOException e) {
throw new ArchiveFailureException("Error reading
archive: "+e.getMessage(), e);
@@ -449,7 +467,7 @@
* @param callbackName If we generate a
* @throws ArchiveFailureException
*/
- private ArchiveStoreItem generateMetadata(ArchiveStoreContext ctx, FreenetURI key, HashSet names, MutableBoolean gotElement, String element2, ArchiveExtractCallback callback) throws ArchiveFailureException {
+ private ArchiveStoreItem generateMetadata(ArchiveStoreContext ctx, FreenetURI key, HashSet names, MutableBoolean gotElement, String element2, ArchiveExtractCallback callback, ObjectContainer container, ClientContext context) throws ArchiveFailureException {
/* What we have to do is to:
* - Construct a filesystem tree of the names.
* - Turn each level of the tree into a Metadata object, including those below it, with
@@ -459,7 +477,7 @@
*/
// Root directory.
// String -> either itself, or another HashMap
- HashMap dir = new HashMap();
+ HashMap<String, Object> dir = new HashMap<String, Object>();
Iterator i = names.iterator();
while(i.hasNext()) {
String name = (String) i.next();
@@ -475,10 +493,10 @@
OutputStream os = bucket.getOutputStream();
os.write(buf);
os.close();
- return addStoreElement(ctx, key, ".metadata",
bucket, gotElement, element2, callback);
+ return addStoreElement(ctx, key, ".metadata",
bucket, gotElement, element2, callback, container, context);
} catch (MetadataUnresolvedException e) {
try {
- x = resolve(e, x, bucket, ctx, key,
gotElement, element2, callback);
+ x = resolve(e, x, bucket, ctx, key,
gotElement, element2, callback, container, context);
} catch (IOException e1) {
throw new ArchiveFailureException("Failed to create metadata: "+e1, e1);
}
@@ -489,7 +507,7 @@
}
}
- private int resolve(MetadataUnresolvedException e, int x, Bucket bucket, ArchiveStoreContext ctx, FreenetURI key, MutableBoolean gotElement, String element2, ArchiveExtractCallback callback) throws IOException, ArchiveFailureException {
+ private int resolve(MetadataUnresolvedException e, int x, Bucket bucket, ArchiveStoreContext ctx, FreenetURI key, MutableBoolean gotElement, String element2, ArchiveExtractCallback callback, ObjectContainer container, ClientContext context) throws IOException, ArchiveFailureException {
Metadata[] m = e.mustResolve;
for(int i=0;i<m.length;i++) {
try {
@@ -497,15 +515,15 @@
OutputStream os = bucket.getOutputStream();
os.write(buf);
os.close();
- addStoreElement(ctx, key, ".metadata-"+(x++),
bucket, gotElement, element2, callback);
+ addStoreElement(ctx, key, ".metadata-"+(x++),
bucket, gotElement, element2, callback, container, context);
} catch (MetadataUnresolvedException e1) {
- x = resolve(e, x, bucket, ctx, key, gotElement,
element2, callback);
+ x = resolve(e, x, bucket, ctx, key, gotElement,
element2, callback, container, context);
}
}
return x;
}
- private void addToDirectory(HashMap dir, String name, String prefix) throws ArchiveFailureException {
+ private void addToDirectory(HashMap<String, Object> dir, String name, String prefix) throws ArchiveFailureException {
int x = name.indexOf('/');
if(x < 0) {
if(dir.containsKey(name)) {
@@ -521,9 +539,9 @@
} else
after = name.substring(x+1, name.length());
Object o = dir.get(before);
- HashMap map = (HashMap) o;
+ HashMap<String, Object> map = (HashMap<String, Object>) o;
if(o == null) {
- map = new HashMap();
+ map = new HashMap<String, Object>();
dir.put(before, map);
}
if(o instanceof String) {
@@ -552,6 +570,7 @@
if(oldItem != null) {
oldItem.close();
cachedData -= oldItem.spaceUsed();
+ if(logMINOR) Logger.minor(this, "Dropping old
store element from archive cache: "+oldItem);
}
}
}
@@ -567,7 +586,7 @@
* @throws ArchiveFailureException If a failure occurred resulting in the data not being readable. Only happens if
* callback != null.
*/
- private ArchiveStoreItem addStoreElement(ArchiveStoreContext ctx, FreenetURI key, String name, Bucket temp, MutableBoolean gotElement, String callbackName, ArchiveExtractCallback callback) throws ArchiveFailureException {
+ private ArchiveStoreItem addStoreElement(ArchiveStoreContext ctx, FreenetURI key, String name, Bucket temp, MutableBoolean gotElement, String callbackName, ArchiveExtractCallback callback, ObjectContainer container, ClientContext context) throws ArchiveFailureException {
RealArchiveStoreItem element = new RealArchiveStoreItem(ctx, key, name, temp);
if(logMINOR) Logger.minor(this, "Adding store element: "+element+" ( "+key+ ' ' +name+" size "+element.spaceUsed()+" )");
ArchiveStoreItem oldItem;
@@ -577,7 +596,7 @@
matchBucket = element.getReaderBucket();
}
synchronized (this) {
- oldItem = (ArchiveStoreItem) storedData.get(element.key);
+ oldItem = storedData.get(element.key);
storedData.push(element.key, element);
cachedData += element.spaceUsed();
if(oldItem != null) {
@@ -587,7 +606,7 @@
}
}
if(matchBucket != null) {
- callback.gotBucket(matchBucket);
+ callback.gotBucket(matchBucket, container, context);
gotElement.value = true;
}
return element;
@@ -618,4 +637,14 @@
}
}
}
+
+ public static void init(ObjectContainer container, ClientContext context, final long nodeDBHandle) {
+ ArchiveHandlerImpl.init(container, context, nodeDBHandle);
+ }
+
+ public boolean objectCanNew(ObjectContainer container) {
+ Logger.error(this, "Not storing ArchiveManager in database",
new Exception("error"));
+ return false;
+ }
+
}
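
objectCanNew(ObjectContainer) is a db4o event-callback convention: returning false vetoes the store. Runtime-only classes reachable from persistent object graphs use it as a tripwire, so an accidental store is logged instead of silently serializing half the node. The same guard fits any transient class; a sketch:

    // Refuse db4o persistence for a runtime-only object.
    public boolean objectCanNew(ObjectContainer container) {
        Logger.error(this, "Not storing " + this + " in database", new Exception("error"));
        return false; // veto: db4o will not store this object
    }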
Modified: trunk/freenet/src/freenet/client/ArchiveStoreContext.java
===================================================================
--- trunk/freenet/src/freenet/client/ArchiveStoreContext.java 2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/client/ArchiveStoreContext.java 2009-04-01 20:34:09 UTC (rev 26322)
@@ -6,8 +6,6 @@
import freenet.keys.FreenetURI;
import freenet.support.DoublyLinkedListImpl;
import freenet.support.Logger;
-import freenet.support.api.Bucket;
-import freenet.support.compress.Compressor.COMPRESSOR_TYPE;
/**
* Tracks all files currently in the cache from a given key.
@@ -17,17 +15,11 @@
* subject to the above.
*
* Always take the lock on ArchiveStoreContext before the lock on ArchiveManager, NOT the other way around.
- *
- * Not normally to be used directly by external packages, but public for
- * ArchiveManager.extractToCache. FIXME.
*/
-public class ArchiveStoreContext implements ArchiveHandler {
+public class ArchiveStoreContext {
- private ArchiveManager manager;
private FreenetURI key;
private final ArchiveManager.ARCHIVE_TYPE archiveType;
- private final COMPRESSOR_TYPE compressorType;
- private boolean forceRefetchArchive;
/** Archive size */
private long lastSize = -1;
/** Archive hash */
@@ -36,80 +28,44 @@
* Note that we never ever hold this and then take another lock! In particular
* we must not take the ArchiveManager lock while holding this lock. It must be
* the inner lock to avoid deadlocks. */
- private final DoublyLinkedListImpl myItems;
+ private final DoublyLinkedListImpl<ArchiveStoreItem> myItems;
- public ArchiveStoreContext(ArchiveManager manager, FreenetURI key, ArchiveManager.ARCHIVE_TYPE archiveType, COMPRESSOR_TYPE ctype, boolean forceRefetchArchive) {
- this.manager = manager;
+ ArchiveStoreContext(FreenetURI key, ArchiveManager.ARCHIVE_TYPE archiveType) {
this.key = key;
this.archiveType = archiveType;
- this.compressorType = ctype;
- myItems = new DoublyLinkedListImpl();
- this.forceRefetchArchive = forceRefetchArchive;
+ myItems = new DoublyLinkedListImpl<ArchiveStoreItem>();
}
- /**
- * Get the metadata for a given archive.
- * @return A Bucket containing the metadata, in binary format, for the archive.
- */
- public Bucket getMetadata(ArchiveContext archiveContext, ClientMetadata dm, int recursionLevel,
- boolean dontEnterImplicitArchives) throws ArchiveFailureException, ArchiveRestartException, MetadataParseException, FetchException {
- return get(".metadata", archiveContext, dm, recursionLevel, dontEnterImplicitArchives);
- }
-
- /**
- * Fetch a file in an archive.
- * @return A Bucket containing the data. This will not be freed until the
- * client is finished with it i.e. calls free() or it is finalized.
- */
- public Bucket get(String internalName, ArchiveContext archiveContext, ClientMetadata dm, int recursionLevel,
- boolean dontEnterImplicitArchives) throws ArchiveFailureException, ArchiveRestartException, MetadataParseException, FetchException {
-
- // Do loop detection on the archive that we are about to fetch.
- archiveContext.doLoopDetection(key);
-
- if(forceRefetchArchive) return null;
-
- Bucket data;
-
- // Fetch from cache
- if(Logger.shouldLog(Logger.MINOR, this))
- Logger.minor(this, "Checking cache: "+key+ ' '
+internalName);
- if((data = manager.getCached(key, internalName)) != null) {
- return data;
- }
-
- return null;
- }
-
/** Returns the size of the archive last time we fetched it, or -1 */
long getLastSize() {
return lastSize;
}
/** Sets the size of the archive - @see getLastSize() */
- public void setLastSize(long size) {
+ void setLastSize(long size) {
lastSize = size;
}
/** Returns the hash of the archive last time we fetched it, or null */
- public byte[] getLastHash() {
+ byte[] getLastHash() {
return lastHash;
}
/** Sets the hash of the archive - @see getLastHash() */
- public void setLastHash(byte[] realHash) {
+ void setLastHash(byte[] realHash) {
lastHash = realHash;
}
/**
* Remove all ArchiveStoreItems with this key from the cache.
*/
- public void removeAllCachedItems() {
+ void removeAllCachedItems(ArchiveManager manager) {
ArchiveStoreItem item = null;
while(true) {
synchronized (myItems) {
- item = (ArchiveStoreItem) myItems.pop();
+				// removeCachedItem() will call removeItem(), so don't remove it here.
+				item = (ArchiveStoreItem) myItems.head();
}
if(item == null) break;
manager.removeCachedItem(item);
@@ -117,7 +73,7 @@
}
	/** Notify that a new archive store item with this key has been added to the cache. */
- public void addItem(ArchiveStoreItem item) {
+ void addItem(ArchiveStoreItem item) {
synchronized(myItems) {
myItems.push(item);
}
@@ -126,9 +82,13 @@
	/** Notify that an archive store item with this key has been expelled from the
	 * cache. Remove it from our local cache and ask it to free the bucket if
	 * necessary. */
- public void removeItem(ArchiveStoreItem item) {
+ void removeItem(ArchiveStoreItem item) {
synchronized(myItems) {
-			if(myItems.remove(item) == null) return; // only removed once
+			if(myItems.remove(item) == null) {
+				if(Logger.shouldLog(Logger.MINOR, this))
+					Logger.minor(this, "Not removing: "+item+" for "+this+" - already removed");
+				return; // only removed once
+			}
}
item.innerClose();
}
@@ -141,12 +101,4 @@
return key;
}
-	public void extractToCache(Bucket bucket, ArchiveContext actx, String element, ArchiveExtractCallback callback) throws ArchiveFailureException, ArchiveRestartException {
-		manager.extractToCache(key, archiveType, compressorType, bucket, actx, this, element, callback);
-	}
-
-	/** Called just before extracting this container to the cache */
-	public void onExtract() {
-		forceRefetchArchive = false;
-	}
}
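
Two design points in the file above are worth spelling out: the documented lock order is ArchiveStoreContext, then ArchiveManager, with myItems strictly innermost; and removeAllCachedItems() now peeks with head() rather than pop() because manager.removeCachedItem() calls back into removeItem(), which does the actual unlink. A sketch of the only legal nesting (the caller and method name are hypothetical):

	// Hypothetical caller; shows the documented lock order only.
	void evictAll(ArchiveStoreContext ctx, ArchiveManager manager) {
		synchronized(ctx) {             // outer lock
			synchronized(manager) {     // inner lock: never taken before ctx
				// myItems is locked last, inside addItem()/removeItem()
			}
		}
	}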
Modified: trunk/freenet/src/freenet/client/ArchiveStoreItem.java
===================================================================
--- trunk/freenet/src/freenet/client/ArchiveStoreItem.java	2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/client/ArchiveStoreItem.java	2009-04-01 20:34:09 UTC (rev 26322)
@@ -9,7 +9,7 @@
/**
* Base class for items stored in the archive cache.
*/
-abstract class ArchiveStoreItem extends DoublyLinkedListImpl.Item {
+abstract class ArchiveStoreItem extends DoublyLinkedListImpl.Item<ArchiveStoreItem> {
final ArchiveKey key;
final ArchiveStoreContext context;
Modified: trunk/freenet/src/freenet/client/ClientMetadata.java
===================================================================
--- trunk/freenet/src/freenet/client/ClientMetadata.java	2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/client/ClientMetadata.java	2009-04-01 20:34:09 UTC (rev 26322)
@@ -3,6 +3,8 @@
* http://www.gnu.org/ for further details of the GPL. */
package freenet.client;
+import com.db4o.ObjectContainer;
+
/**
* Stores the metadata that the client might actually be interested in.
*/
@@ -41,9 +43,9 @@
}
@Override
- public Object clone() {
+ public ClientMetadata clone() {
try {
- return super.clone();
+ return (ClientMetadata) super.clone();
} catch (CloneNotSupportedException e) {
throw new Error(e);
}
@@ -67,4 +69,8 @@
}
return s;
}
+
+ public void removeFrom(ObjectContainer container) {
+ container.delete(this);
+ }
}
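
The clone() override above uses a Java 5 covariant return type, so callers no longer cast; FetchContext and FailureCodeTracker below get the same treatment. Usage:

	ClientMetadata copy = meta.clone(); // no (ClientMetadata) cast needed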
Copied: trunk/freenet/src/freenet/client/FECCallback.java (from rev 26320, branches/db4o/freenet/src/freenet/client/FECCallback.java)
===================================================================
--- trunk/freenet/src/freenet/client/FECCallback.java	(rev 0)
+++ trunk/freenet/src/freenet/client/FECCallback.java	2009-04-01 20:34:09 UTC (rev 26322)
@@ -0,0 +1,33 @@
+/* This code is part of Freenet. It is distributed under the GNU General
+ * Public License, version 2 (or at your option any later version). See
+ * http://www.gnu.org/ for further details of the GPL. */
+package freenet.client;
+
+import com.db4o.ObjectContainer;
+
+import freenet.client.async.ClientContext;
+import freenet.support.api.Bucket;
+
+/**
+ * An interface which has to be implemented by FECJob submitters
+ *
+ * @author Florent Daignière <nextgens at freenetproject.org>
+ *
+ * WARNING: the callback is expected to release the thread!
+ */
+public interface FECCallback {
+
+ /**
+	 * The implementor MUST copy the data manually from the arrays on the FECJob, because
+	 * db4o persists arrays as inline values, so WE CANNOT UPDATE THE ARRAY!!
+	 * @param container
+	 * @param context
+	 * @param job
+	 */
+	public void onEncodedSegment(ObjectContainer container, ClientContext context, FECJob job, Bucket[] dataBuckets, Bucket[] checkBuckets, SplitfileBlock[] dataBlocks, SplitfileBlock[] checkBlocks);
+
+	public void onDecodedSegment(ObjectContainer container, ClientContext context, FECJob job, Bucket[] dataBuckets, Bucket[] checkBuckets, SplitfileBlock[] dataBlocks, SplitfileBlock[] checkBlocks);
+
+	/** Something broke. */
+	public void onFailed(Throwable t, ObjectContainer container, ClientContext context);
+}
\ No newline at end of file
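
Because db4o persists arrays inline, an implementor must copy the buckets out of the arrays it is handed rather than keeping references into the job. A minimal transient implementation sketch (the class and field names are illustrative):

	class CopyingFECCallback implements FECCallback {
		private Bucket[] results;

		public void onDecodedSegment(ObjectContainer container, ClientContext context, FECJob job, Bucket[] dataBuckets, Bucket[] checkBuckets, SplitfileBlock[] dataBlocks, SplitfileBlock[] checkBlocks) {
			results = new Bucket[dataBuckets.length];
			System.arraycopy(dataBuckets, 0, results, 0, dataBuckets.length); // copy, never alias the job's array
			// hand the buckets on and release the waiting thread, per the WARNING above
		}

		public void onEncodedSegment(ObjectContainer container, ClientContext context, FECJob job, Bucket[] dataBuckets, Bucket[] checkBuckets, SplitfileBlock[] dataBlocks, SplitfileBlock[] checkBlocks) {
			// symmetric to the decode case
		}

		public void onFailed(Throwable t, ObjectContainer container, ClientContext context) {
			Logger.error(this, "FEC job failed", t);
		}
	}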
Modified: trunk/freenet/src/freenet/client/FECCodec.java
===================================================================
--- trunk/freenet/src/freenet/client/FECCodec.java	2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/client/FECCodec.java	2009-04-01 20:34:09 UTC (rev 26322)
@@ -6,21 +6,17 @@
import java.io.DataInputStream;
import java.io.IOException;
import java.io.OutputStream;
-import java.util.LinkedList;
+import com.db4o.ObjectContainer;
import com.onionnetworks.fec.FECCode;
import com.onionnetworks.util.Buffer;
-import freenet.node.PrioRunnable;
import freenet.support.Executor;
import freenet.support.Logger;
-import freenet.support.OOMHandler;
-import freenet.support.OOMHook;
import freenet.support.api.Bucket;
import freenet.support.api.BucketFactory;
import freenet.support.io.BucketTools;
import freenet.support.io.Closer;
-import freenet.support.io.NativeThread;
/**
* FEC (forward error correction) handler.
@@ -29,33 +25,35 @@
* @author root
*
*/
-public abstract class FECCodec implements OOMHook {
+public abstract class FECCodec {
	// REDFLAG: Optimal stripe size? Smaller => less memory usage, but more JNI overhead
private static int STRIPE_SIZE = 4096;
static boolean logMINOR;
- FECCode fec;
+ protected transient FECCode fec;
protected final int k, n;
- private final Executor executor;
- protected FECCodec(Executor executor, int k, int n) {
- this.executor = executor;
+ protected abstract void loadFEC();
+
+ protected FECCodec(int k, int n) {
this.k = k;
this.n = n;
-
- OOMHandler.addOOMHook(this);
+ if(n == 0 || n < k)
+ throw new IllegalArgumentException("Invalid: k="+k+"
n="+n);
}
/**
	 * Get a codec where we know both the number of data blocks and the number
	 * of check blocks, and the codec type. Normally for decoding.
	 */
-	public static FECCodec getCodec(short splitfileType, int dataBlocks, int checkBlocks, Executor executor) {
+	public static FECCodec getCodec(short splitfileType, int dataBlocks, int checkBlocks) {
+		if(Logger.shouldLog(Logger.MINOR, FECCodec.class))
+			Logger.minor(FECCodec.class, "getCodec: splitfileType="+splitfileType+" dataBlocks="+dataBlocks+" checkBlocks="+checkBlocks);
		if(splitfileType == Metadata.SPLITFILE_NONREDUNDANT)
			return null;
		if(splitfileType == Metadata.SPLITFILE_ONION_STANDARD)
-			return StandardOnionFECCodec.getInstance(dataBlocks, checkBlocks, executor);
+			return StandardOnionFECCodec.getInstance(dataBlocks, checkBlocks);
else
return null;
}
@@ -64,34 +62,47 @@
	 * Get a codec where we know only the number of data blocks and the codec
	 * type. Normally for encoding.
	 */
-	public static FECCodec getCodec(short splitfileType, int dataBlocks, Executor executor) {
+	public static FECCodec getCodec(short splitfileType, int dataBlocks) {
		if(splitfileType == Metadata.SPLITFILE_NONREDUNDANT)
			return null;
		if(splitfileType == Metadata.SPLITFILE_ONION_STANDARD) {
-			/**
-			 * ALCHEMY: What we do know is that redundancy by FEC is much more efficient than
-			 * redundancy by simply duplicating blocks, for obvious reasons (see e.g. Wuala). But
-			 * we have to have some redundancy at the duplicating blocks level because we do use
-			 * some keys directly etc: we store an insert in 3 nodes. We also cache it on 20 nodes,
-			 * but generally the key will fall out of the caches within days. So long term, it's 3.
-			 * Multiplied by 2 here, makes 6. Used to be 1.5 * 3 = 4.5. Wuala uses 5, but that's
-			 * all FEC.
-			 */
-			int checkBlocks = dataBlocks * HighLevelSimpleClientImpl.SPLITFILE_CHECK_BLOCKS_PER_SEGMENT / HighLevelSimpleClientImpl.SPLITFILE_BLOCKS_PER_SEGMENT;
-			if(dataBlocks >= HighLevelSimpleClientImpl.SPLITFILE_CHECK_BLOCKS_PER_SEGMENT)
-				checkBlocks = HighLevelSimpleClientImpl.SPLITFILE_CHECK_BLOCKS_PER_SEGMENT;
-			return StandardOnionFECCodec.getInstance(dataBlocks, checkBlocks, executor);
+			int checkBlocks = standardOnionCheckBlocks(dataBlocks);
+			return StandardOnionFECCodec.getInstance(dataBlocks, checkBlocks);
}
else
return null;
}
+
+	private static int standardOnionCheckBlocks(int dataBlocks) {
+		/**
+		 * ALCHEMY: What we do know is that redundancy by FEC is much more efficient than
+		 * redundancy by simply duplicating blocks, for obvious reasons (see e.g. Wuala). But
+		 * we have to have some redundancy at the duplicating blocks level because we do use
+		 * some keys directly etc: we store an insert in 3 nodes. We also cache it on 20 nodes,
+		 * but generally the key will fall out of the caches within days. So long term, it's 3.
+		 * Multiplied by 2 here, makes 6. Used to be 1.5 * 3 = 4.5. Wuala uses 5, but that's
+		 * all FEC.
+		 */
+		int checkBlocks = dataBlocks * HighLevelSimpleClientImpl.SPLITFILE_CHECK_BLOCKS_PER_SEGMENT / HighLevelSimpleClientImpl.SPLITFILE_BLOCKS_PER_SEGMENT;
+		if(dataBlocks >= HighLevelSimpleClientImpl.SPLITFILE_CHECK_BLOCKS_PER_SEGMENT)
+			checkBlocks = HighLevelSimpleClientImpl.SPLITFILE_CHECK_BLOCKS_PER_SEGMENT;
+		return checkBlocks;
+	}
+
+	public static int getCheckBlocks(short splitfileType, int dataBlocks) {
+		if(splitfileType == Metadata.SPLITFILE_ONION_STANDARD) {
+			return standardOnionCheckBlocks(dataBlocks);
+		} else
+			return 0;
+	}
+
/**
* How many check blocks?
*/
public abstract int countCheckBlocks();
	protected void realDecode(SplitfileBlock[] dataBlockStatus, SplitfileBlock[] checkBlockStatus, int blockLength, BucketFactory bf) throws IOException {
+		loadFEC();
		logMINOR = Logger.shouldLog(Logger.MINOR, this);
		if(logMINOR)
			Logger.minor(this, "Doing decode: " + dataBlockStatus.length + " data blocks, " + checkBlockStatus.length + " check blocks, block length " + blockLength + " with " + this, new Exception("debug"));
@@ -145,20 +156,7 @@
			long sz = buckets[i].size();
			if(sz < blockLength) {
				if(i != dataBlockStatus.length - 1)
-					throw new IllegalArgumentException("All buckets except the last must be the full size but data bucket " + i + " of " + dataBlockStatus.length + " (" + dataBlockStatus[i] + ") is " + sz + " not " + blockLength);
-				if(sz < blockLength) {
-					// FIXME NOT FETCHING LAST BLOCK
-//					buckets[i] = BucketTools.pad(buckets[i], blockLength, bf, (int) sz);
-					buckets[i].free();
-					buckets[i] = bf.makeBucket(blockLength);
-					writers[i] = buckets[i].getOutputStream();
-					if(logMINOR)
-						Logger.minor(this, "writers[" + i + "] != null (NOT PADDING)");
-					readers[i] = null;
-					numberToDecode++;
-				}
-				else
-					throw new IllegalArgumentException("Too big: " + sz + " bigger than " + blockLength);
+					throw new IllegalArgumentException("All buckets must be the full size (caller must pad if needed) but data bucket " + i + " of " + dataBlockStatus.length + " (" + dataBlockStatus[i] + ") is " + sz + " not " + blockLength);
			} else {
				if(logMINOR)
					Logger.minor(this, "writers[" + i + "] = null (already filled)");
@@ -233,6 +231,7 @@
protected void realEncode(Bucket[] dataBlockStatus,
Bucket[] checkBlockStatus, int blockLength, BucketFactory bf)
throws IOException {
+ loadFEC();
logMINOR = Logger.shouldLog(Logger.MINOR, this);
// Runtime.getRuntime().gc();
// Runtime.getRuntime().runFinalization();
@@ -270,29 +269,29 @@
for(int i = 0; i < dataBlockStatus.length; i++) {
buckets[i] = dataBlockStatus[i];
+ if(buckets[i] == null)
+ throw new NullPointerException("Data
bucket "+i+" is null!");
long sz = buckets[i].size();
if(sz < blockLength) {
- if(i != dataBlockStatus.length - 1)
- throw new
IllegalArgumentException("All buckets except the last must be the full size");
- if(sz < blockLength) {
- buckets[i] =
BucketTools.pad(buckets[i], blockLength, bf, (int) sz);
- toFree = buckets[i];
- } else
- throw new
IllegalArgumentException("Too big: " + sz + " bigger than " + blockLength);
+ throw new IllegalArgumentException("All
buckets must be the full size: caller must pad the last one if needed");
}
readers[i] = new
DataInputStream(buckets[i].getInputStream());
}
+ int created = 0;
for(int i = 0; i < checkBlockStatus.length; i++) {
buckets[i + k] = checkBlockStatus[i];
if(buckets[i + k] == null) {
buckets[i + k] =
bf.makeBucket(blockLength);
writers[i] = buckets[i +
k].getOutputStream();
toEncode[numberToEncode++] = i + k;
+ created++;
}
else
writers[i] = null;
}
+ if(logMINOR)
+ Logger.minor(this, "Created "+created+" check
buckets");
// Runtime.getRuntime().gc();
// Runtime.getRuntime().runFinalization();
@@ -367,148 +366,13 @@
*
* @param FECJob
*/
-	public void addToQueue(FECJob job) {
-		addToQueue(job, this, executor);
+	public void addToQueue(FECJob job, FECQueue queue, ObjectContainer container) {
+		queue.addToQueue(job, this, container);
	}
-
-	public static void addToQueue(FECJob job, FECCodec codec, Executor executor) {
-		int maxThreads = getMaxRunningFECThreads();
-		synchronized(_awaitingJobs) {
-			_awaitingJobs.addFirst(job);
-			if(runningFECThreads < maxThreads) {
-				executor.execute(fecRunner, "FEC Pool(" + (fecPoolCounter++) + ")");
-				runningFECThreads++;
-			}
-			_awaitingJobs.notifyAll();
-		}
-		if(logMINOR)
-			Logger.minor(StandardOnionFECCodec.class, "Adding a new job to the queue (" + _awaitingJobs.size() + ").");
-	}
-	private static final LinkedList _awaitingJobs = new LinkedList();
-	private static final FECRunner fecRunner = new FECRunner();
-	private static int runningFECThreads;
-	private static int fecPoolCounter;
-	private synchronized static int getMaxRunningFECThreads() {
-		if (maxRunningFECThreads != -1)
-			return maxRunningFECThreads;
-		String osName = System.getProperty("os.name");
-		if(osName.indexOf("Windows") == -1 && (osName.toLowerCase().indexOf("mac os x") > 0) || (!NativeThread.usingNativeCode())) {
-			// OS/X niceness is really weak, so we don't want any more background CPU load than necessary
-			// Also, on non-Windows, we need the native threads library to be working.
-			maxRunningFECThreads = 1;
-		} else {
-			// Most other OSs will have reasonable niceness, so go by RAM.
-			Runtime r = Runtime.getRuntime();
-			int max = r.availableProcessors(); // FIXME this may change in a VM, poll it
-			long maxMemory = r.maxMemory();
-			if(maxMemory < 256*1024*1024) {
-				max = 1;
-			} else {
-				// Measured 11MB decode 8MB encode on amd64.
-				// No more than 10% of available RAM, so 110MB for each extra processor.
-				// No more than 3 so that we don't reach any FileDescriptor related limit
-				max = Math.min(3, Math.min(max, (int) (Math.min(Integer.MAX_VALUE, maxMemory / (128*1024*1024)))));
-			}
-			maxRunningFECThreads = max;
-		}
-		Logger.minor(FECCodec.class, "Maximum FEC threads: "+maxRunningFECThreads);
-		return maxRunningFECThreads;
+	public void objectCanDeactivate(ObjectContainer container) {
+		Logger.minor(this, "Deactivating "+this, new Exception("debug"));
	}
-
-	private static int maxRunningFECThreads = -1;
-
-	/**
-	 * A private Thread started by {@link FECCodec}...
-	 *
-	 * @author Florent Daignière <nextgens at freenetproject.org>
-	 */
-	private static class FECRunner implements PrioRunnable {
-
-		public void run() {
-			freenet.support.Logger.OSThread.logPID(this);
-			try {
-				while(true) {
-					FECJob job = null;
-					// Get a job
-					synchronized (_awaitingJobs) {
-						while (_awaitingJobs.isEmpty()) {
-							_awaitingJobs.wait(Integer.MAX_VALUE);
-							if (runningFECThreads > getMaxRunningFECThreads())
-								return;
-						}
-						job = (FECJob) _awaitingJobs.removeLast();
-					}
-
-					// Encode it
-					try {
-						if (job.isADecodingJob)
-							job.codec.realDecode(job.dataBlockStatus, job.checkBlockStatus, job.blockLength,
-									job.bucketFactory);
-						else {
-							job.codec.realEncode(job.dataBlocks, job.checkBlocks, job.blockLength, job.bucketFactory);
-							// Update SplitFileBlocks from buckets if necessary
-							if ((job.dataBlockStatus != null) || (job.checkBlockStatus != null)) {
-								for (int i = 0; i < job.dataBlocks.length; i++)
-									job.dataBlockStatus[i].setData(job.dataBlocks[i]);
-								for (int i = 0; i < job.checkBlocks.length; i++)
-									job.checkBlockStatus[i].setData(job.checkBlocks[i]);
-							}
-						}
-					} catch (IOException e) {
-						Logger.error(this, "BOH! ioe:" + e.getMessage(), e);
-					}
-
-					// Call the callback
-					try {
-						if (job.isADecodingJob)
-							job.callback.onDecodedSegment();
-						else
-							job.callback.onEncodedSegment();
-					} catch (Throwable e) {
-						Logger.error(this, "The callback failed!" + e.getMessage(), e);
-					}
-				}
-			} catch (Throwable t) {
-				Logger.error(this, "Caught "+t+" in "+this, t);
-			}
-			finally {
-				synchronized (_awaitingJobs) {
-					runningFECThreads--;
-				}
-			}
-		}
-
-		public int getPriority() {
-			return NativeThread.LOW_PRIORITY;
-		}
-	}
-
-	public void handleLowMemory() throws Exception {
-		synchronized (_awaitingJobs) {
-			maxRunningFECThreads = Math.max(1, maxRunningFECThreads - 1);
-			_awaitingJobs.notify(); // not notifyAll()
-		}
-	}
-
-	public void handleOutOfMemory() throws Exception {
-		synchronized (_awaitingJobs) {
-			maxRunningFECThreads = 1;
-			_awaitingJobs.notifyAll();
-		}
-	}
-
-	/**
-	 * An interface wich has to be implemented by FECJob submitters
-	 *
-	 * @author Florent Daignière <nextgens at freenetproject.org>
-	 *
-	 * WARNING: the callback is expected to release the thread !
-	 */
-	public interface StandardOnionFECCodecEncoderCallback {
-
-		public void onEncodedSegment();
-
-		public void onDecodedSegment();
-	}
+	public abstract short getAlgorithm();
}
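
To make the ALCHEMY arithmetic concrete: the check-block count scales linearly with the data-block count and is capped at SPLITFILE_CHECK_BLOCKS_PER_SEGMENT. Assuming segments of 128 data and 128 check blocks, which is what the "multiplied by 2" remark implies (an assumption; verify against HighLevelSimpleClientImpl):

	// Worked examples under the assumed constants 128/128:
	//   40 data blocks  -> 40 * 128 / 128 = 40 check blocks (2x total redundancy)
	//   128 data blocks -> capped at 128 check blocks
	//   200 data blocks -> 200 >= 128, still capped at 128 check blocks
	int check = FECCodec.getCheckBlocks(Metadata.SPLITFILE_ONION_STANDARD, 40); // 40 under these constants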
Modified: trunk/freenet/src/freenet/client/FECJob.java
===================================================================
--- trunk/freenet/src/freenet/client/FECJob.java	2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/client/FECJob.java	2009-04-01 20:34:09 UTC (rev 26322)
@@ -3,7 +3,11 @@
*/
package freenet.client;
-import freenet.client.FECCodec.StandardOnionFECCodecEncoderCallback;
+import com.db4o.ObjectContainer;
+
+import freenet.client.async.ClientContext;
+import freenet.support.Executor;
+import freenet.support.Logger;
import freenet.support.api.Bucket;
import freenet.support.api.BucketFactory;
@@ -14,19 +18,44 @@
*/
public class FECJob {
- final FECCodec codec;
+ private transient FECCodec codec;
+ private final short fecAlgo;
final Bucket[] dataBlocks, checkBlocks;
final SplitfileBlock[] dataBlockStatus, checkBlockStatus;
final BucketFactory bucketFactory;
final int blockLength;
- final StandardOnionFECCodecEncoderCallback callback;
+ final FECCallback callback;
final boolean isADecodingJob;
+ final long addedTime;
+ final short priority;
+ final boolean persistent;
+ /** Parent queue */
+ final FECQueue queue;
+ // A persistent hash code helps with debugging.
+ private final int hashCode;
+ transient boolean running;
-	public FECJob(FECCodec codec, SplitfileBlock[] dataBlockStatus, SplitfileBlock[] checkBlockStatus, int blockLength, BucketFactory bucketFactory, StandardOnionFECCodecEncoderCallback callback, boolean isADecodingJob) {
+	public int hashCode() {
+		return hashCode;
+	}
+
+	public FECJob(FECCodec codec, FECQueue queue, SplitfileBlock[] dataBlockStatus, SplitfileBlock[] checkBlockStatus, int blockLength, BucketFactory bucketFactory, FECCallback callback, boolean isADecodingJob, short priority, boolean persistent) {
this.codec = codec;
- this.dataBlockStatus = dataBlockStatus;
- this.checkBlockStatus = checkBlockStatus;
+ this.fecAlgo = codec.getAlgorithm();
+ this.queue = queue;
+ this.priority = priority;
+ this.addedTime = System.currentTimeMillis();
+		this.dataBlockStatus = new SplitfileBlock[dataBlockStatus.length];
+		this.checkBlockStatus = new SplitfileBlock[checkBlockStatus.length];
+		for(int i=0;i<dataBlockStatus.length;i++)
+			this.dataBlockStatus[i] = dataBlockStatus[i];
+		for(int i=0;i<checkBlockStatus.length;i++)
+			this.checkBlockStatus[i] = checkBlockStatus[i];
+
+// this.dataBlockStatus = dataBlockStatus;
+// this.checkBlockStatus = checkBlockStatus;
+
this.dataBlocks = new Bucket[dataBlockStatus.length];
this.checkBlocks = new Bucket[checkBlockStatus.length];
for(int i=0;i<dataBlocks.length;i++)
@@ -37,11 +66,22 @@
this.blockLength = blockLength;
this.bucketFactory = bucketFactory;
this.callback = callback;
- this.isADecodingJob = isADecodingJob;
+ this.isADecodingJob = isADecodingJob;
+ this.persistent = persistent;
+ this.hashCode = super.hashCode();
}
-	public FECJob(FECCodec codec, Bucket[] dataBlocks, Bucket[] checkBlocks, int blockLength, BucketFactory bucketFactory, StandardOnionFECCodecEncoderCallback callback, boolean isADecodingJob) {
+	public String toString() {
+		return super.toString()+":decode="+isADecodingJob+":callback="+callback+":persistent="+persistent;
+	}
+
+	public FECJob(FECCodec codec, FECQueue queue, Bucket[] dataBlocks, Bucket[] checkBlocks, int blockLength, BucketFactory bucketFactory, FECCallback callback, boolean isADecodingJob, short priority, boolean persistent) {
+ this.hashCode = super.hashCode();
this.codec = codec;
+ this.fecAlgo = codec.getAlgorithm();
+ this.queue = queue;
+ this.priority = priority;
+ this.addedTime = System.currentTimeMillis();
this.dataBlocks = dataBlocks;
this.checkBlocks = checkBlocks;
this.dataBlockStatus = null;
@@ -50,5 +90,142 @@
this.bucketFactory = bucketFactory;
this.callback = callback;
this.isADecodingJob = isADecodingJob;
+ this.persistent = persistent;
}
+
+	public FECCodec getCodec() {
+		if(codec == null) {
+			codec = FECCodec.getCodec(fecAlgo, dataBlocks.length, checkBlocks.length);
+			if(codec == null)
+				Logger.error(this, "No codec found for algo "+fecAlgo+" data blocks length "+dataBlocks.length+" check blocks length "+checkBlocks.length);
+		}
+		return codec;
+	}
+
+	public void activateForExecution(ObjectContainer container) {
+		boolean logMINOR = Logger.shouldLog(Logger.MINOR, this);
+		if(logMINOR) Logger.minor(this, "Activating FECJob...");
+		if(dataBlockStatus != null && logMINOR) {
+			for(int i=0;i<dataBlockStatus.length;i++)
+				Logger.minor(this, "Data block status "+i+": "+dataBlockStatus[i]+" (before activation)");
+		}
+		container.activate(this, 2);
+		if(dataBlockStatus != null) {
+			for(int i=0;i<dataBlockStatus.length;i++)
+				container.activate(dataBlockStatus[i], 2);
+		}
+		if(dataBlockStatus != null && logMINOR) {
+			for(int i=0;i<dataBlockStatus.length;i++)
+				Logger.minor(this, "Data block status "+i+": "+dataBlockStatus[i]+" (after activation)");
+		}
+		if(checkBlockStatus != null) {
+			for(int i=0;i<checkBlockStatus.length;i++)
+				container.activate(checkBlockStatus[i], 2);
+		}
+		if(dataBlocks != null) {
+			for(int i=0;i<dataBlocks.length;i++) {
+				Logger.minor(this, "Data bucket "+i+": "+dataBlocks[i]+" (before activation)");
+				container.activate(dataBlocks[i], 1);
+				Logger.minor(this, "Data bucket "+i+": "+dataBlocks[i]+" (after activation)");
+			}
+		}
+		if(checkBlocks != null) {
+			for(int i=0;i<checkBlocks.length;i++)
+				container.activate(checkBlocks[i], 1);
+		}
+
+	}
+
+ public void deactivate(ObjectContainer container) {
+		if(Logger.shouldLog(Logger.MINOR, this)) Logger.minor(this, "Deactivating FECJob...");
+ if(dataBlockStatus != null) {
+ for(int i=0;i<dataBlockStatus.length;i++)
+ container.deactivate(dataBlockStatus[i], 2);
+ }
+ if(checkBlockStatus != null) {
+ for(int i=0;i<checkBlockStatus.length;i++)
+ container.deactivate(checkBlockStatus[i], 2);
+ }
+ if(dataBlocks != null) {
+ for(int i=0;i<dataBlocks.length;i++)
+ container.deactivate(dataBlocks[i], 1);
+ }
+ if(checkBlocks != null) {
+ for(int i=0;i<checkBlocks.length;i++)
+ container.deactivate(checkBlocks[i], 1);
+ }
+ }
+
+	public void storeBlockStatuses(ObjectContainer container) {
+		boolean logMINOR = Logger.shouldLog(Logger.MINOR, this);
+		if(logMINOR) Logger.minor(this, "Storing block statuses");
+		if(dataBlockStatus != null) {
+			for(int i=0;i<dataBlockStatus.length;i++) {
+				SplitfileBlock block = dataBlockStatus[i];
+				if(logMINOR) Logger.minor(this, "Storing data block "+i+": "+block);
+				block.storeTo(container);
+			}
+		}
+		if(checkBlockStatus != null) {
+			for(int i=0;i<checkBlockStatus.length;i++) {
+				SplitfileBlock block = checkBlockStatus[i];
+				if(logMINOR) Logger.minor(this, "Storing check block "+i+": "+block);
+				block.storeTo(container);
+			}
+		}
+	}
+
+	public boolean isCancelled(ObjectContainer container) {
+		if(callback == null) {
+			for(Bucket data : dataBlocks) {
+				if(data != null) {
+					Logger.error(this, "Callback is null (deleted??) but data is valid: "+data);
+					data.free();
+					data.removeFrom(container);
+				}
+			}
+			for(Bucket data : checkBlocks) {
+				if(data != null) {
+					Logger.error(this, "Callback is null (deleted??) but data is valid: "+data);
+					data.free();
+					data.removeFrom(container);
+				}
+			}
+			for(SplitfileBlock block : dataBlockStatus) {
+				if(block != null) {
+					Logger.error(this, "Callback is null (deleted??) but data is valid: "+block);
+					Bucket data = block.getData();
+					if(data != null) {
+						Logger.error(this, "Callback is null (deleted??) but data is valid: "+data);
+						data.free();
+						data.removeFrom(container);
+					}
+					container.delete(block);
+				}
+			}
+			for(SplitfileBlock block : checkBlockStatus) {
+				if(block != null) {
+					Logger.error(this, "Callback is null (deleted??) but data is valid: "+block);
+					Bucket data = block.getData();
+					if(data != null) {
+						Logger.error(this, "Callback is null (deleted??) but data is valid: "+data);
+						data.free();
+						data.removeFrom(container);
+					}
+					container.delete(block);
+				}
+			}
+			return true;
+		}
+		return false;
+	}
+
+	/**
+	 * @param container
+	 * @param context
+	 * @return True unless we were unable to remove the job because it has already started.
+	 */
+	public boolean cancel(ObjectContainer container, ClientContext context) {
+		return queue.cancel(this, container, context);
+	}
}
\ No newline at end of file
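
Note that codec is transient while fecAlgo persists: a job read back from the database rebuilds its codec on demand in getCodec() from the algorithm id and the block counts. Submitting an encode job now goes through the queue, roughly like this (a sketch; the priority and persistence flags are illustrative):

	// Illustrative submission of a persistent encode job.
	void queueEncode(FECQueue fecQueue, Bucket[] dataBuckets, Bucket[] checkBuckets, int blockLength,
			BucketFactory bucketFactory, FECCallback callback, ObjectContainer container) {
		FECCodec codec = FECCodec.getCodec(Metadata.SPLITFILE_ONION_STANDARD, dataBuckets.length);
		FECJob job = new FECJob(codec, fecQueue, dataBuckets, checkBuckets, blockLength,
				bucketFactory, callback, false /* encode, not decode */,
				RequestStarter.BULK_SPLITFILE_PRIORITY_CLASS, true /* persistent */);
		codec.addToQueue(job, fecQueue, container);
	}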
Copied: trunk/freenet/src/freenet/client/FECQueue.java (from rev 26320, branches/db4o/freenet/src/freenet/client/FECQueue.java)
===================================================================
--- trunk/freenet/src/freenet/client/FECQueue.java	(rev 0)
+++ trunk/freenet/src/freenet/client/FECQueue.java	2009-04-01 20:34:09 UTC (rev 26322)
@@ -0,0 +1,439 @@
+/* This code is part of Freenet. It is distributed under the GNU General
+ * Public License, version 2 (or at your option any later version). See
+ * http://www.gnu.org/ for further details of the GPL. */
+package freenet.client;
+
+import java.io.IOException;
+import java.util.LinkedList;
+import java.util.ListIterator;
+
+import com.db4o.ObjectContainer;
+import com.db4o.ObjectSet;
+import com.db4o.query.Constraint;
+import com.db4o.query.Predicate;
+import com.db4o.query.Query;
+
+import freenet.client.async.ClientContext;
+import freenet.client.async.DBJob;
+import freenet.client.async.DBJobRunner;
+import freenet.node.PrioRunnable;
+import freenet.node.RequestStarter;
+import freenet.support.Executor;
+import freenet.support.Logger;
+import freenet.support.OOMHandler;
+import freenet.support.OOMHook;
+import freenet.support.io.NativeThread;
+
+/**
+ * The FEC queue. Uses a limited number of threads (at most one per core), a non-persistent queue,
+ * a persistent queue (kept in the database), and a transient cache of the persistent queue.
+ * Sorted by priority and then by time added.
+ *
+ * Note that the FECQueue must be pulled from the database, because FECJob's are queried based
+ * on their referring to it.
+ * @author toad
+ */
+public class FECQueue implements OOMHook {
+
+ private transient LinkedList[] transientQueue;
+ private transient LinkedList[] persistentQueueCache;
+ private transient int maxPersistentQueueCacheSize;
+ private transient int priorities;
+ private transient DBJobRunner databaseJobRunner;
+ private transient Executor executor;
+ private transient ClientContext clientContext;
+ private transient int runningFECThreads;
+ private transient int fecPoolCounter;
+ private transient PrioRunnable runner;
+ private transient DBJob cacheFillerJob;
+ private final long nodeDBHandle;
+
+	public static FECQueue create(final long nodeDBHandle, ObjectContainer container) {
+		ObjectSet<FECQueue> result = container.query(new Predicate<FECQueue>() {
+			public boolean match(FECQueue queue) {
+				if(queue.nodeDBHandle == nodeDBHandle) return true;
+				return false;
+			}
+		});
+		if(result.hasNext()) {
+			FECQueue queue = result.next();
+			container.activate(queue, 1);
+			return queue;
+		} else {
+			FECQueue queue = new FECQueue(nodeDBHandle);
+			container.store(queue);
+			return queue;
+		}
+	}
+
+ private FECQueue(long nodeDBHandle) {
+ this.nodeDBHandle = nodeDBHandle;
+ }
+
+	/** Called after creating or deserializing the FECQueue. Initialises all the transient fields. */
+	public void init(int priorities, int maxCacheSize, DBJobRunner dbJobRunner, Executor exec, ClientContext clientContext) {
+		this.priorities = priorities;
+		this.maxPersistentQueueCacheSize = maxCacheSize;
+		this.databaseJobRunner = dbJobRunner;
+		this.executor = exec;
+		this.clientContext = clientContext;
+		transientQueue = new LinkedList[priorities];
+		persistentQueueCache = new LinkedList[priorities];
+		for(int i=0;i<priorities;i++) {
+			transientQueue[i] = new LinkedList();
+			persistentQueueCache[i] = new LinkedList();
+		}
+		OOMHandler.addOOMHook(this);
+		initRunner();
+		initCacheFillerJob();
+		queueCacheFiller();
+	}
+
+	private void queueCacheFiller() {
+		databaseJobRunner.queue(cacheFillerJob, NativeThread.NORM_PRIORITY, false);
+	}
+
+	public void addToQueue(FECJob job, FECCodec codec, ObjectContainer container) {
+		boolean logMINOR = Logger.shouldLog(Logger.MINOR, this);
+		if(logMINOR)
+			Logger.minor(StandardOnionFECCodec.class, "Adding a new job to the queue: "+job+".");
+		int maxThreads = getMaxRunningFECThreads();
+		if(job.persistent) {
+			job.activateForExecution(container);
+			container.store(job);
+		}
+		boolean kept = false;
+
+		synchronized(this) {
+			if(!job.persistent) {
+				transientQueue[job.priority].addLast(job);
+				kept = true;
+			} else {
+				int totalAbove = 0;
+				for(int i=0;i<job.priority;i++) {
+					totalAbove += persistentQueueCache[i].size();
+				}
+				if(totalAbove >= maxPersistentQueueCacheSize) {
+					// Don't add.
+					if(logMINOR)
+						Logger.minor(this, "Not adding persistent job to in-RAM cache, too many above it");
+				} else {
+					if(totalAbove + persistentQueueCache[job.priority].size() >= maxPersistentQueueCacheSize) {
+						// Still don't add, within a priority it's oldest first.
+						if(logMINOR)
+							Logger.minor(this, "Not adding persistent job to in-RAM cache, too many at same priority");
+					} else {
+						persistentQueueCache[job.priority].addLast(job);
+						kept = true;
+						int total = totalAbove + persistentQueueCache[job.priority].size();
+						for(int i=job.priority+1;i<priorities;i++) {
+							total += persistentQueueCache[i].size();
+							while(total >= maxPersistentQueueCacheSize && !persistentQueueCache[i].isEmpty()) {
+								if(logMINOR)
+									Logger.minor(this, "Removing low priority job from cache, total now "+total);
+								persistentQueueCache[i].removeLast();
+								total--;
+							}
+						}
+					}
+				}
+			}
+			if(!kept) {
+				if(logMINOR)
+					Logger.minor(this, "Deactivating job "+job);
+				job.deactivate(container);
+			}
+			if(runningFECThreads < maxThreads) {
+				executor.execute(runner, "FEC Pool(" + (fecPoolCounter++) + ")");
+				runningFECThreads++;
+			}
+			notifyAll();
+		}
+	}
+
+	private void initRunner() {
+		runner = new PrioRunnable() {
+			/**
+			 * Runs on each thread.
+			 * @author nextgens
+			 */
+			public void run() {
+				freenet.support.Logger.OSThread.logPID(this);
+				try {
+					while(true) {
+						final FECJob job;
+						// Get a job
+						synchronized (FECQueue.this) {
+							job = getFECJobBlockingNoDBAccess();
+							job.running = true;
+						}
+
+						if(Logger.shouldLog(Logger.MINOR, this))
+							Logger.minor(this, "Running job "+job);
+						// Encode it
+						try {
+							if (job.isADecodingJob)
+								job.getCodec().realDecode(job.dataBlockStatus, job.checkBlockStatus, job.blockLength, job.bucketFactory);
+							else {
+								job.getCodec().realEncode(job.dataBlocks, job.checkBlocks, job.blockLength, job.bucketFactory);
+								// Update SplitFileBlocks from buckets if necessary
+								if ((job.dataBlockStatus != null) || (job.checkBlockStatus != null)) {
+									for (int i = 0; i < job.dataBlocks.length; i++)
+										job.dataBlockStatus[i].setData(job.dataBlocks[i]);
+									for (int i = 0; i < job.checkBlocks.length; i++)
+										job.checkBlockStatus[i].setData(job.checkBlocks[i]);
+								}
+							}
+						} catch (IOException e) {
+							Logger.error(this, "BOH! ioe:" + e.getMessage(), e);
+						}
+
+						// Call the callback
+						try {
+							if(!job.persistent) {
+								if (job.isADecodingJob)
+									job.callback.onDecodedSegment(null, clientContext, job, job.dataBlocks, job.checkBlocks, job.dataBlockStatus, job.checkBlockStatus);
+								else
+									job.callback.onEncodedSegment(null, clientContext, job, job.dataBlocks, job.checkBlocks, job.dataBlockStatus, job.checkBlockStatus);
+							} else {
+								if(Logger.shouldLog(Logger.MINOR, this))
+									Logger.minor(this, "Scheduling callback for "+job+"...");
+								int prio = job.isADecodingJob ? NativeThread.NORM_PRIORITY+1 : NativeThread.NORM_PRIORITY;
+								if(job.priority > RequestStarter.IMMEDIATE_SPLITFILE_PRIORITY_CLASS)
+									prio--;
+								if(job.priority >= RequestStarter.BULK_SPLITFILE_PRIORITY_CLASS)
+									prio--;
+								databaseJobRunner.queue(new DBJob() {
+
+									public void run(ObjectContainer container, ClientContext context) {
+										job.storeBlockStatuses(container);
+										// Don't activate the job itself.
+										// It MUST already be activated, because it is carrying the status blocks.
+										// The status blocks have been set on the FEC thread but *not stored* because
+										// they can't be stored on the FEC thread.
+										Logger.minor(this, "Activating "+job.callback+" is active="+container.ext().isActive(job.callback));
+										container.activate(job.callback, 1);
+										if(Logger.shouldLog(Logger.MINOR, this))
+											Logger.minor(this, "Running callback for "+job);
+										try {
+											if(job.isADecodingJob)
+												job.callback.onDecodedSegment(container, clientContext, job, job.dataBlocks, job.checkBlocks, job.dataBlockStatus, job.checkBlockStatus);
+											else
+												job.callback.onEncodedSegment(container, clientContext, job, job.dataBlocks, job.checkBlocks, job.dataBlockStatus, job.checkBlockStatus);
+										} catch (Throwable t) {
+											Logger.error(this, "Caught "+t+" in FECQueue callback", t);
+										} finally {
+											// Always delete the job, even if the callback throws.
+											container.delete(job);
+										}
+										if(container.ext().isStored(job.callback))
+											container.deactivate(job.callback, 1);
+									}
+
+								}, prio, false);
+								if(Logger.shouldLog(Logger.MINOR, this))
+									Logger.minor(this, "Scheduled callback for "+job+"...");
+
+							}
+						} catch (Throwable e) {
+							Logger.error(this, "The callback failed!" + e, e);
+						}
+					}
+				} catch (Throwable t) {
+					Logger.error(this, "Caught "+t+" in "+this, t);
+				}
+				finally {
+					synchronized (FECQueue.this) {
+						runningFECThreads--;
+					}
+				}
+			}
+
+			public int getPriority() {
+				return NativeThread.LOW_PRIORITY;
+			}
+
+		};
+	}
+
+	private void initCacheFillerJob() {
+		cacheFillerJob = new DBJob() {
+
+			public void run(ObjectContainer container, ClientContext context) {
+				// Try to avoid accessing the database while synchronized on the FECQueue.
+				boolean logMINOR = Logger.shouldLog(Logger.MINOR, this);
+				if(logMINOR) Logger.minor(this, "Running FEC cache filler job");
+				while(true) {
+					boolean addedAny = false;
+					int totalCached = 0;
+					for(short prio=0;prio<priorities;prio++) {
+						int grab = 0;
+						synchronized(FECQueue.this) {
+							int newCached = totalCached + persistentQueueCache[prio].size();
+							if(newCached >= maxPersistentQueueCacheSize) return;
+							grab = maxPersistentQueueCacheSize - newCached;
+							totalCached = newCached;
+						}
+						if(logMINOR) Logger.minor(this, "Grabbing up to "+grab+" jobs at priority "+prio);
+						Query query = container.query();
+						query.constrain(FECJob.class);
+						Constraint con = query.descend("priority").constrain(new Short(prio));
+						con.and(query.descend("queue").constrain(FECQueue.this).identity());
+						query.descend("addedTime").orderAscending();
+						ObjectSet results = query.execute();
+						if(results.hasNext()) {
+							for(int j=0;j<grab && results.hasNext();j++) {
+								FECJob job = (FECJob) results.next();
+								job.activateForExecution(container);
+								if(job.isCancelled(container)) {
+									container.delete(job);
+									continue;
+								}
+								if(logMINOR) Logger.minor(this, "Maybe adding "+job);
+								synchronized(FECQueue.this) {
+									if(job.running) {
+										j--;
+										if(logMINOR) Logger.minor(this, "Not adding, already running: "+job);
+										continue;
+									}
+									if(persistentQueueCache[prio].contains(job)) {
+										j--;
+										if(logMINOR) Logger.minor(this, "Not adding as on persistent queue cache for "+prio+" : "+job);
+										continue;
+									}
+									boolean added = false;
+									for(ListIterator it = persistentQueueCache[prio].listIterator();it.hasNext();) {
+										FECJob cmp = (FECJob) it.next();
+										if(cmp.addedTime >= job.addedTime) {
+											it.previous();
+											it.add(job);
+											added = true;
+											if(logMINOR) Logger.minor(this, "Adding "+job+" before "+it);
+											break;
+										}
+									}
+									if(!added) persistentQueueCache[prio].addLast(job);
+									if(logMINOR) Logger.minor(this, "Added "+job);
+									addedAny = true;
+								}
+							}
+						}
+					}
+					if(!addedAny) {
+						if(logMINOR)
+							Logger.minor(this, "No more jobs to add");
+						// Don't notify, let it sleep until more jobs are added.
+						return;
+					} else {
+						int maxRunningThreads = getMaxRunningFECThreads();
+						synchronized(FECQueue.this) {
+							if(runningFECThreads < maxRunningThreads) {
+								int queueSize = 0;
+								for(int i=0;i<priorities;i++) {
+									queueSize += persistentQueueCache[i].size();
+									if(queueSize + runningFECThreads > maxRunningThreads) break;
+								}
+								if(queueSize + runningFECThreads < maxRunningThreads)
+									maxRunningThreads = queueSize + runningFECThreads;
+								while(runningFECThreads < maxRunningThreads) {
+									executor.execute(runner, "FEC Pool "+fecPoolCounter++);
+									runningFECThreads++;
+								}
+							}
+							FECQueue.this.notifyAll();
+						}
+					}
+				}
+			}
+		};
+
+	}
+
+	private int maxRunningFECThreads = -1;
+
+	private synchronized int getMaxRunningFECThreads() {
+		if (maxRunningFECThreads != -1)
+			return maxRunningFECThreads;
+		String osName = System.getProperty("os.name");
+		if(osName.indexOf("Windows") == -1 && (osName.toLowerCase().indexOf("mac os x") > 0) || (!NativeThread.usingNativeCode())) {
+			// OS/X niceness is really weak, so we don't want any more background CPU load than necessary
+			// Also, on non-Windows, we need the native threads library to be working.
+			maxRunningFECThreads = 1;
+		} else {
+			// Most other OSs will have reasonable niceness, so go by RAM.
+			Runtime r = Runtime.getRuntime();
+			int max = r.availableProcessors(); // FIXME this may change in a VM, poll it
+			long maxMemory = r.maxMemory();
+			if(maxMemory < 256*1024*1024) {
+				max = 1;
+			} else {
+				// Measured 11MB decode 8MB encode on amd64.
+				// No more than 10% of available RAM, so 110MB for each extra processor.
+				// No more than 3 so that we don't reach any FileDescriptor related limit
+				max = Math.min(3, Math.min(max, (int) (Math.min(Integer.MAX_VALUE, maxMemory / (128*1024*1024)))));
+			}
+			maxRunningFECThreads = max;
+		}
+		Logger.minor(FECCodec.class, "Maximum FEC threads: "+maxRunningFECThreads);
+		return maxRunningFECThreads;
+	}
+
+ /**
+ * Find a FEC job to run.
+ * @return null only if there are too many FEC threads running.
+ */
+ protected synchronized FECJob getFECJobBlockingNoDBAccess() {
+ while(true) {
+ if(runningFECThreads > getMaxRunningFECThreads())
+ return null;
+ for(int i=0;i<priorities;i++) {
+ if(!transientQueue[i].isEmpty())
+					return (FECJob) transientQueue[i].removeFirst();
+				if(!persistentQueueCache[i].isEmpty())
+					return (FECJob) persistentQueueCache[i].removeFirst();
+ }
+ queueCacheFiller();
+ try {
+ wait();
+ } catch (InterruptedException e) {
+ // Ignore
+ }
+ }
+ }
+
+	public synchronized void handleLowMemory() throws Exception {
+		maxRunningFECThreads = Math.max(1, maxRunningFECThreads - 1);
+		notify(); // not notifyAll()
+	}
+
+	public synchronized void handleOutOfMemory() throws Exception {
+		maxRunningFECThreads = 1;
+		notifyAll();
+	}
+
+	public void objectOnDeactivate(ObjectContainer container) {
+		Logger.error(this, "Attempting to deactivate FECQueue!", new Exception("debug"));
+	}
+
+	/**
+	 * @param job
+	 * @param container
+	 * @param context
+	 * @return True unless we were unable to remove the job because it has already started.
+	 */
+	public boolean cancel(FECJob job, ObjectContainer container, ClientContext context) {
+		synchronized(this) {
+			for(int i=0;i<priorities;i++) {
+				transientQueue[i].remove(job);
+				persistentQueueCache[i].remove(job);
+			}
+		}
+		synchronized(job) {
+			if(job.running) return false;
+		}
+		if(job.persistent)
+			container.delete(job);
+		return true;
+	}
+}
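
For reference, the RAM branch of getMaxRunningFECThreads() above caps the pool at min(3, min(cores, maxMemory/128MB)); the OS X and no-native-threads branches pin it to 1. Worked examples, assuming a 4-core machine:

	//   maxMemory = 192MB -> below the 256MB floor       -> 1 thread
	//   maxMemory = 512MB -> min(3, min(4, 512/128 = 4)) -> 3 threads
	//   maxMemory = 2GB   -> min(3, min(4, 16))          -> 3 threads (hard cap)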
Modified: trunk/freenet/src/freenet/client/FailureCodeTracker.java
===================================================================
--- trunk/freenet/src/freenet/client/FailureCodeTracker.java	2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/client/FailureCodeTracker.java	2009-04-01 20:34:09 UTC (rev 26322)
@@ -7,6 +7,8 @@
import java.util.HashMap;
import java.util.Iterator;
+import com.db4o.ObjectContainer;
+
import freenet.support.SimpleFieldSet;
/**
@@ -55,9 +57,10 @@
int x;
}
- final HashMap map = new HashMap();
+ private HashMap map;
public synchronized void inc(int k) {
+ if(map == null) map = new HashMap();
Integer key = k;
Item i = (Item) map.get(key);
if(i == null)
@@ -67,6 +70,7 @@
}
public synchronized void inc(Integer k, int val) {
+ if(map == null) map = new HashMap();
Integer key = k;
Item i = (Item) map.get(key);
if(i == null)
@@ -76,6 +80,7 @@
}
public synchronized String toVerboseString() {
+ if(map == null) return super.toString()+":empty";
StringBuilder sb = new StringBuilder();
Collection values = map.keySet();
Iterator i = values.iterator();
@@ -93,6 +98,7 @@
@Override
public synchronized String toString() {
+ if(map == null) return super.toString()+":empty";
StringBuilder sb = new StringBuilder(super.toString());
sb.append(':');
if(map.size() == 0) sb.append("empty");
@@ -112,6 +118,8 @@
* Merge codes from another tracker into this one.
*/
public synchronized FailureCodeTracker merge(FailureCodeTracker source)
{
+ if(source.map == null) return this;
+ if(map == null) map = new HashMap();
Iterator keys = source.map.keySet().iterator();
while(keys.hasNext()) {
Integer k = (Integer) keys.next();
@@ -137,6 +145,7 @@
/** Copy verbosely to a SimpleFieldSet */
public synchronized SimpleFieldSet toFieldSet(boolean verbose) {
SimpleFieldSet sfs = new SimpleFieldSet(false);
+ if(map != null) {
Iterator keys = map.keySet().iterator();
while(keys.hasNext()) {
Integer k = (Integer) keys.next();
@@ -149,6 +158,7 @@
					insert ? InsertException.getMessage(code) : FetchException.getMessage(code));
sfs.put(Integer.toString(code)+".Count", item.x);
}
+ }
return sfs;
}
@@ -161,6 +171,7 @@
}
public synchronized boolean isFatal(boolean insert) {
+ if(map == null) return false;
Iterator i = map.keySet().iterator();
while(i.hasNext()) {
Integer code = (Integer) i.next();
@@ -181,8 +192,37 @@
inc(e.getMode());
}
- public boolean isEmpty() {
- return map.isEmpty();
+ public synchronized boolean isEmpty() {
+ return map == null || map.isEmpty();
}
+
+	public void removeFrom(ObjectContainer container) {
+		Item[] items;
+		Integer[] ints;
+		synchronized(this) {
+			items = map == null ? null : (Item[]) map.values().toArray(new Item[map.size()]);
+			ints = map == null ? null : (Integer[]) map.keySet().toArray(new Integer[map.size()]);
+			if(map != null) map.clear();
+		}
+		if(items != null)
+			for(int i=0;i<items.length;i++) {
+				container.delete(items[i]);
+				container.delete(ints[i]);
+			}
+		if(map != null) {
+			container.activate(map, 5);
+			container.delete(map);
+		}
+		container.delete(this);
+	}
+
+	public void objectOnActivate(ObjectContainer container) {
+		if(map != null) container.activate(map, 5);
+	}
+
+ public FailureCodeTracker clone() {
+ FailureCodeTracker tracker = new FailureCodeTracker(insert);
+ tracker.merge(this);
+ return tracker;
+ }
}
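
The map is now allocated lazily, so a tracker that never records a failure keeps no HashMap at all (and stores none in db4o); every accessor therefore has to tolerate map == null, and clone() reuses merge() so the copy owns a fresh map instead of sharing the persisted one. Usage:

	FailureCodeTracker errors = new FailureCodeTracker(false /* fetch, not insert */);
	boolean empty = errors.isEmpty();          // true; no map allocated yet
	errors.inc(FetchException.INTERNAL_ERROR); // first inc() allocates the map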
Modified: trunk/freenet/src/freenet/client/FetchContext.java
===================================================================
--- trunk/freenet/src/freenet/client/FetchContext.java	2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/client/FetchContext.java	2009-04-01 20:34:09 UTC (rev 26322)
@@ -5,14 +5,11 @@
import java.util.Set;
+import com.db4o.ObjectContainer;
+
import freenet.client.async.BlockSet;
-import freenet.client.async.HealingQueue;
-import freenet.client.async.USKManager;
import freenet.client.events.ClientEventProducer;
import freenet.client.events.SimpleEventProducer;
-import freenet.crypt.RandomSource;
-import freenet.node.Ticker;
-import freenet.support.Executor;
import freenet.support.api.BucketFactory;
/** Context for a Fetcher. Contains all the settings a Fetcher needs to know about. */
@@ -25,9 +22,6 @@
/** Low-level client to send low-level requests to. */
public long maxOutputLength;
public long maxTempLength;
- public final ArchiveManager archiveManager;
- public final BucketFactory bucketFactory;
- public USKManager uskManager;
public int maxRecursionLevel;
public int maxArchiveRestarts;
public int maxArchiveLevels;
@@ -35,7 +29,6 @@
public int maxSplitfileThreads;
public int maxSplitfileBlockRetries;
public int maxNonSplitfileRetries;
- public final RandomSource random;
public int maxUSKRetries;
public boolean allowSplitfiles;
public boolean followRedirects;
@@ -49,14 +42,11 @@
	/** If true, and we get a ZIP manifest, and we have no meta-strings left, then
	 * return the manifest contents as data. */
public boolean returnZIPManifests;
- public final HealingQueue healingQueue;
public final boolean ignoreTooManyPathComponents;
	/** If set, contains a set of blocks to be consulted before checking the datastore. */
- public BlockSet blocks;
+ public final BlockSet blocks;
public Set allowedMIMETypes;
- public final Ticker ticker;
- public final Executor executor;
- public final Executor[] slowSerialExecutor;
+ private final boolean hasOwnEventProducer;
public FetchContext(long curMaxLength,
			long curMaxTempLength, int maxMetadataSize, int maxRecursionLevel, int maxArchiveRestarts, int maxArchiveLevels,
@@ -64,24 +54,17 @@
			int maxSplitfileBlockRetries, int maxNonSplitfileRetries, int maxUSKRetries,
			boolean allowSplitfiles, boolean followRedirects, boolean localRequestOnly,
			int maxDataBlocksPerSegment, int maxCheckBlocksPerSegment,
-			RandomSource random, ArchiveManager archiveManager, BucketFactory bucketFactory,
-			ClientEventProducer producer, boolean cacheLocalRequests, USKManager uskManager,
-			HealingQueue hq, boolean ignoreTooManyPathComponents, Ticker ticker, Executor executor,
-			Executor[] slowSerialExecutor) {
-		this.ticker = ticker;
-		this.executor = executor;
-		this.slowSerialExecutor = slowSerialExecutor;
+			BucketFactory bucketFactory,
+			ClientEventProducer producer, boolean cacheLocalRequests,
+			boolean ignoreTooManyPathComponents) {
+		this.blocks = null;
this.maxOutputLength = curMaxLength;
- this.uskManager = uskManager;
this.maxTempLength = curMaxTempLength;
this.maxMetadataSize = maxMetadataSize;
- this.archiveManager = archiveManager;
- this.bucketFactory = bucketFactory;
this.maxRecursionLevel = maxRecursionLevel;
this.maxArchiveRestarts = maxArchiveRestarts;
this.maxArchiveLevels = maxArchiveLevels;
this.dontEnterImplicitArchives = dontEnterImplicitArchives;
- this.random = random;
this.maxSplitfileThreads = maxSplitfileThreads;
this.maxSplitfileBlockRetries = maxSplitfileBlockRetries;
this.maxNonSplitfileRetries = maxNonSplitfileRetries;
@@ -93,35 +76,37 @@
this.maxDataBlocksPerSegment = maxDataBlocksPerSegment;
this.maxCheckBlocksPerSegment = maxCheckBlocksPerSegment;
this.cacheLocalRequests = cacheLocalRequests;
- this.healingQueue = hq;
this.ignoreTooManyPathComponents = ignoreTooManyPathComponents;
+ hasOwnEventProducer = true;
}
-	public FetchContext(FetchContext ctx, int maskID, boolean keepProducer) {
-		this.healingQueue = ctx.healingQueue;
+	/** Copy a FetchContext.
+	 * @param ctx
+	 * @param maskID
+	 * @param keepProducer
+	 * @param blocks Storing a BlockSet to the database is not supported, see comments on SimpleBlockSet.objectCanNew().
+	 */
+	public FetchContext(FetchContext ctx, int maskID, boolean keepProducer, BlockSet blocks) {
		if(keepProducer)
			this.eventProducer = ctx.eventProducer;
		else
			this.eventProducer = new SimpleEventProducer();
-		this.ticker = ctx.ticker;
-		this.executor = ctx.executor;
-		this.slowSerialExecutor = ctx.slowSerialExecutor;
-		this.uskManager = ctx.uskManager;
+		hasOwnEventProducer = !keepProducer;
		this.ignoreTooManyPathComponents = ctx.ignoreTooManyPathComponents;
-		this.blocks = ctx.blocks;
+		if(blocks != null)
+			this.blocks = blocks;
+		else
+			this.blocks = ctx.blocks;
this.allowedMIMETypes = ctx.allowedMIMETypes;
this.maxUSKRetries = ctx.maxUSKRetries;
if(maskID == IDENTICAL_MASK) {
this.maxOutputLength = ctx.maxOutputLength;
this.maxMetadataSize = ctx.maxMetadataSize;
this.maxTempLength = ctx.maxTempLength;
- this.archiveManager = ctx.archiveManager;
- this.bucketFactory = ctx.bucketFactory;
this.maxRecursionLevel = ctx.maxRecursionLevel;
this.maxArchiveRestarts = ctx.maxArchiveRestarts;
this.maxArchiveLevels = ctx.maxArchiveLevels;
			this.dontEnterImplicitArchives = ctx.dontEnterImplicitArchives;
-			this.random = ctx.random;
			this.maxSplitfileThreads = ctx.maxSplitfileThreads;
			this.maxSplitfileBlockRetries = ctx.maxSplitfileBlockRetries;
			this.maxNonSplitfileRetries = ctx.maxNonSplitfileRetries;
@@ -136,13 +121,10 @@
this.maxOutputLength = ctx.maxOutputLength;
this.maxMetadataSize = ctx.maxMetadataSize;
this.maxTempLength = ctx.maxTempLength;
- this.archiveManager = ctx.archiveManager;
- this.bucketFactory = ctx.bucketFactory;
this.maxRecursionLevel = 1;
this.maxArchiveRestarts = 0;
this.maxArchiveLevels = ctx.maxArchiveLevels;
this.dontEnterImplicitArchives = true;
- this.random = ctx.random;
this.maxSplitfileThreads = 0;
			this.maxSplitfileBlockRetries = ctx.maxSplitfileBlockRetries;
			this.maxNonSplitfileRetries = ctx.maxSplitfileBlockRetries;
@@ -157,13 +139,10 @@
this.maxOutputLength = ctx.maxOutputLength;
this.maxTempLength = ctx.maxTempLength;
this.maxMetadataSize = ctx.maxMetadataSize;
- this.archiveManager = ctx.archiveManager;
- this.bucketFactory = ctx.bucketFactory;
this.maxRecursionLevel = ctx.maxRecursionLevel;
this.maxArchiveRestarts = ctx.maxArchiveRestarts;
this.maxArchiveLevels = ctx.maxArchiveLevels;
			this.dontEnterImplicitArchives = ctx.dontEnterImplicitArchives;
-			this.random = ctx.random;
			this.maxSplitfileThreads = ctx.maxSplitfileThreads;
			this.maxSplitfileBlockRetries = ctx.maxSplitfileBlockRetries;
			this.maxNonSplitfileRetries = ctx.maxNonSplitfileRetries;
@@ -178,13 +157,10 @@
this.maxOutputLength = ctx.maxOutputLength;
this.maxMetadataSize = ctx.maxMetadataSize;
this.maxTempLength = ctx.maxTempLength;
- this.archiveManager = ctx.archiveManager;
- this.bucketFactory = ctx.bucketFactory;
this.maxRecursionLevel = ctx.maxRecursionLevel;
this.maxArchiveRestarts = ctx.maxArchiveRestarts;
this.maxArchiveLevels = ctx.maxArchiveLevels;
			this.dontEnterImplicitArchives = ctx.dontEnterImplicitArchives;
-			this.random = ctx.random;
			this.maxSplitfileThreads = ctx.maxSplitfileThreads;
			this.maxSplitfileBlockRetries = ctx.maxSplitfileBlockRetries;
			this.maxNonSplitfileRetries = ctx.maxNonSplitfileRetries;
@@ -201,13 +177,23 @@
/** Make public, but just call parent for a field for field copy */
@Override
- public Object clone() {
+ public FetchContext clone() {
try {
- return super.clone();
+ return (FetchContext) super.clone();
} catch (CloneNotSupportedException e) {
// Impossible
throw new Error(e);
}
}
+
+ public void removeFrom(ObjectContainer container) {
+ if(hasOwnEventProducer) {
+ container.activate(eventProducer, 1);
+ eventProducer.removeFrom(container);
+ }
+		// Storing a BlockSet to the database is not supported, see comments on SimpleBlockSet.objectCanNew().
+		// allowedMIMETypes is passed in, whoever passes it in is responsible for deleting it.
+ container.delete(this);
+ }
}
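
With the executors, archive manager, USK manager and random source stripped out, FetchContext is reduced to persistable configuration; those services now travel in ClientContext. Deriving a masked copy looks like this (a sketch, assuming IDENTICAL_MASK is the public mask constant used above; passing null inherits the parent's BlockSet, per the constructor comment):

	// Keep the parent's event producer and block set, copy all limits verbatim.
	FetchContext childCtx = new FetchContext(parentCtx, FetchContext.IDENTICAL_MASK, true, null);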
Modified: trunk/freenet/src/freenet/client/FetchException.java
===================================================================
--- trunk/freenet/src/freenet/client/FetchException.java	2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/client/FetchException.java	2009-04-01 20:34:09 UTC (rev 26322)
@@ -3,6 +3,8 @@
* http://www.gnu.org/ for further details of the GPL. */
package freenet.client;
+import com.db4o.ObjectContainer;
+
import freenet.keys.FreenetURI;
import freenet.l10n.L10n;
import freenet.support.Logger;
@@ -50,7 +52,9 @@
errorCodes = null;
newURI = null;
expectedSize = -1;
-		if(Logger.shouldLog(Logger.MINOR, this))
+		if(mode == INTERNAL_ERROR)
+			Logger.error(this, "Internal error: "+this);
+		else if(Logger.shouldLog(Logger.MINOR, this))
			Logger.minor(this, "FetchException("+getMessage(mode)+ ')', this);
}
@@ -63,7 +67,9 @@
newURI = null;
this.expectedSize = expectedSize;
this.expectedMimeType = expectedMimeType;
-		if(Logger.shouldLog(Logger.MINOR, this))
+		if(mode == INTERNAL_ERROR)
+			Logger.error(this, "Internal error: "+this);
+		else if(Logger.shouldLog(Logger.MINOR, this))
			Logger.minor(this, "FetchException("+getMessage(mode)+ ')', this);
}
@@ -76,7 +82,9 @@
newURI = uri;
this.expectedSize = expectedSize;
this.expectedMimeType = expectedMimeType;
-		if(Logger.shouldLog(Logger.MINOR, this))
+		if(mode == INTERNAL_ERROR)
+			Logger.error(this, "Internal error: "+this);
+		else if(Logger.shouldLog(Logger.MINOR, this))
			Logger.minor(this, "FetchException("+getMessage(mode)+ ')', this);
}
@@ -88,8 +96,10 @@
initCause(e);
newURI = null;
expectedSize = -1;
- if(Logger.shouldLog(Logger.MINOR, this))
- Logger.minor(this,
"FetchException("+getMessage(mode)+"): "+e,e);
+ if(mode == INTERNAL_ERROR)
+ Logger.error(this, "Internal error: "+this);
+ else if(Logger.shouldLog(Logger.MINOR, this))
+ Logger.minor(this, "FetchException("+getMessage(mode)+
')', this);
}
public FetchException(ArchiveFailureException e) {
@@ -100,8 +110,10 @@
newURI = null;
initCause(e);
expectedSize = -1;
-		if(Logger.shouldLog(Logger.MINOR, this))
-			Logger.minor(this, "FetchException("+getMessage(mode)+"): "+e,e);
+		if(mode == INTERNAL_ERROR)
+			Logger.error(this, "Internal error: "+this);
+		else if(Logger.shouldLog(Logger.MINOR, this))
+			Logger.minor(this, "FetchException("+getMessage(mode)+ ')', this);
}
public FetchException(ArchiveRestartException e) {
@@ -112,8 +124,11 @@
initCause(e);
newURI = null;
expectedSize = -1;
-		if(Logger.shouldLog(Logger.MINOR, this))
-			Logger.minor(this, "FetchException("+getMessage(mode)+"): "+e,e); }
+		if(mode == INTERNAL_ERROR)
+			Logger.error(this, "Internal error: "+this);
+		else if(Logger.shouldLog(Logger.MINOR, this))
+			Logger.minor(this, "FetchException("+getMessage(mode)+ ')', this);
+	}
public FetchException(int mode, Throwable t) {
super(getMessage(mode)+": "+t.getMessage());
@@ -123,8 +138,10 @@
initCause(t);
newURI = null;
expectedSize = -1;
-		if(Logger.shouldLog(Logger.MINOR, this))
-			Logger.minor(this, "FetchException("+getMessage(mode)+"): "+t.getMessage(),t);
+		if(mode == INTERNAL_ERROR)
+			Logger.error(this, "Internal error: "+this);
+		else if(Logger.shouldLog(Logger.MINOR, this))
+			Logger.minor(this, "FetchException("+getMessage(mode)+ ')', this);
}
public FetchException(int mode, String reason, Throwable t) {
@@ -135,8 +152,10 @@
initCause(t);
newURI = null;
expectedSize = -1;
-		if(Logger.shouldLog(Logger.MINOR, this))
-			Logger.minor(this, "FetchException("+getMessage(mode)+"): "+t.getMessage(),t);
+		if(mode == INTERNAL_ERROR)
+			Logger.error(this, "Internal error: "+this);
+		else if(Logger.shouldLog(Logger.MINOR, this))
+			Logger.minor(this, "FetchException("+getMessage(mode)+ ')', this);
}
public FetchException(int mode, FailureCodeTracker errorCodes) {
@@ -146,8 +165,10 @@
this.errorCodes = errorCodes;
newURI = null;
expectedSize = -1;
-		if(Logger.shouldLog(Logger.MINOR, this))
-			Logger.minor(this, "FetchException("+getMessage(mode)+ ')');
+		if(mode == INTERNAL_ERROR)
+			Logger.error(this, "Internal error: "+this);
+		else if(Logger.shouldLog(Logger.MINOR, this))
+			Logger.minor(this, "FetchException("+getMessage(mode)+ ')', this);
}
public FetchException(int mode, String msg) {
@@ -157,8 +178,10 @@
this.mode = mode;
newURI = null;
expectedSize = -1;
-        if(Logger.shouldLog(Logger.MINOR, this))
-            Logger.minor(this, "FetchException("+getMessage(mode)+"): "+msg,this);
+        if(mode == INTERNAL_ERROR)
+            Logger.error(this, "Internal error: "+this);
+        else if(Logger.shouldLog(Logger.MINOR, this))
+            Logger.minor(this, "FetchException("+getMessage(mode)+')', this);
     }
public FetchException(int mode, FreenetURI newURI) {
@@ -168,8 +191,10 @@
errorCodes = null;
this.newURI = newURI;
expectedSize = -1;
-        if(Logger.shouldLog(Logger.MINOR, this))
-            Logger.minor(this, "FetchException("+getMessage(mode)+") -> "+newURI, this);
+        if(mode == INTERNAL_ERROR)
+            Logger.error(this, "Internal error: "+this);
+        else if(Logger.shouldLog(Logger.MINOR, this))
+            Logger.minor(this, "FetchException("+getMessage(mode)+')', this);
     }
public FetchException(int mode, String msg, FreenetURI uri) {
@@ -179,8 +204,10 @@
this.mode = mode;
newURI = uri;
expectedSize = -1;
-        if(Logger.shouldLog(Logger.MINOR, this))
-            Logger.minor(this, "FetchException("+getMessage(mode)+"): "+msg,this);
+        if(mode == INTERNAL_ERROR)
+            Logger.error(this, "Internal error: "+this);
+        else if(Logger.shouldLog(Logger.MINOR, this))
+            Logger.minor(this, "FetchException("+getMessage(mode)+')', this);
     }
public FetchException(FetchException e, int newMode) {
@@ -192,6 +219,10 @@
this.expectedSize = e.expectedSize;
this.extraMessage = e.extraMessage;
this.finalizedSizeAndMimeType = e.finalizedSizeAndMimeType;
+        if(mode == INTERNAL_ERROR)
+            Logger.error(this, "Internal error: "+this);
+        else if(Logger.shouldLog(Logger.MINOR, this))
+            Logger.minor(this, "FetchException("+getMessage(mode)+')', this);
     }
public FetchException(FetchException e, FreenetURI uri) {
@@ -204,8 +235,28 @@
this.expectedSize = e.expectedSize;
this.extraMessage = e.extraMessage;
this.finalizedSizeAndMimeType = e.finalizedSizeAndMimeType;
+        if(mode == INTERNAL_ERROR)
+            Logger.error(this, "Internal error: "+this);
+        else if(Logger.shouldLog(Logger.MINOR, this))
+            Logger.minor(this, "FetchException("+getMessage(mode)+')', this);
     }
+    public FetchException(FetchException e) {
+        super(e.getMessage());
+        initCause(e);
+        this.mode = e.mode;
+        this.newURI = e.newURI == null ? null : e.newURI.clone();
+        this.errorCodes = e.errorCodes == null ? null : e.errorCodes.clone();
+        this.expectedMimeType = e.expectedMimeType;
+        this.expectedSize = e.expectedSize;
+        this.extraMessage = e.extraMessage;
+        this.finalizedSizeAndMimeType = e.finalizedSizeAndMimeType;
+        if(mode == INTERNAL_ERROR)
+            Logger.error(this, "Internal error: "+this);
+        else if(Logger.shouldLog(Logger.MINOR, this))
+            Logger.minor(this, "FetchException("+getMessage(mode)+')', this);
+    }
+
public static String getShortMessage(int mode) {
String ret = L10n.getString("FetchException.shortError."+mode);
if(ret == null)
@@ -369,4 +420,20 @@
public void setNotFinalizedSize() {
this.finalizedSizeAndMimeType = false;
}
+
+ public void removeFrom(ObjectContainer container) {
+ if(errorCodes != null)
+ errorCodes.removeFrom(container);
+ if(newURI != null)
+ newURI.removeFrom(container);
+ StackTraceElement[] elements = getStackTrace();
+ if(elements != null)
+ for(StackTraceElement element : elements)
+ container.delete(element);
+ container.delete(this);
+ }
+
+ public FetchException clone() {
+ return new FetchException(this);
+ }
}
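
Note on the FetchException changes above: every constructor now repeats the same
four-line logging idiom. A minimal sketch of what a shared private helper could
look like (hypothetical refactor, not part of this commit):

    // Hypothetical helper; each constructor would call logConstruction() last.
    private void logConstruction() {
        if(mode == INTERNAL_ERROR)
            // Internal errors are logged unconditionally, at ERROR level.
            Logger.error(this, "Internal error: "+this);
        else if(Logger.shouldLog(Logger.MINOR, this))
            Logger.minor(this, "FetchException("+getMessage(mode)+')', this);
    }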
Modified: trunk/freenet/src/freenet/client/FetchWaiter.java
===================================================================
--- trunk/freenet/src/freenet/client/FetchWaiter.java	2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/client/FetchWaiter.java	2009-04-01 20:34:09 UTC (rev 26322)
@@ -3,6 +3,8 @@
* http://www.gnu.org/ for further details of the GPL. */
package freenet.client;
+import com.db4o.ObjectContainer;
+
import freenet.client.async.BaseClientPutter;
import freenet.client.async.ClientCallback;
import freenet.client.async.ClientGetter;
@@ -14,29 +16,29 @@
private FetchException error;
private boolean finished;
-    public synchronized void onSuccess(FetchResult result, ClientGetter state) {
+    public synchronized void onSuccess(FetchResult result, ClientGetter state, ObjectContainer container) {
         if(finished) return;
         this.result = result;
         finished = true;
         notifyAll();
     }
-    public synchronized void onFailure(FetchException e, ClientGetter state) {
+    public synchronized void onFailure(FetchException e, ClientGetter state, ObjectContainer container) {
         if(finished) return;
         this.error = e;
         finished = true;
         notifyAll();
     }
-    public void onSuccess(BaseClientPutter state) {
+    public void onSuccess(BaseClientPutter state, ObjectContainer container) {
         throw new UnsupportedOperationException();
     }
-    public void onFailure(InsertException e, BaseClientPutter state) {
+    public void onFailure(InsertException e, BaseClientPutter state, ObjectContainer container) {
         throw new UnsupportedOperationException();
     }
-    public void onGeneratedURI(FreenetURI uri, BaseClientPutter state) {
+    public void onGeneratedURI(FreenetURI uri, BaseClientPutter state, ObjectContainer container) {
         throw new UnsupportedOperationException();
     }
@@ -53,11 +55,11 @@
return result;
}
- public void onMajorProgress() {
+ public void onMajorProgress(ObjectContainer container) {
// Ignore
}
- public void onFetchable(BaseClientPutter state) {
+    public void onFetchable(BaseClientPutter state, ObjectContainer container) {
// Ignore
}
}
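
FetchWaiter remains the blocking bridge over these callbacks; the new
ObjectContainer parameter is simply ignored because the waiter is transient.
A usage sketch, mirroring HighLevelSimpleClientImpl.fetch() later in this diff
(uri, fetchContext, priorityClass and requestClient are assumed to be in scope):

    FetchWaiter fw = new FetchWaiter();
    // ClientGetter no longer takes the schedulers; they now live in ClientContext.
    ClientGetter get = new ClientGetter(fw, uri, fetchContext, priorityClass, requestClient, null, null);
    core.clientContext.start(get);
    FetchResult result = fw.waitForCompletion(); // blocks until onSuccess()/onFailure()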
Modified: trunk/freenet/src/freenet/client/HighLevelSimpleClient.java
===================================================================
--- trunk/freenet/src/freenet/client/HighLevelSimpleClient.java	2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/client/HighLevelSimpleClient.java	2009-04-01 20:34:09 UTC (rev 26322)
@@ -11,6 +11,7 @@
import freenet.client.async.ClientPutter;
import freenet.client.events.ClientEventListener;
import freenet.keys.FreenetURI;
+import freenet.node.RequestClient;
public interface HighLevelSimpleClient {
@@ -38,12 +39,12 @@
     /**
      * Blocking fetch of a URI with a configurable max-size and context object.
      */
-    public FetchResult fetch(FreenetURI uri, long maxSize, Object context) throws FetchException;
+    public FetchResult fetch(FreenetURI uri, long maxSize, RequestClient context) throws FetchException;
     /**
      * Non-blocking fetch of a URI with a configurable max-size (in bytes), context object, callback and fetch context.
      */
-    public ClientGetter fetch(FreenetURI uri, long maxSize, Object context, ClientCallback callback, FetchContext fctx) throws FetchException;
+    public ClientGetter fetch(FreenetURI uri, long maxSize, RequestClient context, ClientCallback callback, FetchContext fctx) throws FetchException;
/**
* Blocking insert.
Modified: trunk/freenet/src/freenet/client/HighLevelSimpleClientImpl.java
===================================================================
--- trunk/freenet/src/freenet/client/HighLevelSimpleClientImpl.java	2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/client/HighLevelSimpleClientImpl.java	2009-04-01 20:34:09 UTC (rev 26322)
@@ -7,12 +7,12 @@
import java.util.HashMap;
import java.util.Set;
-import freenet.client.async.BackgroundBlockEncoder;
+import com.db4o.ObjectContainer;
+
import freenet.client.async.BaseClientPutter;
import freenet.client.async.ClientCallback;
import freenet.client.async.ClientGetter;
import freenet.client.async.ClientPutter;
-import freenet.client.async.HealingQueue;
import freenet.client.async.SimpleManifestPutter;
import freenet.client.events.ClientEventListener;
import freenet.client.events.ClientEventProducer;
@@ -22,9 +22,9 @@
import freenet.keys.FreenetURI;
import freenet.keys.InsertableClientSSK;
import freenet.node.NodeClientCore;
+import freenet.node.RequestClient;
import freenet.node.RequestScheduler;
import freenet.node.RequestStarter;
-import freenet.support.Executor;
import freenet.support.Logger;
import freenet.support.api.Bucket;
import freenet.support.api.BucketFactory;
@@ -33,23 +33,19 @@
import freenet.support.io.NullPersistentFileTracker;
import freenet.support.io.PersistentFileTracker;
-public class HighLevelSimpleClientImpl implements HighLevelSimpleClient {
+public class HighLevelSimpleClientImpl implements HighLevelSimpleClient, RequestClient {
- private final ArchiveManager archiveManager;
private final short priorityClass;
private final BucketFactory bucketFactory;
private final BucketFactory persistentBucketFactory;
private final PersistentFileTracker persistentFileTracker;
private final NodeClientCore core;
- private final BackgroundBlockEncoder blockEncoder;
/** One CEP for all requests and inserts */
private final ClientEventProducer globalEventProducer;
private long curMaxLength;
private long curMaxTempLength;
private int curMaxMetadataLength;
private final RandomSource random;
- private final HealingQueue healingQueue;
- private final Executor slowSerialExecutor[];
/** See comments in Node */
private final boolean cacheLocalRequests;
static final int MAX_RECURSION = 10;
@@ -81,27 +77,23 @@
// going by memory usage only; 4kB per stripe
static final int MAX_SPLITFILE_BLOCKS_PER_SEGMENT = 1024;
static final int MAX_SPLITFILE_CHECK_BLOCKS_PER_SEGMENT = 1536;
- static final int SPLITFILE_BLOCKS_PER_SEGMENT = 128;
+ public static final int SPLITFILE_BLOCKS_PER_SEGMENT = 128;
static final int SPLITFILE_CHECK_BLOCKS_PER_SEGMENT = 128;
-    public HighLevelSimpleClientImpl(NodeClientCore node, ArchiveManager mgr, BucketFactory bf, RandomSource r, boolean cacheLocalRequests, short priorityClass, boolean forceDontIgnoreTooManyPathComponents, Executor[] slowSerialExecutor) {
+    public HighLevelSimpleClientImpl(NodeClientCore node, BucketFactory bf, RandomSource r, boolean cacheLocalRequests, short priorityClass, boolean forceDontIgnoreTooManyPathComponents) {
this.core = node;
- this.slowSerialExecutor = slowSerialExecutor;
- archiveManager = mgr;
this.priorityClass = priorityClass;
bucketFactory = bf;
this.persistentFileTracker = node.persistentTempBucketFactory;
random = r;
this.globalEventProducer = new SimpleEventProducer();
-        globalEventProducer.addEventListener(new EventLogger(Logger.MINOR));
+        globalEventProducer.addEventListener(new EventLogger(Logger.MINOR, false));
curMaxLength = Long.MAX_VALUE;
curMaxTempLength = Long.MAX_VALUE;
curMaxMetadataLength = 1024 * 1024;
this.cacheLocalRequests = cacheLocalRequests;
this.persistentBucketFactory = node.persistentTempBucketFactory;
- this.healingQueue = node.getHealingQueue();
- this.blockEncoder = node.backgroundBlockEncoder;
}
public void setMaxLength(long maxLength) {
@@ -119,8 +111,8 @@
if(uri == null) throw new NullPointerException();
FetchContext context = getFetchContext();
FetchWaiter fw = new FetchWaiter();
-        ClientGetter get = new ClientGetter(fw, core.requestStarters.chkFetchScheduler, core.requestStarters.sskFetchScheduler, uri, context, priorityClass, this, null, null);
-        get.start();
+        ClientGetter get = new ClientGetter(fw, uri, context, priorityClass, this, null, null);
+ core.clientContext.start(get);
return fw.waitForCompletion();
}
@@ -128,19 +120,19 @@
return fetch(uri, overrideMaxSize, this);
}
-    public FetchResult fetch(FreenetURI uri, long overrideMaxSize, Object clientContext) throws FetchException {
+    public FetchResult fetch(FreenetURI uri, long overrideMaxSize, RequestClient clientContext) throws FetchException {
if(uri == null) throw new NullPointerException();
FetchWaiter fw = new FetchWaiter();
FetchContext context = getFetchContext(overrideMaxSize);
-        ClientGetter get = new ClientGetter(fw, core.requestStarters.chkFetchScheduler, core.requestStarters.sskFetchScheduler, uri, context, priorityClass, clientContext, null, null);
-        get.start();
+        ClientGetter get = new ClientGetter(fw, uri, context, priorityClass, clientContext, null, null);
+ core.clientContext.start(get);
return fw.waitForCompletion();
}
-    public ClientGetter fetch(FreenetURI uri, long maxSize, Object context, ClientCallback callback, FetchContext fctx) throws FetchException {
+    public ClientGetter fetch(FreenetURI uri, long maxSize, RequestClient clientContext, ClientCallback callback, FetchContext fctx) throws FetchException {
         if(uri == null) throw new NullPointerException();
-        ClientGetter get = new ClientGetter(callback, core.requestStarters.chkFetchScheduler, core.requestStarters.sskFetchScheduler, uri, fctx, priorityClass, context, null, null);
-        get.start();
+        ClientGetter get = new ClientGetter(callback, uri, fctx, priorityClass, clientContext, null, null);
+ core.clientContext.start(get);
return get;
}
@@ -152,17 +144,17 @@
InsertContext context = getInsertContext(true);
PutWaiter pw = new PutWaiter();
         ClientPutter put = new ClientPutter(pw, insert.getData(), insert.desiredURI, insert.clientMetadata,
-                context, core.requestStarters.chkPutScheduler, core.requestStarters.sskPutScheduler, priorityClass,
+                context, priorityClass,
                 getCHKOnly, isMetadata, this, null, filenameHint, false);
-        put.start(false);
+        core.clientContext.start(put, false);
return pw.waitForCompletion();
}
     public ClientPutter insert(InsertBlock insert, boolean getCHKOnly, String filenameHint, boolean isMetadata, InsertContext ctx, ClientCallback cb) throws InsertException {
         ClientPutter put = new ClientPutter(cb, insert.getData(), insert.desiredURI, insert.clientMetadata,
-                ctx, core.requestStarters.chkPutScheduler, core.requestStarters.sskPutScheduler, priorityClass,
+                ctx, priorityClass,
                 getCHKOnly, isMetadata, this, null, filenameHint, false);
-        put.start(false);
+        core.clientContext.start(put, false);
return put;
}
@@ -186,8 +178,8 @@
     public FreenetURI insertManifest(FreenetURI insertURI, HashMap bucketsByName, String defaultName) throws InsertException {
         PutWaiter pw = new PutWaiter();
         SimpleManifestPutter putter =
-            new SimpleManifestPutter(pw, core.requestStarters.chkPutScheduler, core.requestStarters.sskPutScheduler, SimpleManifestPutter.bucketsByNameToManifestEntries(bucketsByName), priorityClass, insertURI, defaultName, getInsertContext(true), false, this, false);
-        putter.start();
+            new SimpleManifestPutter(pw, SimpleManifestPutter.bucketsByNameToManifestEntries(bucketsByName), priorityClass, insertURI, defaultName, getInsertContext(true), false, this, false);
+ core.clientContext.start(putter);
return pw.waitForCompletion();
}
@@ -212,17 +204,16 @@
                 SPLITFILE_THREADS, SPLITFILE_BLOCK_RETRIES, NON_SPLITFILE_RETRIES, USK_RETRIES,
                 FETCH_SPLITFILES, FOLLOW_REDIRECTS, LOCAL_REQUESTS_ONLY,
                 MAX_SPLITFILE_BLOCKS_PER_SEGMENT, MAX_SPLITFILE_CHECK_BLOCKS_PER_SEGMENT,
-                random, archiveManager, bucketFactory, globalEventProducer,
-                cacheLocalRequests, core.uskManager, healingQueue,
-                false, core.getTicker(), core.getExecutor(), slowSerialExecutor);
+ bucketFactory, globalEventProducer,
+ cacheLocalRequests, false);
}
public InsertContext getInsertContext(boolean forceNonPersistent) {
         return new InsertContext(bucketFactory, forceNonPersistent ? bucketFactory : persistentBucketFactory,
                 forceNonPersistent ? NullPersistentFileTracker.getInstance() : persistentFileTracker,
-                random, INSERT_RETRIES, CONSECUTIVE_RNFS_ASSUME_SUCCESS,
+                INSERT_RETRIES, CONSECUTIVE_RNFS_ASSUME_SUCCESS,
                 SPLITFILE_INSERT_THREADS, SPLITFILE_BLOCKS_PER_SEGMENT, SPLITFILE_CHECK_BLOCKS_PER_SEGMENT,
-                globalEventProducer, cacheLocalRequests, core.uskManager, blockEncoder, core.getExecutor(), core.compressor);
+                globalEventProducer, cacheLocalRequests);
}
public FreenetURI[] generateKeyPair(String docName) {
@@ -232,31 +223,31 @@
private final ClientCallback nullCallback = new ClientCallback() {
-        public void onFailure(FetchException e, ClientGetter state) {
+        public void onFailure(FetchException e, ClientGetter state, ObjectContainer container) {
             // Ignore
         }
-        public void onFailure(InsertException e, BaseClientPutter state) {
+        public void onFailure(InsertException e, BaseClientPutter state, ObjectContainer container) {
             // Impossible
         }
-        public void onFetchable(BaseClientPutter state) {
+        public void onFetchable(BaseClientPutter state, ObjectContainer container) {
             // Impossible
         }
-        public void onGeneratedURI(FreenetURI uri, BaseClientPutter state) {
+        public void onGeneratedURI(FreenetURI uri, BaseClientPutter state, ObjectContainer container) {
             // Impossible
         }
-        public void onMajorProgress() {
+        public void onMajorProgress(ObjectContainer container) {
             // Ignore
         }
-        public void onSuccess(FetchResult result, ClientGetter state) {
+        public void onSuccess(FetchResult result, ClientGetter state, ObjectContainer container) {
             result.data.free();
         }
-        public void onSuccess(BaseClientPutter state) {
+        public void onSuccess(BaseClientPutter state, ObjectContainer container) {
             // Impossible
         }
@@ -265,18 +256,26 @@
     public void prefetch(FreenetURI uri, long timeout, long maxSize, Set allowedTypes) {
         FetchContext ctx = getFetchContext(maxSize);
         ctx.allowedMIMETypes = allowedTypes;
-        final ClientGetter get = new ClientGetter(nullCallback, core.requestStarters.chkFetchScheduler, core.requestStarters.sskFetchScheduler, uri, ctx, RequestStarter.PREFETCH_PRIORITY_CLASS, this, new NullBucket(), null);
-        ctx.ticker.queueTimedJob(new Runnable() {
+        final ClientGetter get = new ClientGetter(nullCallback, uri, ctx, RequestStarter.PREFETCH_PRIORITY_CLASS, this, new NullBucket(), null);
+ core.getTicker().queueTimedJob(new Runnable() {
public void run() {
- get.cancel();
+ get.cancel(null, core.clientContext);
}
}, timeout);
try {
- get.start();
+ core.clientContext.start(get);
} catch (FetchException e) {
// Ignore
}
}
+ public boolean persistent() {
+ return false;
+ }
+
+ public void removeFrom(ObjectContainer container) {
+ throw new UnsupportedOperationException();
+ }
+
}
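
HighLevelSimpleClientImpl now doubles as the RequestClient for the requests it
starts, and is always transient: persistent() returns false and removeFrom()
throws. For contrast, a sketch of what a persistent RequestClient might look
like (hypothetical class, not part of this commit):

    class PersistentRequestClient implements RequestClient {
        public boolean persistent() {
            return true; // requests owned by this client live in the database
        }
        public void removeFrom(ObjectContainer container) {
            container.delete(this); // delete our own record when no longer needed
        }
    }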
Modified: trunk/freenet/src/freenet/client/InsertBlock.java
===================================================================
--- trunk/freenet/src/freenet/client/InsertBlock.java	2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/client/InsertBlock.java	2009-04-01 20:34:09 UTC (rev 26322)
@@ -3,6 +3,8 @@
* http://www.gnu.org/ for further details of the GPL. */
package freenet.client;
+import com.db4o.ObjectContainer;
+
import freenet.keys.FreenetURI;
import freenet.support.api.Bucket;
@@ -11,10 +13,10 @@
*/
public class InsertBlock {
- private final Bucket data;
+ private Bucket data;
private boolean isFreed;
- public final FreenetURI desiredURI;
- public final ClientMetadata clientMetadata;
+ public FreenetURI desiredURI;
+ public ClientMetadata clientMetadata;
     public InsertBlock(Bucket data, ClientMetadata metadata, FreenetURI desiredURI) {
if(data == null) throw new NullPointerException();
@@ -31,11 +33,54 @@
return (isFreed ? null : data);
}
- public void free(){
+ public void free(ObjectContainer container){
synchronized (this) {
if(isFreed) return;
isFreed = true;
+ if(data == null) return;
}
data.free();
+ if(container != null) {
+ data.removeFrom(container);
+ }
}
+
+ public void removeFrom(ObjectContainer container) {
+ if(data != null) {
+ container.activate(data, 1);
+ data.removeFrom(container);
+ }
+ if(desiredURI != null) {
+ container.activate(desiredURI, 5);
+ desiredURI.removeFrom(container);
+ }
+ if(clientMetadata != null) {
+ container.activate(clientMetadata, 5);
+ clientMetadata.removeFrom(container);
+ }
+ container.delete(this);
+ }
+
+ public void objectOnActivate(ObjectContainer container) {
+        // Cascading activation of dependencies
+ container.activate(data, 1); // will cascade
+ container.activate(desiredURI, 5);
+ }
+
+ /** Null out the data so it doesn't get removed in removeFrom().
+ * Call this when the data becomes somebody else's problem. */
+ public void nullData() {
+ data = null;
+ }
+
+ /** Null out the URI so it doesn't get removed in removeFrom().
+ * Call this when the URI becomes somebody else's problem. */
+ public void nullURI() {
+ this.desiredURI = null;
+ }
+
+ public void nullMetadata() {
+ this.clientMetadata = null;
+ }
+
}
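
The null*() methods above support an ownership-transfer idiom. A sketch of the
intended calling sequence (hypothetical caller; startInsert() stands in for
whatever takes over the Bucket):

    Bucket data = block.getData();
    startInsert(data);           // the data is now somebody else's problem
    block.nullData();            // so removeFrom() must not delete it
    block.removeFrom(container); // still deletes desiredURI, clientMetadata and the block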
Modified: trunk/freenet/src/freenet/client/InsertContext.java
===================================================================
--- trunk/freenet/src/freenet/client/InsertContext.java	2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/client/InsertContext.java	2009-04-01 20:34:09 UTC (rev 26322)
@@ -3,26 +3,20 @@
* http://www.gnu.org/ for further details of the GPL. */
package freenet.client;
-import freenet.client.async.BackgroundBlockEncoder;
-import freenet.client.async.USKManager;
+import com.db4o.ObjectContainer;
+
import freenet.client.events.ClientEventProducer;
import freenet.client.events.SimpleEventProducer;
-import freenet.crypt.RandomSource;
-import freenet.support.Executor;
import freenet.support.api.BucketFactory;
-import freenet.support.compress.RealCompressor;
-import freenet.support.io.NullPersistentFileTracker;
import freenet.support.io.PersistentFileTracker;
 /** Context object for an insert operation, including both simple and multi-file inserts */
public class InsertContext {
- public final BucketFactory bf;
public final BucketFactory persistentBucketFactory;
public final PersistentFileTracker persistentFileTracker;
/** If true, don't try to compress the data */
public boolean dontCompress;
- public final RandomSource random;
public final short splitfileAlgorithm;
public int maxInsertRetries;
final int maxSplitInsertThreads;
@@ -32,19 +26,12 @@
public final ClientEventProducer eventProducer;
/** Interesting tradeoff, see comments at top of Node.java. */
public final boolean cacheLocalRequests;
- public final USKManager uskManager;
- public final BackgroundBlockEncoder backgroundBlockEncoder;
- public final Executor executor;
- public final RealCompressor compressor;
-    public InsertContext(BucketFactory bf, BucketFactory persistentBF, PersistentFileTracker tracker, RandomSource random,
+    public InsertContext(BucketFactory bf, BucketFactory persistentBF, PersistentFileTracker tracker,
             int maxRetries, int rnfsToSuccess, int maxThreads, int splitfileSegmentDataBlocks, int splitfileSegmentCheckBlocks,
-            ClientEventProducer eventProducer, boolean cacheLocalRequests, USKManager uskManager, BackgroundBlockEncoder blockEncoder, Executor executor, RealCompressor compressor) {
-        this.bf = bf;
+            ClientEventProducer eventProducer, boolean cacheLocalRequests) {
this.persistentFileTracker = tracker;
this.persistentBucketFactory = persistentBF;
- this.uskManager = uskManager;
- this.random = random;
dontCompress = false;
splitfileAlgorithm = Metadata.SPLITFILE_ONION_STANDARD;
this.consecutiveRNFsCountAsSuccess = rnfsToSuccess;
@@ -54,37 +41,11 @@
this.splitfileSegmentDataBlocks = splitfileSegmentDataBlocks;
this.splitfileSegmentCheckBlocks = splitfileSegmentCheckBlocks;
this.cacheLocalRequests = cacheLocalRequests;
- this.backgroundBlockEncoder = blockEncoder;
- this.executor = executor;
- this.compressor = compressor;
}
-    public InsertContext(InsertContext ctx, SimpleEventProducer producer, boolean forceNonPersistent) {
-        this.persistentFileTracker = forceNonPersistent ? NullPersistentFileTracker.getInstance() : ctx.persistentFileTracker;
-        this.uskManager = ctx.uskManager;
-        this.bf = ctx.bf;
-        this.persistentBucketFactory = forceNonPersistent ? ctx.bf : ctx.persistentBucketFactory;
-        this.random = ctx.random;
-        this.dontCompress = ctx.dontCompress;
-        this.splitfileAlgorithm = ctx.splitfileAlgorithm;
-        this.consecutiveRNFsCountAsSuccess = ctx.consecutiveRNFsCountAsSuccess;
-        this.maxInsertRetries = ctx.maxInsertRetries;
-        this.maxSplitInsertThreads = ctx.maxSplitInsertThreads;
-        this.eventProducer = producer;
-        this.splitfileSegmentDataBlocks = ctx.splitfileSegmentDataBlocks;
-        this.splitfileSegmentCheckBlocks = ctx.splitfileSegmentCheckBlocks;
-        this.cacheLocalRequests = ctx.cacheLocalRequests;
-        this.backgroundBlockEncoder = ctx.backgroundBlockEncoder;
-        this.executor = ctx.executor;
-        this.compressor = ctx.compressor;
-    }
-
public InsertContext(InsertContext ctx, SimpleEventProducer producer) {
this.persistentFileTracker = ctx.persistentFileTracker;
- this.uskManager = ctx.uskManager;
- this.bf = ctx.bf;
this.persistentBucketFactory = ctx.persistentBucketFactory;
- this.random = ctx.random;
this.dontCompress = ctx.dontCompress;
this.splitfileAlgorithm = ctx.splitfileAlgorithm;
         this.consecutiveRNFsCountAsSuccess = ctx.consecutiveRNFsCountAsSuccess;
@@ -94,9 +55,12 @@
         this.splitfileSegmentDataBlocks = ctx.splitfileSegmentDataBlocks;
         this.splitfileSegmentCheckBlocks = ctx.splitfileSegmentCheckBlocks;
this.cacheLocalRequests = ctx.cacheLocalRequests;
- this.backgroundBlockEncoder = ctx.backgroundBlockEncoder;
- this.executor = ctx.executor;
- this.compressor = ctx.compressor;
}
+ public void removeFrom(ObjectContainer container) {
+ container.activate(eventProducer, 1);
+ eventProducer.removeFrom(container);
+ container.delete(this);
+ }
+
}
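
InsertContext has shed its node-level dependencies (RandomSource, USKManager,
BackgroundBlockEncoder, Executor, RealCompressor), which now travel in
ClientContext; what remains is per-insert configuration, so instances can be
stored in db4o. Constructing one now looks like this (argument names as used by
HighLevelSimpleClientImpl.getInsertContext() above):

    InsertContext ctx = new InsertContext(bucketFactory, persistentBucketFactory, persistentFileTracker,
            INSERT_RETRIES, CONSECUTIVE_RNFS_ASSUME_SUCCESS, SPLITFILE_INSERT_THREADS,
            SPLITFILE_BLOCKS_PER_SEGMENT, SPLITFILE_CHECK_BLOCKS_PER_SEGMENT,
            globalEventProducer, cacheLocalRequests);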
Modified: trunk/freenet/src/freenet/client/InsertException.java
===================================================================
--- trunk/freenet/src/freenet/client/InsertException.java	2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/client/InsertException.java	2009-04-01 20:34:09 UTC (rev 26322)
@@ -3,8 +3,11 @@
* http://www.gnu.org/ for further details of the GPL. */
package freenet.client;
+import com.db4o.ObjectContainer;
+
import freenet.keys.FreenetURI;
import freenet.l10n.L10n;
+import freenet.support.LogThresholdCallback;
import freenet.support.Logger;
public class InsertException extends Exception {
@@ -23,11 +26,23 @@
return mode;
}
+ private static volatile boolean logMINOR;
+
+ static {
+ Logger.registerLogThresholdCallback(new LogThresholdCallback() {
+
+ @Override
+ public void shouldUpdate() {
+ logMINOR = Logger.shouldLog(Logger.MINOR, this);
+ }
+ });
+ }
+
public InsertException(int m, String msg, FreenetURI expectedURI) {
super(getMessage(m)+": "+msg);
extra = msg;
mode = m;
- if(Logger.shouldLog(Logger.MINOR, getClass()))
+ if(logMINOR)
             Logger.minor(this, "Creating InsertException: "+getMessage(mode)+": "+msg, this);
errorCodes = null;
this.uri = expectedURI;
@@ -37,7 +52,7 @@
super(getMessage(m));
extra = null;
mode = m;
- if(Logger.shouldLog(Logger.MINOR, getClass()))
+ if(logMINOR)
             Logger.minor(this, "Creating InsertException: "+getMessage(mode), this);
errorCodes = null;
this.uri = expectedURI;
@@ -46,7 +61,7 @@
public InsertException(int mode, Throwable e, FreenetURI expectedURI) {
super(getMessage(mode)+": "+e.getMessage());
extra = e.getMessage();
- if(Logger.shouldLog(Logger.MINOR, getClass()))
+ if(logMINOR)
             Logger.minor(this, "Creating InsertException: "+getMessage(mode)+": "+e, e);
this.mode = mode;
errorCodes = null;
@@ -58,7 +73,7 @@
super(getMessage(mode));
extra = null;
this.mode = mode;
- if(Logger.shouldLog(Logger.MINOR, getClass()))
+ if(logMINOR)
             Logger.minor(this, "Creating InsertException: "+getMessage(mode), this);
this.errorCodes = errorCodes;
this.uri = expectedURI;
@@ -72,6 +87,17 @@
this.uri = null;
}
+ public InsertException(InsertException e) {
+ super(e.getMessage());
+ extra = e.extra;
+ mode = e.mode;
+        errorCodes = e.errorCodes == null ? null : e.errorCodes.clone();
+ if(e.uri == null)
+ uri = null;
+ else
+ uri = e.uri.clone();
+ }
+
/** Caller supplied a URI we cannot use */
public static final int INVALID_URI = 1;
/** Failed to read from or write to a bucket; a kind of internal error
*/
@@ -153,4 +179,24 @@
mode = TOO_MANY_RETRIES_IN_BLOCKS;
return new InsertException(mode, errors, null);
}
+
+ public InsertException clone() {
+ return new InsertException(this);
+ }
+
+ public void removeFrom(ObjectContainer container) {
+ if(errorCodes != null) {
+ container.activate(errorCodes, 1);
+ errorCodes.removeFrom(container);
+ }
+ if(uri != null) {
+ container.activate(uri, 5);
+ uri.removeFrom(container);
+ }
+ StackTraceElement[] elements = getStackTrace();
+ if(elements != null)
+ for(StackTraceElement element : elements)
+ container.delete(element);
+ container.delete(this);
+ }
}
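
The new copy constructor and clone() let a persistent request hand its caller a
detached copy before the stored object graph is deleted. A sketch of that
pattern (hypothetical helper, assuming a db4o ObjectContainer):

    static InsertException detach(InsertException stored, ObjectContainer container) {
        InsertException copy = stored.clone(); // deep-copies errorCodes and uri
        stored.removeFrom(container);          // deletes the stored graph
        return copy;                           // remains valid after deletion
    }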
Modified: trunk/freenet/src/freenet/client/Metadata.java
===================================================================
--- trunk/freenet/src/freenet/client/Metadata.java	2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/client/Metadata.java	2009-04-01 20:34:09 UTC (rev 26322)
@@ -16,6 +16,9 @@
import java.util.LinkedList;
import java.util.Map;
import java.util.Set;
+
+import com.db4o.ObjectContainer;
+
import freenet.keys.BaseClientKey;
import freenet.keys.ClientCHK;
import freenet.keys.FreenetURI;
@@ -111,6 +114,9 @@
/** The simple redirect key */
FreenetURI simpleRedirectKey;
+    /** Metadata is sometimes used as a key in hashtables. Therefore it needs a persistent hashCode. */
+ private final int hashCode;
+
short splitfileAlgorithm;
static public final short SPLITFILE_NONREDUNDANT = 0;
static public final short SPLITFILE_ONION_STANDARD = 1;
@@ -180,9 +186,14 @@
         this(new DataInputStream(new ByteArrayInputStream(data)), data.length);
}
+ public int hashCode() {
+ return hashCode;
+ }
+
/** Parse some metadata from a DataInputStream
      * @throws IOException If an I/O error occurs, or the data is incomplete. */
     public Metadata(DataInputStream dis, long length) throws IOException, MetadataParseException {
+ hashCode = super.hashCode();
long magic = dis.readLong();
if(magic != FREENET_METADATA_MAGIC)
             throw new MetadataParseException("Invalid magic "+magic);
@@ -372,6 +383,7 @@
* Create an empty Metadata object
*/
private Metadata() {
+ hashCode = super.hashCode();
// Should be followed by addRedirectionManifest
}
@@ -452,11 +464,17 @@
Metadata data = (Metadata) dir.get(key);
if(data == null)
throw new NullPointerException();
+                if(Logger.shouldLog(Logger.DEBUG, this))
+                    Logger.debug(this, "Putting metadata for "+key);
                 manifestEntries.put(key, data);
             } else if(o instanceof HashMap) {
                 HashMap hm = (HashMap)o;
+                if(Logger.shouldLog(Logger.DEBUG, this))
+                    Logger.debug(this, "Making metadata map for "+key);
                 Metadata subMap = mkRedirectionManifestWithMetadata(hm);
                 manifestEntries.put(key, subMap);
+                if(Logger.shouldLog(Logger.DEBUG, this))
+                    Logger.debug(this, "Putting metadata map for "+key);
}
}
}
@@ -468,6 +486,7 @@
* directories (more HashMap's)
*/
Metadata(HashMap dir, String prefix) {
+ hashCode = super.hashCode();
// Simple manifest - contains actual redirects.
// Not zip manifest, which is basically a redirect.
documentType = SIMPLE_MANIFEST;
@@ -500,6 +519,7 @@
* the archive to read from.
*/
     public Metadata(byte docType, ARCHIVE_TYPE archiveType, COMPRESSOR_TYPE compressionCodec, String arg, ClientMetadata cm) {
+ hashCode = super.hashCode();
if(docType == ARCHIVE_INTERNAL_REDIRECT) {
documentType = docType;
this.archiveType = archiveType;
@@ -520,6 +540,7 @@
* @param cm The client metadata, if any.
*/
     public Metadata(byte docType, ARCHIVE_TYPE archiveType, COMPRESSOR_TYPE compressionCodec, FreenetURI uri, ClientMetadata cm) {
+ hashCode = super.hashCode();
         if((docType == SIMPLE_REDIRECT) || (docType == ARCHIVE_MANIFEST)) {
documentType = docType;
this.archiveType = archiveType;
@@ -531,6 +552,7 @@
setMIMEType(DefaultMIMETypes.DEFAULT_MIME_TYPE);
noMIME = true;
}
+ if(uri == null) throw new NullPointerException();
simpleRedirectKey = uri;
         if(!(uri.getKeyType().equals("CHK") && !uri.hasMetaStrings()))
fullKeys = true;
@@ -540,6 +562,7 @@
     public Metadata(short algo, ClientCHK[] dataURIs, ClientCHK[] checkURIs, int segmentSize, int checkSegmentSize,
             ClientMetadata cm, long dataLength, ARCHIVE_TYPE archiveType, COMPRESSOR_TYPE compressionCodec, long decompressedLength, boolean isMetadata) {
+ hashCode = super.hashCode();
if(isMetadata)
documentType = MULTI_LEVEL_METADATA;
else {
@@ -555,7 +578,9 @@
splitfileBlocks = dataURIs.length;
splitfileCheckBlocks = checkURIs.length;
splitfileDataKeys = dataURIs;
+ assert(keysValid(splitfileDataKeys));
splitfileCheckKeys = checkURIs;
+ assert(keysValid(splitfileCheckKeys));
clientMetadata = cm;
this.compressionCodec = compressionCodec;
this.decompressedLength = decompressedLength;
@@ -566,6 +591,12 @@
         splitfileParams = Fields.intsToBytes(new int[] { segmentSize, checkSegmentSize } );
}
+ private boolean keysValid(ClientCHK[] keys) {
+ for(int i=0;i<keys.length;i++)
+            if(keys[i].getNodeCHK().getRoutingKey() == null) return false;
+ return true;
+ }
+
/**
* Set the MIME type to a string. Compresses it if possible for transit.
*/
@@ -657,6 +688,15 @@
public Metadata getDocument(String name) {
return (Metadata) manifestEntries.get(name);
}
+
+ /**
+ * Return and remove a specific document. Used in persistent requests
+ * so that when removeFrom() is called, the default document won't be
+ * removed, since it is being processed.
+ */
+ public Metadata grabDocument(String name) {
+ return (Metadata) manifestEntries.remove(name);
+ }
/**
* The default document is the one which has an empty name.
@@ -667,6 +707,15 @@
}
/**
+ * Return and remove the default document. Used in persistent requests
+ * so that when removeFrom() is called, the default document won't be
+ * removed, since it is being processed.
+ */
+ public Metadata grabDefaultDocument() {
+ return grabDocument("");
+ }
+
+ /**
* Get all documents in the manifest (ignores default doc).
* @throws MetadataParseException
*/
@@ -943,4 +992,48 @@
if(clientMetadata == null) return null;
return clientMetadata.getMIMEType();
}
+
+ public void removeFrom(ObjectContainer container) {
+ if(resolvedURI != null) {
+ container.activate(resolvedURI, 5);
+ resolvedURI.removeFrom(container);
+ }
+ if(simpleRedirectKey != null) {
+ container.activate(simpleRedirectKey, 5);
+ simpleRedirectKey.removeFrom(container);
+ }
+ if(splitfileDataKeys != null) {
+ for(ClientCHK key : splitfileDataKeys)
+ if(key != null) {
+ container.activate(key, 5);
+ key.removeFrom(container);
+ }
+ }
+ if(splitfileCheckKeys != null) {
+ for(ClientCHK key : splitfileCheckKeys)
+ if(key != null) {
+ container.activate(key, 5);
+ key.removeFrom(container);
+ }
+ }
+ if(manifestEntries != null) {
+ container.activate(manifestEntries, 2);
+ for(Object m : manifestEntries.values()) {
+ Metadata meta = (Metadata) m;
+ container.activate(meta, 1);
+ meta.removeFrom(container);
+ }
+ container.delete(manifestEntries);
+ }
+ if(clientMetadata != null) {
+ container.activate(clientMetadata, 1);
+ clientMetadata.removeFrom(container);
+ }
+ container.delete(this);
+ }
+
+ public void clearSplitfileKeys() {
+ splitfileDataKeys = null;
+ splitfileCheckKeys = null;
+ }
}
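
grabDocument()/grabDefaultDocument() pair with removeFrom() in the same
detach-then-delete pattern. A sketch of the intended use (hypothetical caller;
process() is a stand-in):

    Metadata defaultDoc = metadata.grabDefaultDocument(); // detached from manifestEntries
    metadata.removeFrom(container); // deletes remaining entries, keys and client metadata
    process(defaultDoc);            // safe: it was removed from the manifest first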
Modified: trunk/freenet/src/freenet/client/PutWaiter.java
===================================================================
--- trunk/freenet/src/freenet/client/PutWaiter.java	2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/client/PutWaiter.java	2009-04-01 20:34:09 UTC (rev 26322)
@@ -1,5 +1,7 @@
package freenet.client;
+import com.db4o.ObjectContainer;
+
import freenet.client.async.BaseClientPutter;
import freenet.client.async.ClientCallback;
import freenet.client.async.ClientGetter;
@@ -13,27 +15,27 @@
private FreenetURI uri;
private InsertException error;
-    public void onSuccess(FetchResult result, ClientGetter state) {
+    public void onSuccess(FetchResult result, ClientGetter state, ObjectContainer container) {
         // Ignore
     }
-    public void onFailure(FetchException e, ClientGetter state) {
+    public void onFailure(FetchException e, ClientGetter state, ObjectContainer container) {
         // Ignore
     }
-    public synchronized void onSuccess(BaseClientPutter state) {
+    public synchronized void onSuccess(BaseClientPutter state, ObjectContainer container) {
         succeeded = true;
         finished = true;
         notifyAll();
     }
-    public synchronized void onFailure(InsertException e, BaseClientPutter state) {
+    public synchronized void onFailure(InsertException e, BaseClientPutter state, ObjectContainer container) {
         error = e;
         finished = true;
         notifyAll();
     }
-    public synchronized void onGeneratedURI(FreenetURI uri, BaseClientPutter state) {
+    public synchronized void onGeneratedURI(FreenetURI uri, BaseClientPutter state, ObjectContainer container) {
if(Logger.shouldLog(Logger.MINOR, this))
Logger.minor(this, "URI: "+uri);
if(this.uri == null)
@@ -59,11 +61,11 @@
             throw new InsertException(InsertException.INTERNAL_ERROR, "Did not succeed but no error", uri);
}
- public void onMajorProgress() {
+ public void onMajorProgress(ObjectContainer container) {
// Ignore
}
- public void onFetchable(BaseClientPutter state) {
+    public void onFetchable(BaseClientPutter state, ObjectContainer container) {
// Ignore
}
Modified: trunk/freenet/src/freenet/client/RealArchiveStoreItem.java
===================================================================
--- trunk/freenet/src/freenet/client/RealArchiveStoreItem.java	2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/client/RealArchiveStoreItem.java	2009-04-01 20:34:09 UTC (rev 26322)
@@ -4,6 +4,7 @@
package freenet.client;
import freenet.keys.FreenetURI;
+import freenet.support.Logger;
import freenet.support.api.Bucket;
import freenet.support.io.MultiReaderBucket;
@@ -52,6 +53,8 @@
@Override
void innerClose() {
+ if(Logger.shouldLog(Logger.MINOR, this))
+ Logger.minor(this, "innerClose(): "+this);
bucket.free();
}
Modified: trunk/freenet/src/freenet/client/SplitfileBlock.java
===================================================================
--- trunk/freenet/src/freenet/client/SplitfileBlock.java	2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/client/SplitfileBlock.java	2009-04-01 20:34:09 UTC (rev 26322)
@@ -1,5 +1,7 @@
package freenet.client;
+import com.db4o.ObjectContainer;
+
import freenet.support.api.Bucket;
public interface SplitfileBlock {
@@ -16,5 +18,7 @@
/** Set data */
abstract void setData(Bucket data);
+ abstract void storeTo(ObjectContainer container);
+
}
Modified: trunk/freenet/src/freenet/client/StandardOnionFECCodec.java
===================================================================
--- trunk/freenet/src/freenet/client/StandardOnionFECCodec.java	2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/client/StandardOnionFECCodec.java	2009-04-01 20:34:09 UTC (rev 26322)
@@ -47,14 +47,14 @@
}
}
-    public synchronized static FECCodec getInstance(int dataBlocks, int checkBlocks, Executor executor) {
+    public synchronized static FECCodec getInstance(int dataBlocks, int checkBlocks) {
         MyKey key = new MyKey(dataBlocks, checkBlocks + dataBlocks);
         StandardOnionFECCodec codec = (StandardOnionFECCodec) recentlyUsedCodecs.get(key);
if(codec != null) {
recentlyUsedCodecs.push(key, codec);
return codec;
}
-        codec = new StandardOnionFECCodec(executor, dataBlocks, checkBlocks + dataBlocks);
+        codec = new StandardOnionFECCodec(dataBlocks, checkBlocks + dataBlocks);
recentlyUsedCodecs.push(key, codec);
while(recentlyUsedCodecs.size() > MAX_CACHED_CODECS) {
recentlyUsedCodecs.popKey();
@@ -62,9 +62,18 @@
return codec;
}
- public StandardOnionFECCodec(Executor executor, int k, int n) {
- super(executor, k, n);
+ public StandardOnionFECCodec(int k, int n) {
+ super(k, n);
+ loadFEC();
+
+ logMINOR = Logger.shouldLog(Logger.MINOR, this);
+ }
+
+ protected void loadFEC() {
+ synchronized(this) {
+ if(fec != null) return;
+ }
FECCode fec2 = null;
if(!noNative) {
try {
@@ -82,19 +91,22 @@
}
if (fec2 != null){
+ synchronized(this) {
fec = fec2;
+ }
} else {
- fec = new PureCode(k,n);
+ fec2 = new PureCode(k,n);
+ synchronized(this) {
+ fec = fec2;
+ }
}
// revert to below if above causes JVM crashes
// Worst performance, but decode crashes
// fec = new PureCode(k,n);
         // Crashes are caused by bugs which cause to use 320/128 etc. - n > 256, k < 256.
+ }
- logMINOR = Logger.shouldLog(Logger.MINOR, this);
- }
-
@Override
public int countCheckBlocks() {
return n-k;
@@ -104,4 +116,9 @@
public String toString() {
return super.toString()+":n="+n+",k="+k;
}
+
+ @Override
+ public short getAlgorithm() {
+ return Metadata.SPLITFILE_ONION_STANDARD;
+ }
}
Copied: trunk/freenet/src/freenet/client/TempFetchResult.java (from rev 26320, branches/db4o/freenet/src/freenet/client/TempFetchResult.java)
===================================================================
--- trunk/freenet/src/freenet/client/TempFetchResult.java	                        (rev 0)
+++ trunk/freenet/src/freenet/client/TempFetchResult.java	2009-04-01 20:34:09 UTC (rev 26322)
@@ -0,0 +1,23 @@
+package freenet.client;
+
+import freenet.support.api.Bucket;
+
+public class TempFetchResult extends FetchResult {
+
+ public final boolean freeWhenDone;
+
+    public TempFetchResult(ClientMetadata dm, Bucket fetched, boolean freeWhenDone) {
+ super(dm, fetched);
+ this.freeWhenDone = freeWhenDone;
+ }
+
+ /**
+     * If true, the recipient of this object is responsible for freeing the data.
+     * If false, it is a reference to data held somewhere else, so doesn't need to be freed.
+     * @return true if the recipient must free the data
+ */
+ public boolean freeWhenDone() {
+ return freeWhenDone;
+ }
+
+}
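
A sketch of the contract freeWhenDone() documents, from the recipient's side
(hypothetical consumer; handle() is a stand-in, and the public data field is
the one the null callback in HighLevelSimpleClientImpl frees):

    void consume(TempFetchResult result) {
        try {
            handle(result);         // use the fetched data
        } finally {
            if(result.freeWhenDone())
                result.data.free(); // the recipient owns the Bucket
            // else: a reference to data owned elsewhere; leave it alone
        }
    }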
Modified: trunk/freenet/src/freenet/client/async/BackgroundBlockEncoder.java
===================================================================
--- trunk/freenet/src/freenet/client/async/BackgroundBlockEncoder.java	2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/client/async/BackgroundBlockEncoder.java	2009-04-01 20:34:09 UTC (rev 26322)
@@ -3,6 +3,10 @@
import java.lang.ref.SoftReference;
import java.util.ArrayList;
+import com.db4o.ObjectContainer;
+import com.db4o.ObjectSet;
+import com.db4o.query.Query;
+
import freenet.node.PrioRunnable;
import freenet.support.Logger;
import freenet.support.io.NativeThread;
@@ -14,42 +18,72 @@
public class BackgroundBlockEncoder implements PrioRunnable {
// Minimize memory usage at the cost of having to encode from the end
- private final ArrayList<SoftReference<SingleBlockInserter>> queue;
+ private final ArrayList<SoftReference<Encodeable>> queue;
+ private ClientContext context;
public BackgroundBlockEncoder() {
- queue = new ArrayList<SoftReference<SingleBlockInserter>>();
+ queue = new ArrayList<SoftReference<Encodeable>>();
}
- public void queue(SingleBlockInserter sbi) {
- if(sbi.isCancelled()) return;
- if(sbi.resultingURI != null) return;
-        SoftReference<SingleBlockInserter> ref = new SoftReference<SingleBlockInserter>(sbi);
- synchronized(this) {
- queue.add(ref);
- Logger.minor(this, "Queueing encode of "+sbi);
- notifyAll();
+ public void setContext(ClientContext context) {
+ this.context = context;
+ }
+
+    public void queue(Encodeable sbi, ObjectContainer container, ClientContext context) {
+ if(sbi.persistent()) {
+ queuePersistent(sbi, container, context);
+ runPersistentQueue(context);
+ } else {
+            SoftReference<Encodeable> ref = new SoftReference<Encodeable>(sbi);
+ synchronized(this) {
+ queue.add(ref);
+ Logger.minor(this, "Queueing encode of "+sbi);
+ notifyAll();
+ }
}
}
- public void queue(SingleBlockInserter[] sbis) {
+    public void queue(SingleBlockInserter[] sbis, ObjectContainer container, ClientContext context) {
synchronized(this) {
for(int i=0;i<sbis.length;i++) {
SingleBlockInserter inserter = sbis[i];
if(inserter == null) continue;
- if(inserter.isCancelled()) continue;
+ if(inserter.isCancelled(container)) continue;
if(inserter.resultingURI != null) continue;
+ if(inserter.persistent()) continue;
                 Logger.minor(this, "Queueing encode of "+inserter);
-                SoftReference<SingleBlockInserter> ref = new SoftReference<SingleBlockInserter>(inserter);
+                SoftReference<Encodeable> ref = new SoftReference<Encodeable>(inserter);
queue.add(ref);
}
notifyAll();
}
+ boolean anyPersistent = false;
+ for(int i=0;i<sbis.length;i++) {
+            SingleBlockInserter inserter = sbis[i];
+            if(inserter == null) continue;
+            if(inserter.isCancelled(container)) continue;
+            if(inserter.resultingURI != null) continue;
+            if(!inserter.persistent()) continue;
+            anyPersistent = true;
+            queuePersistent(inserter, container, context);
+ }
+ if(anyPersistent)
+ runPersistentQueue(context);
}
+ public void runPersistentQueue(ClientContext context) {
+        context.jobRunner.queue(runner, NativeThread.LOW_PRIORITY, true);
+ }
+
+    private void queuePersistent(Encodeable sbi, ObjectContainer container, ClientContext context) {
+        BackgroundBlockEncoderTag tag = new BackgroundBlockEncoderTag(sbi, sbi.getPriorityClass(container), context);
+ container.store(tag);
+ }
+
public void run() {
freenet.support.Logger.OSThread.logPID(this);
while(true) {
- SingleBlockInserter sbi = null;
+ Encodeable sbi = null;
synchronized(this) {
while(queue.isEmpty()) {
try {
@@ -59,15 +93,13 @@
}
}
while(!queue.isEmpty()) {
-                SoftReference<SingleBlockInserter> ref = queue.remove(queue.size()-1);
+                SoftReference<Encodeable> ref = queue.remove(queue.size()-1);
sbi = ref.get();
if(sbi != null) break;
}
}
Logger.minor(this, "Encoding "+sbi);
- if(sbi.isCancelled()) continue;
- if(sbi.resultingURI != null) continue;
- sbi.tryEncode();
+ sbi.tryEncode(null, context);
}
}
@@ -75,4 +107,51 @@
return NativeThread.MIN_PRIORITY;
}
+ static final int JOBS_PER_SLOT = 1;
+
+ private DBJob runner = new DBJob() {
+
+        public void run(ObjectContainer container, ClientContext context) {
+            Query query = container.query();
+            query.constrain(BackgroundBlockEncoderTag.class);
+            query.descend("nodeDBHandle").constrain(new Long(context.nodeDBHandle));
+ query.descend("priority").orderAscending();
+ query.descend("addedTime").orderAscending();
+ ObjectSet results = query.execute();
+            for(int x = 0; x < JOBS_PER_SLOT && results.hasNext(); x++) {
+                BackgroundBlockEncoderTag tag = (BackgroundBlockEncoderTag) results.next();
+ try {
+ Encodeable sbi = tag.inserter;
+ if(sbi == null) continue;
+ container.activate(sbi, 1);
+ sbi.tryEncode(container, context);
+ container.deactivate(sbi, 1);
+ } catch (Throwable t) {
+ Logger.error(this, "Caught "+t, t);
+ } finally {
+ container.delete(tag);
+ }
+ }
+ if(results.hasNext())
+ runPersistentQueue(context);
+ }
+
+ };
+
}
+
+class BackgroundBlockEncoderTag {
+ final Encodeable inserter;
+ final long nodeDBHandle;
+ /** For implementing FIFO ordering */
+ final long addedTime;
+ /** For implementing priority ordering */
+ final short priority;
+
+    BackgroundBlockEncoderTag(Encodeable inserter, short prio, ClientContext context) {
+ this.inserter = inserter;
+ this.nodeDBHandle = context.nodeDBHandle;
+ this.addedTime = System.currentTimeMillis();
+ this.priority = prio;
+ }
+}
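
Callers see a single queue() entry point: transient blocks go onto the
in-memory list, while persistent ones become BackgroundBlockEncoderTag records
that the DBJob drains JOBS_PER_SLOT at a time, ordered by priority and then
FIFO. A usage sketch (hypothetical call site):

    // Either path; queue() dispatches on sbi.persistent().
    backgroundBlockEncoder.queue(inserter, container, context);
    // After a restart, re-drain whatever tags survived in the database:
    backgroundBlockEncoder.runPersistentQueue(context);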
Modified: trunk/freenet/src/freenet/client/async/BaseClientGetter.java
===================================================================
--- trunk/freenet/src/freenet/client/async/BaseClientGetter.java	2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/client/async/BaseClientGetter.java	2009-04-01 20:34:09 UTC (rev 26322)
@@ -3,11 +3,13 @@
* http://www.gnu.org/ for further details of the GPL. */
package freenet.client.async;
+import freenet.node.RequestClient;
+
 public abstract class BaseClientGetter extends ClientRequester implements GetCompletionCallback {
-    protected BaseClientGetter(short priorityClass, ClientRequestScheduler chkScheduler, ClientRequestScheduler sskScheduler, Object client) {
- super(priorityClass, chkScheduler, sskScheduler, client);
+ protected BaseClientGetter(short priorityClass, RequestClient client) {
+ super(priorityClass, client);
}
}
Modified: trunk/freenet/src/freenet/client/async/BaseClientPutter.java
===================================================================
--- trunk/freenet/src/freenet/client/async/BaseClientPutter.java	2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/client/async/BaseClientPutter.java	2009-04-01 20:34:09 UTC (rev 26322)
@@ -3,12 +3,16 @@
* http://www.gnu.org/ for further details of the GPL. */
package freenet.client.async;
+import com.db4o.ObjectContainer;
+
+import freenet.node.RequestClient;
+
public abstract class BaseClientPutter extends ClientRequester {
-    protected BaseClientPutter(short priorityClass, ClientRequestScheduler chkScheduler, ClientRequestScheduler sskScheduler, Object context) {
- super(priorityClass, chkScheduler, sskScheduler, context);
+ protected BaseClientPutter(short priorityClass, RequestClient context) {
+ super(priorityClass, context);
}
- public abstract void onMajorProgress();
+ public abstract void onMajorProgress(ObjectContainer container);
}
Modified: trunk/freenet/src/freenet/client/async/BaseSingleFileFetcher.java
===================================================================
--- trunk/freenet/src/freenet/client/async/BaseSingleFileFetcher.java	2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/client/async/BaseSingleFileFetcher.java	2009-04-01 20:34:09 UTC (rev 26322)
@@ -3,6 +3,11 @@
* http://www.gnu.org/ for further details of the GPL. */
package freenet.client.async;
+import java.util.Collections;
+import java.util.List;
+
+import com.db4o.ObjectContainer;
+
import freenet.client.FetchContext;
import freenet.keys.ClientKey;
import freenet.keys.ClientSSK;
@@ -10,55 +15,71 @@
import freenet.keys.KeyBlock;
import freenet.keys.KeyVerifyException;
import freenet.node.KeysFetchingLocally;
+import freenet.node.NullSendableRequestItem;
+import freenet.node.RequestClient;
import freenet.node.RequestScheduler;
import freenet.node.SendableGet;
+import freenet.node.SendableRequestItem;
import freenet.support.Executor;
import freenet.support.Logger;
-public abstract class BaseSingleFileFetcher extends SendableGet {
+public abstract class BaseSingleFileFetcher extends SendableGet implements HasKeyListener {
     final ClientKey key;
     protected boolean cancelled;
+    protected boolean finished;
     final int maxRetries;
     private int retryCount;
     final FetchContext ctx;
-    static final Object[] keys = new Object[] { Integer.valueOf(0) };
+    protected boolean deleteFetchContext;
+    static final SendableRequestItem[] keys = new SendableRequestItem[] { NullSendableRequestItem.nullItem };
     /** It is essential that we know when the cooldown will end, otherwise we cannot
      * remove the key from the queue if we are killed before that */
     long cooldownWakeupTime;
-    protected BaseSingleFileFetcher(ClientKey key, int maxRetries, FetchContext ctx, ClientRequester parent) {
+    protected BaseSingleFileFetcher(ClientKey key, int maxRetries, FetchContext ctx, ClientRequester parent, boolean deleteFetchContext) {
super(parent);
+ this.deleteFetchContext = deleteFetchContext;
+ if(Logger.shouldLog(Logger.MINOR, this))
+            Logger.minor(this, "Creating BaseSingleFileFetcher for "+key);
retryCount = 0;
this.maxRetries = maxRetries;
this.key = key;
this.ctx = ctx;
+ if(ctx == null) throw new NullPointerException();
+ if(key == null) throw new NullPointerException();
cooldownWakeupTime = -1;
}
@Override
- public Object[] allKeys() {
+    public SendableRequestItem[] allKeys(ObjectContainer container, ClientContext context) {
return keys;
}
@Override
- public Object[] sendableKeys() {
+    public SendableRequestItem[] sendableKeys(ObjectContainer container, ClientContext context) {
return keys;
}
@Override
- public Object chooseKey(KeysFetchingLocally fetching) {
+    public SendableRequestItem chooseKey(KeysFetchingLocally fetching, ObjectContainer container, ClientContext context) {
+ if(persistent)
+ container.activate(key, 5);
if(fetching.hasKey(key.getNodeKey())) return null;
return keys[0];
}
@Override
- public boolean hasValidKeys(KeysFetchingLocally fetching) {
+    public boolean hasValidKeys(KeysFetchingLocally fetching, ObjectContainer container, ClientContext context) {
+ if(persistent)
+ container.activate(key, 5);
return !fetching.hasKey(key.getNodeKey());
}
@Override
- public ClientKey getKey(Object token) {
+ public ClientKey getKey(Object token, ObjectContainer container) {
+ if(persistent)
+ container.activate(key, 5);
return key;
}
@@ -72,33 +93,41 @@
return key instanceof ClientSSK;
}
- /**
- * Try again - returns true if we can retry
- * @param sched
- * @param the executor we will use to run the retry off-thread
- */
- protected boolean retry(RequestScheduler sched, Executor exec) {
+ /** Try again - returns true if we can retry
+ * @param sched */
+    protected boolean retry(ObjectContainer container, ClientContext context) {
         retryCount++;
         if(Logger.shouldLog(Logger.MINOR, this))
-            Logger.minor(this, "Attempting to retry... (max "+maxRetries+", current "+retryCount+ ')');
+            Logger.minor(this, "Attempting to retry... (max "+maxRetries+", current "+retryCount+") on "+this);
         // We want 0, 1, ... maxRetries i.e. maxRetries+1 attempts (maxRetries=0 => try once, no retries, maxRetries=1 = original try + 1 retry)
         if((retryCount <= maxRetries) || (maxRetries == -1)) {
+            if(persistent)
+                container.store(this);
+            if(retryCount % RequestScheduler.COOLDOWN_RETRIES == 0) {
// Add to cooldown queue. Don't reschedule yet.
long now = System.currentTimeMillis();
-                if(cooldownWakeupTime > now)
-                    Logger.error(this, "Already on the cooldown queue for "+this, new Exception("error"));
-                else
-                    cooldownWakeupTime = sched.queueCooldown(key, this);
+                if(cooldownWakeupTime > now) {
+                    Logger.error(this, "Already on the cooldown queue for "+this+" until "+freenet.support.TimeUtil.formatTime(cooldownWakeupTime - now), new Exception("error"));
+                    // We must be registered ... unregister
+                    unregister(container, context);
+                } else {
+                    if(Logger.shouldLog(Logger.MINOR, this)) Logger.minor(this, "Adding to cooldown queue "+this);
+                    if(persistent)
+                        container.activate(key, 5);
+                    RequestScheduler sched = context.getFetchScheduler(key instanceof ClientSSK);
+                    cooldownWakeupTime = sched.queueCooldown(key, this, container);
+                    if(persistent)
+                        container.deactivate(key, 5);
+                    // Unregister as going to cooldown queue.
+                    unregister(container, context);
+                }
} else {
- exec.execute(new Runnable() {
- public void run() {
- schedule();
+ unregister(container, context);
+ reschedule(container, context);
}
- }, "Retry executor for "+sched.toString());
- }
             return true; // We will retry in any case, maybe just not yet. See requeueAfterCooldown(Key).
}
+ unregister(container, context);
return false;
}
@@ -113,8 +142,10 @@
}
@Override
- public short getPriorityClass() {
- return parent.getPriorityClass();
+ public short getPriorityClass(ObjectContainer container) {
+        if(persistent) container.activate(parent, 1); // Not much point deactivating it
+ short retval = parent.getPriorityClass();
+ return retval;
}
@Override
@@ -122,83 +153,200 @@
return ctx.ignoreStore;
}
- public void cancel() {
+ public void cancel(ObjectContainer container, ClientContext context) {
synchronized(this) {
cancelled = true;
}
- super.unregister(false);
+ if(persistent) {
+ container.store(this);
+ container.activate(key, 5);
+ }
+
+ unregisterAll(container, context);
}
+
+ /**
+ * Remove the pendingKeys item and then remove from the queue as well.
+ * Call unregister(container) if you only want to remove from the queue.
+ */
+    public void unregisterAll(ObjectContainer container, ClientContext context) {
+ getScheduler(context).removePendingKeys(this, false);
+ super.unregister(container, context);
+ }
@Override
- public synchronized boolean isCancelled() {
+ public synchronized boolean isCancelled(ObjectContainer container) {
return cancelled;
}
- public synchronized boolean isEmpty() {
- return cancelled;
+ public synchronized boolean isEmpty(ObjectContainer container) {
+ return cancelled || finished;
}
@Override
- public Object getClient() {
+ public RequestClient getClient(ObjectContainer container) {
+ if(persistent) container.activate(parent, 1);
return parent.getClient();
}
@Override
- public boolean dontCache() {
+ public boolean dontCache(ObjectContainer container) {
+ if(persistent) container.activate(ctx, 1);
return !ctx.cacheLocalRequests;
}
- public boolean canRemove() {
-        // Simple request, once it's sent, it's sent. May be requeued at a different # retries.
- return true;
- }
-
- @Override
- public void onGotKey(Key key, KeyBlock block, RequestScheduler sched) {
+    public void onGotKey(Key key, KeyBlock block, ObjectContainer container, ClientContext context) {
+ if(persistent) {
+ container.activate(this, 1);
+ container.activate(key, 5);
+ container.activate(this.key, 5);
+ }
synchronized(this) {
- if(isCancelled()) return;
+ if(finished) {
+ if(Logger.shouldLog(Logger.MINOR, this))
+                    Logger.minor(this, "onGotKey() called twice on "+this, new Exception("debug"));
+ return;
+ }
+ finished = true;
+ if(persistent)
+ container.store(this);
+ if(isCancelled(container)) return;
+ if(key == null)
+ throw new NullPointerException();
+ if(this.key == null)
+                throw new NullPointerException("Key is null on "+this);
             if(!key.equals(this.key.getNodeKey())) {
                 Logger.normal(this, "Got sent key "+key+" but want "+this.key+" for "+this);
                 return;
             }
         }
+        unregister(container, context); // Key has already been removed from pendingKeys
try {
-            onSuccess(Key.createKeyBlock(this.key, block), false, null, sched);
+            onSuccess(Key.createKeyBlock(this.key, block), false, null, container, context);
         } catch (KeyVerifyException e) {
             Logger.error(this, "onGotKey("+key+","+block+") got "+e+" for "+this, e);
             // FIXME if we get rid of the direct route this must call onFailure()
}
+ if(persistent) {
+ container.deactivate(this, 1);
+ container.deactivate(this.key, 1);
+ }
}
@Override
- public long getCooldownWakeup(Object token) {
+ public long getCooldownWakeup(Object token, ObjectContainer container) {
return cooldownWakeupTime;
}
-
+
@Override
- public long getCooldownWakeupByKey(Key key) {
+ public long getCooldownWakeupByKey(Key key, ObjectContainer container) {
return cooldownWakeupTime;
}
@Override
- public synchronized void resetCooldownTimes() {
+ public synchronized void resetCooldownTimes(ObjectContainer container) {
cooldownWakeupTime = -1;
+ if(persistent)
+ container.store(this);
}
-
+
@Override
- public void requeueAfterCooldown(Key key, long time) {
+	public void requeueAfterCooldown(Key key, long time, ObjectContainer container, ClientContext context) {
if(cooldownWakeupTime > time) {
if(Logger.shouldLog(Logger.MINOR, this))
Logger.minor(this, "Not requeueing as deadline has not passed yet");
return;
}
+ if(persistent)
+ container.activate(this.key, 5);
if(!(key.equals(this.key.getNodeKey()))) {
Logger.error(this, "Got requeueAfterCooldown for wrong
key: "+key+" but mine is "+this.key.getNodeKey()+" for "+this.key);
return;
}
if(Logger.shouldLog(Logger.MINOR, this))
Logger.minor(this, "Requeueing after cooldown "+key+"
for "+this);
- schedule();
+ reschedule(container, context);
+ if(persistent)
+ container.deactivate(this.key, 5);
}
+
+ public void schedule(ObjectContainer container, ClientContext context) {
+ if(persistent) {
+ container.activate(ctx, 1);
+ if(ctx.blocks != null)
+ container.activate(ctx.blocks, 5);
+ }
+ try {
+			getScheduler(context).register(this, new SendableGet[] { this }, persistent, true, container, ctx.blocks, false);
+ } catch (KeyListenerConstructionException e) {
+ Logger.error(this, "Impossible: "+e+" on "+this, e);
+ }
+ }
+	public void reschedule(ObjectContainer container, ClientContext context) {
+ if(persistent) {
+ container.activate(ctx, 1);
+ if(ctx.blocks != null)
+ container.activate(ctx.blocks, 5);
+ }
+ try {
+			getScheduler(context).register(null, new SendableGet[] { this }, persistent, true, container, ctx.blocks, true);
+ } catch (KeyListenerConstructionException e) {
+ Logger.error(this, "Impossible: "+e+" on "+this, e);
+ }
+ }
+
+ public SendableGet getRequest(Key key, ObjectContainer container) {
+ return this;
+ }
+
+ public Key[] listKeys(ObjectContainer container) {
+ if(cancelled || finished)
+ return new Key[0];
+ else {
+ if(persistent)
+ container.activate(key, 5);
+ return new Key[] { key.getNodeKey() };
+ }
+ }
+
+ @Override
+	public List<PersistentChosenBlock> makeBlocks(PersistentChosenRequest request, RequestScheduler sched, ObjectContainer container, ClientContext context) {
+ if(persistent)
+ container.activate(key, 5);
+ ClientKey ckey = key.cloneKey();
+		PersistentChosenBlock block = new PersistentChosenBlock(false, request, keys[0], ckey.getNodeKey(), ckey, sched);
+ return Collections.singletonList(block);
+ }
+
+	public KeyListener makeKeyListener(ObjectContainer container, ClientContext context) {
+ if(persistent) {
+ container.activate(key, 5);
+ container.activate(parent, 1);
+ container.activate(ctx, 1);
+ }
+ if(finished) return null;
+ if(cancelled) return null;
+ Key newKey = key.getNodeKey().cloneKey();
+ short prio = parent.getPriorityClass();
+ boolean dontCache = !ctx.cacheLocalRequests;
+		KeyListener ret = new SingleKeyListener(newKey, this, dontCache, prio, persistent);
+ if(persistent) {
+ container.deactivate(key, 5);
+ container.deactivate(parent, 1);
+ container.deactivate(ctx, 1);
+ }
+ return ret;
+ }
+
+	public void removeFrom(ObjectContainer container, ClientContext context) {
+ super.removeFrom(container, context);
+ if(deleteFetchContext) {
+ container.activate(ctx, 1);
+ ctx.removeFrom(container);
+ }
+ container.activate(key, 5);
+ key.removeFrom(container);
+ }
+
}
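
The persistence code added above follows one db4o discipline throughout: activate an object before reading its fields, store() it after mutating persistent state, and deactivate it once memory can be released. A minimal standalone sketch of that discipline, using a hypothetical Counter class rather than anything from this changeset:

    // Hypothetical illustration of the activate/store/deactivate idiom used above.
    import com.db4o.Db4o;
    import com.db4o.ObjectContainer;

    class Counter {
        int hits;
    }

    public class ActivationSketch {
        public static void main(String[] args) {
            ObjectContainer container = Db4o.openFile("sketch.db4o");
            try {
                Counter c = new Counter();
                container.store(c);         // make the object persistent
                container.deactivate(c, 1); // fields may now be unloaded
                container.activate(c, 1);   // pull fields back to depth 1 before use
                c.hits++;
                container.store(c);         // persist the mutation explicitly
            } finally {
                container.close();
            }
        }
    }
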
Modified: trunk/freenet/src/freenet/client/async/BinaryBlobInserter.java
===================================================================
--- trunk/freenet/src/freenet/client/async/BinaryBlobInserter.java
2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/client/async/BinaryBlobInserter.java
2009-04-01 20:34:09 UTC (rev 26322)
@@ -5,6 +5,8 @@
import java.util.Iterator;
import java.util.Vector;
+import com.db4o.ObjectContainer;
+
import freenet.client.FailureCodeTracker;
import freenet.client.InsertContext;
import freenet.client.InsertException;
@@ -13,6 +15,7 @@
import freenet.keys.KeyBlock;
import freenet.keys.SSKBlock;
import freenet.node.LowLevelPutException;
+import freenet.node.RequestClient;
import freenet.node.SimpleSendableInsert;
import freenet.support.Logger;
import freenet.support.SimpleFieldSet;
@@ -21,7 +24,7 @@
public class BinaryBlobInserter implements ClientPutState {
final ClientPutter parent;
- final Object clientContext;
+ final RequestClient clientContext;
final MySendableInsert[] inserters;
final FailureCodeTracker errors;
final int maxRetries;
@@ -32,7 +35,7 @@
private boolean fatal;
final InsertContext ctx;
-	BinaryBlobInserter(Bucket blob, ClientPutter parent, Object clientContext, boolean tolerant, short prioClass, InsertContext ctx)
+	BinaryBlobInserter(Bucket blob, ClientPutter parent, RequestClient clientContext, boolean tolerant, short prioClass, InsertContext ctx, ClientContext context, ObjectContainer container)
throws IOException, BinaryBlobFormatException {
logMINOR = Logger.shouldLog(Logger.MINOR, this);
this.ctx = ctx;
@@ -57,29 +60,29 @@
Key key = (Key) i.next();
KeyBlock block = blocks.get(key);
MySendableInsert inserter =
-				new MySendableInsert(x++, block, prioClass, getScheduler(block), clientContext);
+				new MySendableInsert(x++, block, prioClass, getScheduler(block, context), clientContext);
myInserters.add(inserter);
}
		inserters = (MySendableInsert[]) myInserters.toArray(new MySendableInsert[myInserters.size()]);
- parent.addMustSucceedBlocks(inserters.length);
- parent.notifyClients();
+ parent.addMustSucceedBlocks(inserters.length, container);
+ parent.notifyClients(container, context);
}
- private ClientRequestScheduler getScheduler(KeyBlock block) {
+	private ClientRequestScheduler getScheduler(KeyBlock block, ClientContext context) {
if(block instanceof CHKBlock)
- return parent.chkScheduler;
+ return context.getChkInsertScheduler();
else if(block instanceof SSKBlock)
- return parent.sskScheduler;
+ return context.getSskInsertScheduler();
else throw new IllegalArgumentException("Unknown block type
"+block.getClass()+" : "+block);
}
- public void cancel() {
+ public void cancel(ObjectContainer container, ClientContext context) {
for(int i=0;i<inserters.length;i++) {
if(inserters[i] != null)
- inserters[i].cancel();
+ inserters[i].cancel(container, context);
}
-		parent.onFailure(new InsertException(InsertException.CANCELLED), this);
+		parent.onFailure(new InsertException(InsertException.CANCELLED), this, container, context);
}
public BaseClientPutter getParent() {
@@ -95,7 +98,7 @@
return clientContext;
}
- public void schedule() throws InsertException {
+	public void schedule(ObjectContainer container, ClientContext context) throws InsertException {
for(int i=0;i<inserters.length;i++) {
inserters[i].schedule();
}
@@ -107,36 +110,36 @@
private int consecutiveRNFs;
private int retries;
-		public MySendableInsert(int i, KeyBlock block, short prioClass, ClientRequestScheduler scheduler, Object client) {
+		public MySendableInsert(int i, KeyBlock block, short prioClass, ClientRequestScheduler scheduler, RequestClient client) {
super(block, prioClass, client, scheduler);
this.blockNum = i;
}
- public void onSuccess() {
+		public void onSuccess(ObjectContainer container, ClientContext context) {
synchronized(this) {
if(inserters[blockNum] == null) return;
inserters[blockNum] = null;
completedBlocks++;
succeededBlocks++;
}
- parent.completedBlock(false);
- maybeFinish();
+ parent.completedBlock(false, container, context);
+ maybeFinish(container, context);
}
// FIXME duplicated code from SingleBlockInserter
// FIXME combine it somehow
- public void onFailure(LowLevelPutException e) {
+		public void onFailure(LowLevelPutException e, Object keyNum, ObjectContainer container, ClientContext context) {
synchronized(BinaryBlobInserter.this) {
if(inserters[blockNum] == null) return;
}
if(parent.isCancelled()) {
-				fail(new InsertException(InsertException.CANCELLED), true);
+				fail(new InsertException(InsertException.CANCELLED), true, container, context);
return;
}
			logMINOR = Logger.shouldLog(Logger.MINOR, BinaryBlobInserter.this);
switch(e.code) {
case LowLevelPutException.COLLISION:
-				fail(new InsertException(InsertException.COLLISION), false);
+				fail(new InsertException(InsertException.COLLISION), false, container, context);
break;
case LowLevelPutException.INTERNAL_ERROR:
errors.inc(InsertException.INTERNAL_ERROR);
@@ -159,7 +162,7 @@
if(logMINOR) Logger.minor(this, "Consecutive
RNFs: "+consecutiveRNFs+" / "+consecutiveRNFsCountAsSuccess);
if(consecutiveRNFs ==
consecutiveRNFsCountAsSuccess) {
if(logMINOR) Logger.minor(this,
"Consecutive RNFs: "+consecutiveRNFs+" - counting as success");
- onSuccess();
+ onSuccess(container, context);
return;
}
} else
@@ -167,14 +170,14 @@
if(logMINOR) Logger.minor(this, "Failed: "+e);
retries++;
if((retries > maxRetries) && (maxRetries != -1)) {
- fail(InsertException.construct(errors), false);
+				fail(InsertException.construct(errors), false, container, context);
return;
}
// Retry *this block*
this.schedule();
}
- private void fail(InsertException e, boolean fatal) {
+		private void fail(InsertException e, boolean fatal, ObjectContainer container, ClientContext context) {
synchronized(BinaryBlobInserter.this) {
if(inserters[blockNum] == null) return;
inserters[blockNum] = null;
@@ -182,10 +185,10 @@
if(fatal) BinaryBlobInserter.this.fatal = true;
}
if(fatal)
- parent.fatallyFailedBlock();
+ parent.fatallyFailedBlock(container, context);
else
- parent.failedBlock();
- maybeFinish();
+ parent.failedBlock(container, context);
+ maybeFinish(container, context);
}
@Override
@@ -194,7 +197,7 @@
}
}
- public void maybeFinish() {
+		public void maybeFinish(ObjectContainer container, ClientContext context) {
boolean success;
boolean wasFatal;
synchronized(this) {
@@ -204,11 +207,16 @@
wasFatal = fatal;
}
if(success) {
- parent.onSuccess(this);
+ parent.onSuccess(this, container, context);
} else if(wasFatal)
-				parent.onFailure(new InsertException(InsertException.FATAL_ERRORS_IN_BLOCKS, errors, null), this);
+				parent.onFailure(new InsertException(InsertException.FATAL_ERRORS_IN_BLOCKS, errors, null), this, container, context);
			else
-				parent.onFailure(new InsertException(InsertException.TOO_MANY_RETRIES_IN_BLOCKS, errors, null), this);
+				parent.onFailure(new InsertException(InsertException.TOO_MANY_RETRIES_IN_BLOCKS, errors, null), this, container, context);
}
+
+		public void removeFrom(ObjectContainer container, ClientContext context) {
+ // FIXME: Persistent blob inserts are not supported.
+ throw new UnsupportedOperationException();
+ }
}
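
One subtlety in MySendableInsert.onFailure() above is worth isolating: a run of consecutive ROUTE_NOT_FOUND failures is promoted to a success once it reaches consecutiveRNFsCountAsSuccess, while any other error breaks the run. A standalone sketch of just that counting rule, with hypothetical names:

    // Hypothetical restatement of the consecutive-RNF rule from onFailure() above.
    final class RnfCounter {
        private int consecutiveRNFs;
        private final int countAsSuccess; // mirrors consecutiveRNFsCountAsSuccess above

        RnfCounter(int countAsSuccess) {
            this.countAsSuccess = countAsSuccess;
        }

        /** Returns true when this RNF completes a run long enough to count as success. */
        boolean onRouteNotFound() {
            return ++consecutiveRNFs == countAsSuccess;
        }

        /** Any non-RNF failure breaks the run. */
        void onOtherFailure() {
            consecutiveRNFs = 0;
        }
    }
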
Modified: trunk/freenet/src/freenet/client/async/BlockSet.java
===================================================================
--- trunk/freenet/src/freenet/client/async/BlockSet.java 2009-04-01
20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/client/async/BlockSet.java 2009-04-01
20:34:09 UTC (rev 26322)
@@ -5,6 +5,8 @@
import java.util.Set;
+import com.db4o.ObjectContainer;
+
import freenet.keys.ClientKey;
import freenet.keys.ClientKeyBlock;
import freenet.keys.Key;
@@ -38,5 +40,5 @@
/** Get a high level block, given a high level key */
public ClientKeyBlock get(ClientKey key);
-
+
}
Copied: trunk/freenet/src/freenet/client/async/ChosenBlock.java (from rev
26320, branches/db4o/freenet/src/freenet/client/async/ChosenBlock.java)
===================================================================
--- trunk/freenet/src/freenet/client/async/ChosenBlock.java
(rev 0)
+++ trunk/freenet/src/freenet/client/async/ChosenBlock.java 2009-04-01
20:34:09 UTC (rev 26322)
@@ -0,0 +1,68 @@
+package freenet.client.async;
+
+import freenet.keys.ClientKey;
+import freenet.keys.Key;
+import freenet.node.LowLevelGetException;
+import freenet.node.LowLevelPutException;
+import freenet.node.NodeClientCore;
+import freenet.node.RequestScheduler;
+import freenet.node.SendableRequestItem;
+import freenet.node.SendableRequestSender;
+
+/**
+ * A single selected request, including everything needed to execute it.
+ * @author Matthew Toseland <toad at amphibian.dyndns.org> (0xE43DA450)
+ */
+public abstract class ChosenBlock {
+
+	/** The token indicating the key within the request to be fetched/inserted.
+ * Meaning is entirely defined by the request. */
+ public transient final SendableRequestItem token;
+ /** The key to be fetched, null if not a BaseSendableGet */
+ public transient final Key key;
+ /** The client-layer key to be fetched, null if not a SendableGet */
+ public transient final ClientKey ckey;
+ public transient final boolean localRequestOnly;
+ public transient final boolean cacheLocalRequests;
+ public transient final boolean ignoreStore;
+
+	public ChosenBlock(SendableRequestItem token, Key key, ClientKey ckey, boolean localRequestOnly, boolean cacheLocalRequests, boolean ignoreStore, RequestScheduler sched) {
+ this.token = token;
+ this.key = key;
+ this.ckey = ckey;
+ this.localRequestOnly = localRequestOnly;
+ this.cacheLocalRequests = cacheLocalRequests;
+ this.ignoreStore = ignoreStore;
+ }
+
+ public abstract boolean isPersistent();
+
+ public abstract boolean isCancelled();
+
+	public abstract void onFailure(LowLevelPutException e, ClientContext context);
+
+ public abstract void onInsertSuccess(ClientContext context);
+
+	public abstract void onFailure(LowLevelGetException e, ClientContext context);
+
+ /**
+ * The actual data delivery goes through CRS.tripPendingKey(). This is just a notification
+ * for book-keeping purposes. We call the scheduler to tell it that the request succeeded,
+ * so that it can be rescheduled soon for more requests.
+ * @param context Might be useful.
+ */
+ public abstract void onFetchSuccess(ClientContext context);
+
+ public abstract short getPriority();
+
+ public boolean send(NodeClientCore core, RequestScheduler sched) {
+ ClientContext context = sched.getContext();
+ return getSender(context).send(core, sched, context, this);
+ }
+
+ public abstract SendableRequestSender getSender(ClientContext context);
+
+ public void onDumped() {
+ token.dump();
+ }
+}
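
ChosenBlock leaves persistence decisions to its subclasses (TransientChosenBlock and PersistentChosenBlock, both added in this revision); a consumer only has to check for cancellation, call send(), and return the token when a block is dumped. A hypothetical consumer, assuming the scheduler has already selected the block:

    // Hypothetical starter-side use of ChosenBlock; the real caller is the request starter.
    import freenet.client.async.ChosenBlock;
    import freenet.node.NodeClientCore;
    import freenet.node.RequestScheduler;

    final class BlockRunner {
        void runOne(NodeClientCore core, RequestScheduler sched, ChosenBlock block) {
            if(block.isCancelled()) {
                block.onDumped(); // hand the token back so the request can be re-selected
                return;
            }
            // send() looks up the ClientContext from the scheduler and delegates to
            // getSender(context).send(core, sched, context, this).
            block.send(core, sched);
        }
    }
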
Modified: trunk/freenet/src/freenet/client/async/ClientCallback.java
===================================================================
--- trunk/freenet/src/freenet/client/async/ClientCallback.java 2009-04-01
20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/client/async/ClientCallback.java 2009-04-01
20:34:09 UTC (rev 26322)
@@ -3,6 +3,8 @@
* http://www.gnu.org/ for further details of the GPL. */
package freenet.client.async;
+import com.db4o.ObjectContainer;
+
import freenet.client.FetchException;
import freenet.client.FetchResult;
import freenet.client.InsertException;
@@ -15,20 +17,20 @@
*/
public interface ClientCallback {
-	public void onSuccess(FetchResult result, ClientGetter state);
+	public void onSuccess(FetchResult result, ClientGetter state, ObjectContainer container);
-	public void onFailure(FetchException e, ClientGetter state);
+	public void onFailure(FetchException e, ClientGetter state, ObjectContainer container);
-	public void onSuccess(BaseClientPutter state);
+	public void onSuccess(BaseClientPutter state, ObjectContainer container);
-	public void onFailure(InsertException e, BaseClientPutter state);
+	public void onFailure(InsertException e, BaseClientPutter state, ObjectContainer container);
-	public void onGeneratedURI(FreenetURI uri, BaseClientPutter state);
+	public void onGeneratedURI(FreenetURI uri, BaseClientPutter state, ObjectContainer container);
	/** Called when freenet.async thinks that the request should be serialized to
	 * disk, if it is a persistent request. */
-	public void onMajorProgress();
+	public void onMajorProgress(ObjectContainer container);
	/** Called when the inserted data is fetchable (don't rely on this) */
-	public void onFetchable(BaseClientPutter state);
+	public void onFetchable(BaseClientPutter state, ObjectContainer container);
}
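
Since every callback now receives the ObjectContainer (which is null for transient requests), implementations should not assume it is usable. A minimal conforming implementation, sketched against nothing but the revised interface:

    // Hypothetical logging implementation of the revised ClientCallback.
    import com.db4o.ObjectContainer;
    import freenet.client.FetchException;
    import freenet.client.FetchResult;
    import freenet.client.InsertException;
    import freenet.keys.FreenetURI;

    public class LoggingClientCallback implements ClientCallback {
        public void onSuccess(FetchResult result, ClientGetter state, ObjectContainer container) {
            System.out.println("Fetched " + result.asBucket().size() + " bytes");
        }
        public void onFailure(FetchException e, ClientGetter state, ObjectContainer container) {
            System.err.println("Fetch failed: " + e);
        }
        public void onSuccess(BaseClientPutter state, ObjectContainer container) {
            System.out.println("Insert succeeded");
        }
        public void onFailure(InsertException e, BaseClientPutter state, ObjectContainer container) {
            System.err.println("Insert failed: " + e);
        }
        public void onGeneratedURI(FreenetURI uri, BaseClientPutter state, ObjectContainer container) {
            System.out.println("Generated URI: " + uri);
        }
        public void onMajorProgress(ObjectContainer container) {
            // No-op; container is null for transient requests.
        }
        public void onFetchable(BaseClientPutter state, ObjectContainer container) {
            // No-op.
        }
    }
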
Copied: trunk/freenet/src/freenet/client/async/ClientContext.java (from rev
26320, branches/db4o/freenet/src/freenet/client/async/ClientContext.java)
===================================================================
--- trunk/freenet/src/freenet/client/async/ClientContext.java
(rev 0)
+++ trunk/freenet/src/freenet/client/async/ClientContext.java 2009-04-01
20:34:09 UTC (rev 26322)
@@ -0,0 +1,183 @@
+/* This code is part of Freenet. It is distributed under the GNU General
+ * Public License, version 2 (or at your option any later version). See
+ * http://www.gnu.org/ for further details of the GPL. */
+package freenet.client.async;
+
+import java.util.Random;
+
+import com.db4o.ObjectContainer;
+
+import freenet.client.ArchiveManager;
+import freenet.client.FECQueue;
+import freenet.client.FetchException;
+import freenet.client.InsertException;
+import freenet.crypt.RandomSource;
+import freenet.node.NodeClientCore;
+import freenet.node.RequestScheduler;
+import freenet.node.RequestStarterGroup;
+import freenet.node.Ticker;
+import freenet.support.Executor;
+import freenet.support.Logger;
+import freenet.support.api.BucketFactory;
+import freenet.support.compress.RealCompressor;
+import freenet.support.io.FilenameGenerator;
+import freenet.support.io.NativeThread;
+import freenet.support.io.PersistentTempBucketFactory;
+
+/**
+ * Object passed in to client-layer operations, containing references to essential but transient objects
+ * such as the schedulers and the FEC queue.
+ * @author toad
+ */
+public class ClientContext {
+
+ public transient final FECQueue fecQueue;
+ private transient ClientRequestScheduler sskFetchScheduler;
+ private transient ClientRequestScheduler chkFetchScheduler;
+ private transient ClientRequestScheduler sskInsertScheduler;
+ private transient ClientRequestScheduler chkInsertScheduler;
+ public transient final DBJobRunner jobRunner;
+ public transient final Executor mainExecutor;
+ public transient final long nodeDBHandle;
+ public transient final BackgroundBlockEncoder backgroundBlockEncoder;
+ public transient final RandomSource random;
+ public transient final ArchiveManager archiveManager;
+	public transient final PersistentTempBucketFactory persistentBucketFactory;
+ public transient final BucketFactory tempBucketFactory;
+ public transient final HealingQueue healingQueue;
+ public transient final USKManager uskManager;
+ public transient final Random fastWeakRandom;
+ public transient final long bootID;
+ public transient final Ticker ticker;
+ public transient final FilenameGenerator fg;
+ public transient final FilenameGenerator persistentFG;
+ public transient final RealCompressor rc;
+
+	public ClientContext(NodeClientCore core, FECQueue fecQueue, Executor mainExecutor,
+			BackgroundBlockEncoder blockEncoder, ArchiveManager archiveManager,
+			PersistentTempBucketFactory ptbf, BucketFactory tbf, HealingQueue hq,
+			USKManager uskManager, RandomSource strongRandom,
+			Random fastWeakRandom, Ticker ticker,
+			FilenameGenerator fg, FilenameGenerator persistentFG, RealCompressor rc) {
+ this.bootID = core.node.bootID;
+ this.fecQueue = fecQueue;
+ jobRunner = core;
+ this.mainExecutor = mainExecutor;
+ this.nodeDBHandle = core.node.nodeDBHandle;
+ this.backgroundBlockEncoder = blockEncoder;
+ this.random = strongRandom;
+ this.archiveManager = archiveManager;
+ this.persistentBucketFactory = ptbf;
+		if(persistentBucketFactory == null) throw new NullPointerException();
+ this.tempBucketFactory = tbf;
+ if(tempBucketFactory == null) throw new NullPointerException();
+ this.healingQueue = hq;
+ this.uskManager = uskManager;
+ this.fastWeakRandom = fastWeakRandom;
+ this.ticker = ticker;
+ this.fg = fg;
+ this.persistentFG = persistentFG;
+ this.rc = rc;
+ }
+
+ public void init(RequestStarterGroup starters) {
+ this.sskFetchScheduler = starters.sskFetchScheduler;
+ this.chkFetchScheduler = starters.chkFetchScheduler;
+ this.sskInsertScheduler = starters.sskPutScheduler;
+ this.chkInsertScheduler = starters.chkPutScheduler;
+ }
+
+ public ClientRequestScheduler getSskFetchScheduler() {
+ return sskFetchScheduler;
+ }
+
+ public ClientRequestScheduler getChkFetchScheduler() {
+ return chkFetchScheduler;
+ }
+
+ public ClientRequestScheduler getSskInsertScheduler() {
+ return sskInsertScheduler;
+ }
+
+ public ClientRequestScheduler getChkInsertScheduler() {
+ return chkInsertScheduler;
+ }
+
+	public void start(final ClientPutter inserter, final boolean earlyEncode) throws InsertException {
+ if(inserter.persistent()) {
+ jobRunner.queue(new DBJob() {
+
+			public void run(ObjectContainer container, ClientContext context) {
+				container.activate(inserter, 1);
+				try {
+					inserter.start(earlyEncode, false, container, context);
+				} catch (InsertException e) {
+					inserter.client.onFailure(e, inserter, container);
+ }
+ container.deactivate(inserter, 1);
+ }
+
+ }, NativeThread.NORM_PRIORITY, false);
+ } else {
+ inserter.start(earlyEncode, false, null, this);
+ }
+ }
+
+ public void start(final ClientGetter getter) throws FetchException {
+ if(getter.persistent()) {
+ jobRunner.queue(new DBJob() {
+
+			public void run(ObjectContainer container, ClientContext context) {
+				container.activate(getter, 1);
+				try {
+					getter.start(container, context);
+				} catch (FetchException e) {
+					getter.clientCallback.onFailure(e, getter, container);
+ }
+ container.deactivate(getter, 1);
+ }
+
+ }, NativeThread.NORM_PRIORITY, false);
+ } else {
+ getter.start(null, this);
+ }
+ }
+
+	public void start(final SimpleManifestPutter inserter) throws InsertException {
+ if(inserter.persistent()) {
+ jobRunner.queue(new DBJob() {
+
+			public void run(ObjectContainer container, ClientContext context) {
+				container.activate(inserter, 1);
+				try {
+					inserter.start(container, context);
+				} catch (InsertException e) {
+					inserter.cb.onFailure(e, inserter, container);
+ }
+ container.deactivate(inserter, 1);
+ }
+
+ }, NativeThread.NORM_PRIORITY, false);
+ } else {
+ inserter.start(null, this);
+ }
+ }
+
+ public BucketFactory getBucketFactory(boolean persistent) {
+ if(persistent)
+ return persistentBucketFactory;
+ else
+ return tempBucketFactory;
+ }
+
+ public RequestScheduler getFetchScheduler(boolean ssk) {
+ if(ssk) return sskFetchScheduler;
+ return chkFetchScheduler;
+ }
+
+ public boolean objectCanNew(ObjectContainer container) {
+ Logger.error(this, "Not storing ClientContext in database", new
Exception("error"));
+ return false;
+ }
+
+}
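
Anything that touches persistent request objects has to run on the database thread, which is why the start() methods above queue a DBJob rather than calling into the object directly. The same pattern, sketched for arbitrary work on a hypothetical persistent getter:

    // Hypothetical DBJob following the pattern of ClientContext.start() above.
    import com.db4o.ObjectContainer;
    import freenet.support.io.NativeThread;

    final class TouchGetter {
        static void touch(ClientContext context, final ClientGetter getter) {
            context.jobRunner.queue(new DBJob() {
                public void run(ObjectContainer container, ClientContext context) {
                    container.activate(getter, 1);
                    // ... read or mutate the getter on the database thread ...
                    container.store(getter);
                    container.deactivate(getter, 1);
                }
            }, NativeThread.NORM_PRIORITY, false);
        }
    }
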
Modified: trunk/freenet/src/freenet/client/async/ClientGetState.java
===================================================================
--- trunk/freenet/src/freenet/client/async/ClientGetState.java 2009-04-01
20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/client/async/ClientGetState.java 2009-04-01
20:34:09 UTC (rev 26322)
@@ -3,15 +3,25 @@
* http://www.gnu.org/ for further details of the GPL. */
package freenet.client.async;
+import com.db4o.ObjectContainer;
+
/**
* A ClientGetState.
* Represents a stage in the fetch process.
*/
public interface ClientGetState {
- public void schedule();
+	public void schedule(ObjectContainer container, ClientContext context) throws KeyListenerConstructionException;
- public void cancel();
+ public void cancel(ObjectContainer container, ClientContext context);
public long getToken();
+
+ /**
+	 * Once the callback has finished with this fetch, it will call removeFrom() to instruct the fetch
+	 * to remove itself and all its subsidiary objects from the database.
+	 * WARNING: It is possible that the caller will get deactivated! Be careful...
+	 * @param container
+	 */
+	public void removeFrom(ObjectContainer container, ClientContext context);
}
Modified: trunk/freenet/src/freenet/client/async/ClientGetter.java
===================================================================
--- trunk/freenet/src/freenet/client/async/ClientGetter.java 2009-04-01
20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/client/async/ClientGetter.java 2009-04-01
20:34:09 UTC (rev 26322)
@@ -9,6 +9,8 @@
import java.net.MalformedURLException;
import java.util.HashSet;
+import com.db4o.ObjectContainer;
+
import freenet.client.ArchiveContext;
import freenet.client.ClientMetadata;
import freenet.client.FetchContext;
@@ -18,18 +20,30 @@
import freenet.keys.ClientKeyBlock;
import freenet.keys.FreenetURI;
import freenet.keys.Key;
-import freenet.node.PrioRunnable;
-import freenet.node.RequestStarter;
+import freenet.node.RequestClient;
+import freenet.node.RequestScheduler;
+import freenet.support.LogThresholdCallback;
import freenet.support.Logger;
import freenet.support.api.Bucket;
import freenet.support.io.BucketTools;
-import freenet.support.io.NativeThread;
/**
* A high level data request.
*/
public class ClientGetter extends BaseClientGetter {
+ private static volatile boolean logMINOR;
+
+ static {
+ Logger.registerLogThresholdCallback(new LogThresholdCallback() {
+
+ @Override
+ public void shouldUpdate() {
+ logMINOR = Logger.shouldLog(Logger.MINOR, this);
+ }
+ });
+ }
+
final ClientCallback clientCallback;
FreenetURI uri;
final FetchContext ctx;
@@ -60,9 +74,9 @@
	 * write the data directly to the bucket, or copy it and free the original temporary bucket. Preferably the
* former, obviously!
*/
-	public ClientGetter(ClientCallback client, ClientRequestScheduler chkSched, ClientRequestScheduler sskSched,
-			FreenetURI uri, FetchContext ctx, short priorityClass, Object clientContext, Bucket returnBucket, Bucket binaryBlobBucket) {
-		super(priorityClass, chkSched, sskSched, clientContext);
+	public ClientGetter(ClientCallback client,
+			FreenetURI uri, FetchContext ctx, short priorityClass, RequestClient clientContext, Bucket returnBucket, Bucket binaryBlobBucket) {
+		super(priorityClass, clientContext);
this.clientCallback = client;
this.returnBucket = returnBucket;
this.uri = uri;
@@ -77,13 +91,15 @@
archiveRestarts = 0;
}
-	public void start() throws FetchException {
-		start(false, null);
+	public void start(ObjectContainer container, ClientContext context) throws FetchException {
+		start(false, null, container, context);
	}
-	public boolean start(boolean restart, FreenetURI overrideURI) throws FetchException {
-		if(Logger.shouldLog(Logger.MINOR, this))
-			Logger.minor(this, "Starting "+this);
+	public boolean start(boolean restart, FreenetURI overrideURI, ObjectContainer container, ClientContext context) throws FetchException {
+		if(persistent())
+			container.activate(uri, 5);
+		if(logMINOR)
+			Logger.minor(this, "Starting "+this+" persistent="+persistent());
try {
// FIXME synchronization is probably unnecessary.
			// But we DEFINITELY do not want to synchronize while calling currentState.schedule(),
@@ -96,38 +112,56 @@
cancelled = false;
finished = false;
}
-				currentState = SingleFileFetcher.create(this, this, new ClientMetadata(),
+				currentState = SingleFileFetcher.create(this, this,
						uri, ctx, actx, ctx.maxNonSplitfileRetries, 0, false, -1, true,
-						returnBucket, true);
+						returnBucket, true, container, context);
}
if(cancelled) cancel();
+ // schedule() may deactivate stuff, so store it now.
+ if(persistent()) {
+ container.store(currentState);
+ container.store(this);
+ }
if(currentState != null && !finished) {
if(binaryBlobBucket != null) {
try {
					binaryBlobStream = new DataOutputStream(new BufferedOutputStream(binaryBlobBucket.getOutputStream()));
BinaryBlob.writeBinaryBlobHeader(binaryBlobStream);
} catch (IOException e) {
-					onFailure(new FetchException(FetchException.BUCKET_ERROR, "Failed to open binary blob bucket", e), null);
+					onFailure(new FetchException(FetchException.BUCKET_ERROR, "Failed to open binary blob bucket", e), null, container, context);
+ if(persistent())
+ container.store(this);
return false;
}
}
- currentState.schedule();
+ currentState.schedule(container, context);
}
if(cancelled) cancel();
} catch (MalformedURLException e) {
throw new FetchException(FetchException.INVALID_URI, e);
+ } catch (KeyListenerConstructionException e) {
+			onFailure(e.getFetchException(), currentState, container, context);
}
+ if(persistent()) {
+ container.store(this);
+ container.deactivate(currentState, 1);
+ }
return true;
}
-	public void onSuccess(FetchResult result, ClientGetState state) {
-		if(Logger.shouldLog(Logger.MINOR, this))
-			Logger.minor(this, "Succeeded from "+state);
-		if(!closeBinaryBlobStream()) return;
+	public void onSuccess(FetchResult result, ClientGetState state, ObjectContainer container, ClientContext context) {
+		if(logMINOR)
+			Logger.minor(this, "Succeeded from "+state+" on "+this);
+		if(persistent())
+			container.activate(uri, 5);
+		if(!closeBinaryBlobStream(container, context)) return;
synchronized(this) {
finished = true;
currentState = null;
}
+ if(persistent()) {
+ container.store(this);
+ }
// Rest of method does not need to be synchronized.
		// Variables will be updated on exit of method, and the only thing that is
		// set is the returnBucket and the result. Not locking not only prevents
@@ -137,39 +171,42 @@
Bucket from = result.asBucket();
Bucket to = returnBucket;
try {
-				if(Logger.shouldLog(Logger.MINOR, this))
+				if(logMINOR)
					Logger.minor(this, "Copying - returnBucket not respected by client.async");
+ if(persistent()) {
+ container.activate(from, 5);
+ container.activate(returnBucket, 5);
+ }
BucketTools.copy(from, to);
from.free();
+ if(persistent())
+ from.removeFrom(container);
} catch (IOException e) {
Logger.error(this, "Error copying from "+from+"
to "+to+" : "+e.toString(), e);
- onFailure(new
FetchException(FetchException.BUCKET_ERROR, e.toString()), state /* not
strictly to blame, but we're not ako ClientGetState... */);
+ onFailure(new
FetchException(FetchException.BUCKET_ERROR, e.toString()), state /* not
strictly to blame, but we're not ako ClientGetState... */, container, context);
+ return;
}
result = new FetchResult(result, to);
} else {
-			if(returnBucket != null && Logger.shouldLog(Logger.MINOR, this))
+			if(returnBucket != null && logMINOR)
				Logger.minor(this, "client.async returned data in returnBucket");
}
-		final FetchResult res = result;
-		ctx.executor.execute(new PrioRunnable() {
-			public void run() {
-				clientCallback.onSuccess(res, ClientGetter.this);
-			}
-
-			public int getPriority() {
-				if(getPriorityClass() <= RequestStarter.IMMEDIATE_SPLITFILE_PRIORITY_CLASS)
-					return NativeThread.NORM_PRIORITY;
-				else
-					return NativeThread.LOW_PRIORITY;
-			}
-		}, "ClientGetter onSuccess callback for "+this);
-
+		if(persistent()) {
+			container.activate(state, 1);
+			state.removeFrom(container, context);
+			container.activate(clientCallback, 1);
+		}
+		FetchResult res = result;
+		clientCallback.onSuccess(res, ClientGetter.this, container);
}
-	public void onFailure(FetchException e, ClientGetState state) {
-		if(Logger.shouldLog(Logger.MINOR, this))
-			Logger.minor(this, "Failed from "+state+" : "+e, e);
-		closeBinaryBlobStream();
+	public void onFailure(FetchException e, ClientGetState state, ObjectContainer container, ClientContext context) {
+		if(logMINOR)
+			Logger.minor(this, "Failed from "+state+" : "+e+" on "+this, e);
+		closeBinaryBlobStream(container, context);
+		if(persistent())
+			container.activate(uri, 5);
+		ClientGetState oldState = null;
while(true) {
if(e.mode == FetchException.ARCHIVE_RESTART) {
int ar;
@@ -177,13 +214,13 @@
archiveRestarts++;
ar = archiveRestarts;
}
-				if(Logger.shouldLog(Logger.MINOR, this))
+				if(logMINOR)
					Logger.minor(this, "Archive restart on "+this+" ar="+ar);
				if(ar > ctx.maxArchiveRestarts)
					e = new FetchException(FetchException.TOO_MANY_ARCHIVE_RESTARTS);
				else {
					try {
-						start();
+						start(container, context);
} catch (FetchException e1) {
e = e1;
continue;
@@ -193,42 +230,56 @@
}
synchronized(this) {
finished = true;
+ oldState = currentState;
currentState = null;
}
			if(e.errorCodes != null && e.errorCodes.isOneCodeOnly())
				e = new FetchException(e.errorCodes.getFirstCode(), e);
			if(e.mode == FetchException.DATA_NOT_FOUND && super.successfulBlocks > 0)
				e = new FetchException(e, FetchException.ALL_DATA_NOT_FOUND);
-			Logger.minor(this, "onFailure("+e+", "+state+") on "+this+" for "+uri, e);
+			if(logMINOR) Logger.minor(this, "onFailure("+e+", "+state+") on "+this+" for "+uri, e);
final FetchException e1 = e;
-			ctx.executor.execute(new PrioRunnable() {
-				public void run() {
-					clientCallback.onFailure(e1, ClientGetter.this);
-				}
-				public int getPriority() {
-					if(getPriorityClass() <= RequestStarter.IMMEDIATE_SPLITFILE_PRIORITY_CLASS)
-						return NativeThread.NORM_PRIORITY;
-					else
-						return NativeThread.LOW_PRIORITY;
-				}
-			}, "ClientGetter onFailure callback");
-
-			return;
+			if(persistent())
+				container.store(this);
+			if(persistent()) {
+				container.activate(clientCallback, 1);
+			}
+			clientCallback.onFailure(e1, ClientGetter.this, container);
+			break;
}
+ if(persistent()) {
+ if(state != null) {
+ container.activate(state, 1);
+ state.removeFrom(container, context);
+ }
+ if(oldState != state && oldState != null) {
+ container.activate(oldState, 1);
+ oldState.removeFrom(container, context);
+ }
+ }
}
- @Override
- public void cancel() {
- boolean logMINOR = Logger.shouldLog(Logger.MINOR, this);
- if(logMINOR) Logger.minor(this, "Cancelling "+this);
+ public void cancel(ObjectContainer container, ClientContext context) {
+ if(logMINOR) Logger.minor(this, "Cancelling "+this, new
Exception("debug"));
ClientGetState s;
synchronized(this) {
- super.cancel();
+ if(super.cancel()) {
+ if(logMINOR) Logger.minor(this, "Already
cancelled "+this);
+ return;
+ }
s = currentState;
}
+ if(persistent())
+ container.store(this);
if(s != null) {
- if(logMINOR) Logger.minor(this, "Cancelling
"+currentState);
- s.cancel();
+ if(persistent())
+ container.activate(s, 1);
+ if(logMINOR) Logger.minor(this, "Cancelling "+s+" for
"+this+" instance "+super.toString());
+ s.cancel(container, context);
+ if(persistent())
+ container.deactivate(s, 1);
+ } else {
+ if(logMINOR) Logger.minor(this, "Nothing to cancel");
}
}
@@ -243,47 +294,70 @@
}
@Override
-	public void notifyClients() {
-		ctx.eventProducer.produceEvent(new SplitfileProgressEvent(this.totalBlocks, this.successfulBlocks, this.failedBlocks, this.fatallyFailedBlocks, this.minSuccessBlocks, this.blockSetFinalized));
+	public void notifyClients(ObjectContainer container, ClientContext context) {
+		if(persistent()) {
+			container.activate(ctx, 1);
+			container.activate(ctx.eventProducer, 1);
+		}
+		ctx.eventProducer.produceEvent(new SplitfileProgressEvent(this.totalBlocks, this.successfulBlocks, this.failedBlocks, this.fatallyFailedBlocks, this.minSuccessBlocks, this.blockSetFinalized), container, context);
}
-	public void onBlockSetFinished(ClientGetState state) {
-		if(Logger.shouldLog(Logger.MINOR, this))
+	public void onBlockSetFinished(ClientGetState state, ObjectContainer container, ClientContext context) {
+		if(logMINOR)
			Logger.minor(this, "Set finished", new Exception("debug"));
-		blockSetFinalized();
+		blockSetFinalized(container, context);
}
-	@Override
-	public void onTransition(ClientGetState oldState, ClientGetState newState) {
+	public void onTransition(ClientGetState oldState, ClientGetState newState, ObjectContainer container) {
		synchronized(this) {
			if(currentState == oldState) {
				currentState = newState;
-				Logger.minor(this, "Transition: "+oldState+" -> "+newState);
-			} else
-				Logger.minor(this, "Ignoring transition: "+oldState+" -> "+newState+" because current = "+currentState);
+				if(logMINOR) Logger.minor(this, "Transition: "+oldState+" -> "+newState+" on "+this+" persistent = "+persistent()+" instance = "+super.toString(), new Exception("debug"));
+			} else {
+				if(logMINOR) Logger.minor(this, "Ignoring transition: "+oldState+" -> "+newState+" because current = "+currentState+" on "+this+" persistent = "+persistent(), new Exception("debug"));
+				return;
+			}
}
+ if(persistent()) {
+ container.store(this);
+// container.deactivate(this, 1);
+// System.gc();
+// System.runFinalization();
+// System.gc();
+// System.runFinalization();
+// container.activate(this, 1);
+// synchronized(this) {
+// Logger.minor(this, "Post transition:
"+currentState);
+// }
+ }
}
public boolean canRestart() {
if(currentState != null && !finished) {
- Logger.minor(this, "Cannot restart because not finished
for "+uri);
+ if(logMINOR) Logger.minor(this, "Cannot restart because
not finished for "+uri);
return false;
}
return true;
}
-	public boolean restart(FreenetURI redirect) throws FetchException {
-		return start(true, redirect);
+	public boolean restart(FreenetURI redirect, ObjectContainer container, ClientContext context) throws FetchException {
+		return start(true, redirect, container, context);
}
@Override
public String toString() {
- return super.toString()+ ':' +uri;
+ return super.toString();
}
-	void addKeyToBinaryBlob(ClientKeyBlock block) {
+	// FIXME not persisting binary blob stuff - any stream won't survive shutdown...
+
+	void addKeyToBinaryBlob(ClientKeyBlock block, ObjectContainer container, ClientContext context) {
		if(binaryBlobKeysAddedAlready == null) return;
-		if(Logger.shouldLog(Logger.MINOR, this))
+		if(persistent()) {
+			container.activate(binaryBlobStream, 1);
+			container.activate(binaryBlobKeysAddedAlready, 1);
+		}
+		if(logMINOR)
			Logger.minor(this, "Adding key "+block.getClientKey().getURI()+" to "+this, new Exception("debug"));
Key key = block.getKey();
synchronized(binaryBlobKeysAddedAlready) {
@@ -294,7 +368,7 @@
				BinaryBlob.writeKey(binaryBlobStream, block, key);
			} catch (IOException e) {
				Logger.error(this, "Failed to write key to binary blob stream: "+e, e);
-				onFailure(new FetchException(FetchException.BUCKET_ERROR, "Failed to write key to binary blob stream: "+e), null);
+				onFailure(new FetchException(FetchException.BUCKET_ERROR, "Failed to write key to binary blob stream: "+e), null, container, context);
binaryBlobStream = null;
binaryBlobKeysAddedAlready.clear();
}
@@ -306,7 +380,11 @@
	 * @return True unless a failure occurred, in which case we will have already
* called onFailure() with an appropriate error.
*/
- private boolean closeBinaryBlobStream() {
+	private boolean closeBinaryBlobStream(ObjectContainer container, ClientContext context) {
+ if(persistent()) {
+ container.activate(binaryBlobStream, 1);
+ container.activate(binaryBlobKeysAddedAlready, 1);
+ }
if(binaryBlobKeysAddedAlready == null) return true;
synchronized(binaryBlobKeysAddedAlready) {
if(binaryBlobStream == null) return true;
@@ -319,7 +397,7 @@
return true;
} catch (IOException e) {
Logger.error(this, "Failed to close binary blob
stream: "+e, e);
- onFailure(new
FetchException(FetchException.BUCKET_ERROR, "Failed to close binary blob
stream: "+e), null);
+ onFailure(new
FetchException(FetchException.BUCKET_ERROR, "Failed to close binary blob
stream: "+e), null, container, context);
if(!triedClose) {
try {
binaryBlobStream.close();
@@ -339,18 +417,24 @@
return binaryBlobBucket != null;
}
- public void onExpectedMIME(String mime) {
+ public void onExpectedMIME(String mime, ObjectContainer container) {
if(finalizedMetadata) return;
expectedMIME = mime;
+ if(persistent())
+ container.store(this);
}
- public void onExpectedSize(long size) {
+ public void onExpectedSize(long size, ObjectContainer container) {
if(finalizedMetadata) return;
expectedSize = size;
+ if(persistent())
+ container.store(this);
}
- public void onFinalizedMetadata() {
+ public void onFinalizedMetadata(ObjectContainer container) {
finalizedMetadata = true;
+ if(persistent())
+ container.store(this);
}
public boolean finalizedMetadata() {
@@ -368,4 +452,19 @@
public ClientCallback getClientCallback() {
return clientCallback;
}
-}
\ No newline at end of file
+
+ @Override
+	public void removeFrom(ObjectContainer container, ClientContext context) {
+ container.activate(uri, 5);
+ uri.removeFrom(container);
+ container.activate(ctx, 1);
+ ctx.removeFrom(container);
+ container.activate(actx, 5);
+ actx.removeFrom(container);
+ if(returnBucket != null) {
+ container.activate(returnBucket, 1);
+ returnBucket.removeFrom(container);
+ }
+ super.removeFrom(container, context);
+ }
+}
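
A transient fetch now passes a null ObjectContainer through the new entry points; every persistent() branch above tolerates that. A hypothetical transient invocation, assuming the callback, FetchContext, RequestClient and ClientContext already exist:

    // Hypothetical transient fetch using the revised constructor and start().
    import freenet.client.FetchContext;
    import freenet.client.FetchException;
    import freenet.keys.FreenetURI;
    import freenet.node.RequestClient;
    import freenet.node.RequestStarter;

    final class TransientFetchExample {
        static ClientGetter fetch(ClientCallback callback, FreenetURI uri, FetchContext fctx,
                RequestClient requestClient, ClientContext clientContext) throws FetchException {
            ClientGetter getter = new ClientGetter(callback, uri, fctx,
                    RequestStarter.INTERACTIVE_PRIORITY_CLASS, requestClient,
                    null /* returnBucket */, null /* binaryBlobBucket */);
            getter.start(null /* transient: no ObjectContainer */, clientContext);
            return getter;
        }
    }
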
Modified: trunk/freenet/src/freenet/client/async/ClientPutState.java
===================================================================
--- trunk/freenet/src/freenet/client/async/ClientPutState.java 2009-04-01
20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/client/async/ClientPutState.java 2009-04-01
20:34:09 UTC (rev 26322)
@@ -3,6 +3,8 @@
* http://www.gnu.org/ for further details of the GPL. */
package freenet.client.async;
+import com.db4o.ObjectContainer;
+
import freenet.client.InsertException;
import freenet.support.SimpleFieldSet;
@@ -17,10 +19,10 @@
public abstract BaseClientPutter getParent();
/** Cancel the request. */
-	public abstract void cancel();
+	public abstract void cancel(ObjectContainer container, ClientContext context);
	/** Schedule the request. */
-	public abstract void schedule() throws InsertException;
+	public abstract void schedule(ObjectContainer container, ClientContext context) throws InsertException;
/**
	 * Get the token, an object which is passed around with the insert and may be
@@ -31,4 +33,11 @@
/** Serialize current progress to a SimpleFieldSet.
* Does not have to be complete! */
public abstract SimpleFieldSet getProgressFieldset();
+
+ /**
+	 * Once the callback has finished with this fetch, it will call removeFrom() to instruct the fetch
+	 * to remove itself and all its subsidiary objects from the database.
+	 * @param container
+	 */
+	public void removeFrom(ObjectContainer container, ClientContext context);
}
Modified: trunk/freenet/src/freenet/client/async/ClientPutter.java
===================================================================
--- trunk/freenet/src/freenet/client/async/ClientPutter.java 2009-04-01
20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/client/async/ClientPutter.java 2009-04-01
20:34:09 UTC (rev 26322)
@@ -5,6 +5,8 @@
import java.io.IOException;
+import com.db4o.ObjectContainer;
+
import freenet.client.ClientMetadata;
import freenet.client.InsertBlock;
import freenet.client.InsertContext;
@@ -13,6 +15,8 @@
import freenet.client.events.SplitfileProgressEvent;
import freenet.keys.BaseClientKey;
import freenet.keys.FreenetURI;
+import freenet.node.RequestClient;
+import freenet.node.RequestScheduler;
import freenet.support.Logger;
import freenet.support.SimpleFieldSet;
import freenet.support.api.Bucket;
@@ -53,9 +57,9 @@
* @param targetFilename If set, create a one-file manifest containing
this filename pointing to this file.
*/
	public ClientPutter(ClientCallback client, Bucket data, FreenetURI targetURI, ClientMetadata cm, InsertContext ctx,
-			ClientRequestScheduler chkScheduler, ClientRequestScheduler sskScheduler, short priorityClass, boolean getCHKOnly,
-			boolean isMetadata, Object clientContext, SimpleFieldSet stored, String targetFilename, boolean binaryBlob) {
-		super(priorityClass, chkScheduler, sskScheduler, clientContext);
+			short priorityClass, boolean getCHKOnly,
+			boolean isMetadata, RequestClient clientContext, SimpleFieldSet stored, String targetFilename, boolean binaryBlob) {
+		super(priorityClass, clientContext);
this.cm = cm;
this.isMetadata = isMetadata;
this.getCHKOnly = getCHKOnly;
@@ -70,11 +74,13 @@
this.binaryBlob = binaryBlob;
}
-	public void start(boolean earlyEncode) throws InsertException {
-		start(earlyEncode, false);
+	public void start(boolean earlyEncode, ObjectContainer container, ClientContext context) throws InsertException {
+		start(earlyEncode, false, container, context);
	}
-	public boolean start(boolean earlyEncode, boolean restart) throws InsertException {
+	public boolean start(boolean earlyEncode, boolean restart, ObjectContainer container, ClientContext context) throws InsertException {
+ if(persistent())
+ container.activate(client, 1);
if(Logger.shouldLog(Logger.MINOR, this))
Logger.minor(this, "Starting "+this);
try {
@@ -94,17 +100,19 @@
if(currentState != null) return false;
cancel = this.cancelled;
if(!cancel) {
-				if(!binaryBlob)
+				if(!binaryBlob) {
+					ClientMetadata meta = cm;
+					if(meta != null) meta = persistent() ? meta.clone() : meta;
					currentState =
-						new SingleFileInserter(this, this, new InsertBlock(data, cm, targetURI), isMetadata, ctx,
+						new SingleFileInserter(this, this, new InsertBlock(data, meta, persistent() ? targetURI.clone() : targetURI), isMetadata, ctx,
							false, getCHKOnly, false, null, null, false, targetFilename, earlyEncode);
-				else
+				} else
					currentState =
-						new BinaryBlobInserter(data, this, null, false, priorityClass, ctx);
+						new BinaryBlobInserter(data, this, null, false, priorityClass, ctx, context, container);
}
}
if(cancel) {
-				onFailure(new InsertException(InsertException.CANCELLED), null);
+				onFailure(new InsertException(InsertException.CANCELLED), null, container, context);
oldProgress = null;
return false;
}
@@ -112,22 +120,29 @@
cancel = cancelled;
}
if(cancel) {
-				onFailure(new InsertException(InsertException.CANCELLED), null);
+				onFailure(new InsertException(InsertException.CANCELLED), null, container, context);
oldProgress = null;
+ if(persistent())
+ container.store(this);
return false;
}
			if(Logger.shouldLog(Logger.MINOR, this))
				Logger.minor(this, "Starting insert: "+currentState);
			if(currentState instanceof SingleFileInserter)
-				((SingleFileInserter)currentState).start(oldProgress);
+				((SingleFileInserter)currentState).start(oldProgress, container, context);
			else
-				currentState.schedule();
+				currentState.schedule(container, context);
synchronized(this) {
oldProgress = null;
cancel = cancelled;
}
+ if(persistent()) {
+ container.store(this);
+				// It has scheduled, we can safely deactivate it now, so it won't hang around in memory.
+ container.deactivate(currentState, 1);
+ }
if(cancel) {
-				onFailure(new InsertException(InsertException.CANCELLED), null);
+				onFailure(new InsertException(InsertException.CANCELLED), null, container, context);
return false;
}
} catch (InsertException e) {
@@ -137,9 +152,11 @@
oldProgress = null;
currentState = null;
}
+ if(persistent())
+ container.store(this);
			// notify the client that the insert could not even be started
if (this.client!=null) {
- this.client.onFailure(e, this);
+ this.client.onFailure(e, this, container);
}
} catch (IOException e) {
Logger.error(this, "Failed to start insert: "+e, e);
@@ -148,9 +165,11 @@
oldProgress = null;
currentState = null;
}
+ if(persistent())
+ container.store(this);
			// notify the client that the insert could not even be started
			if (this.client!=null) {
-				this.client.onFailure(new InsertException(InsertException.BUCKET_ERROR, e, null), this);
+				this.client.onFailure(new InsertException(InsertException.BUCKET_ERROR, e, null), this, container);
}
} catch (BinaryBlobFormatException e) {
Logger.error(this, "Failed to start insert: "+e, e);
@@ -159,9 +178,11 @@
oldProgress = null;
currentState = null;
}
+ if(persistent())
+ container.store(this);
			// notify the client that the insert could not even be started
			if (this.client!=null) {
-				this.client.onFailure(new InsertException(InsertException.BINARY_BLOB_FORMAT_ERROR, e, null), this);
+				this.client.onFailure(new InsertException(InsertException.BINARY_BLOB_FORMAT_ERROR, e, null), this, container);
}
}
if(Logger.shouldLog(Logger.MINOR, this))
@@ -169,55 +190,97 @@
return true;
}
- public void onSuccess(ClientPutState state) {
+	public void onSuccess(ClientPutState state, ObjectContainer container, ClientContext context) {
+ if(persistent())
+ container.activate(client, 1);
+ ClientPutState oldState;
synchronized(this) {
finished = true;
+ oldState = currentState;
currentState = null;
oldProgress = null;
}
+ if(oldState != null && persistent()) {
+ container.activate(oldState, 1);
+ oldState.removeFrom(container, context);
+ }
+ if(state != null && state != oldState && persistent())
+ state.removeFrom(container, context);
		if(super.failedBlocks > 0 || super.fatallyFailedBlocks > 0 || super.successfulBlocks < super.totalBlocks) {
			Logger.error(this, "Failed blocks: "+failedBlocks+", Fatally failed blocks: "+fatallyFailedBlocks+
					", Successful blocks: "+successfulBlocks+", Total blocks: "+totalBlocks+" but success?! on "+this+" from "+state,
					new Exception("debug"));
}
- client.onSuccess(this);
+ if(persistent())
+ container.store(this);
+ client.onSuccess(this, container);
}
-	public void onFailure(InsertException e, ClientPutState state) {
+	public void onFailure(InsertException e, ClientPutState state, ObjectContainer container, ClientContext context) {
+		if(Logger.shouldLog(Logger.MINOR, this)) Logger.minor(this, "onFailure() for "+this+" : "+state+" : "+e, e);
+ if(persistent())
+ container.activate(client, 1);
+ ClientPutState oldState;
synchronized(this) {
finished = true;
+ oldState = currentState;
currentState = null;
oldProgress = null;
}
- client.onFailure(e, this);
+ if(oldState != null && persistent()) {
+ container.activate(oldState, 1);
+ oldState.removeFrom(container, context);
+ }
+ if(state != null && state != oldState && persistent())
+ state.removeFrom(container, context);
+ if(persistent())
+ container.store(this);
+ client.onFailure(e, this, container);
}
@Override
- public void onMajorProgress() {
- client.onMajorProgress();
+ public void onMajorProgress(ObjectContainer container) {
+ if(persistent())
+ container.activate(client, 1);
+ client.onMajorProgress(container);
}
- public void onEncode(BaseClientKey key, ClientPutState state) {
+	public void onEncode(BaseClientKey key, ClientPutState state, ObjectContainer container, ClientContext context) {
+ if(persistent())
+ container.activate(client, 1);
synchronized(this) {
+ if(this.uri != null) {
+ Logger.error(this, "onEncode() called twice?
Already have a uri: "+uri+" for "+this);
+ if(persistent())
+ this.uri.removeFrom(container);
+ }
this.uri = key.getURI();
if(targetFilename != null)
uri = uri.pushMetaString(targetFilename);
}
- client.onGeneratedURI(uri, this);
+ if(persistent())
+ container.store(this);
+ client.onGeneratedURI(uri, this, container);
}
-
+
@Override
- public void cancel() {
+ public void cancel(ObjectContainer container, ClientContext context) {
if(Logger.shouldLog(Logger.MINOR, this))
Logger.minor(this, "Cancelling "+this, new
Exception("debug"));
ClientPutState oldState = null;
synchronized(this) {
if(cancelled) return;
+ if(finished) return;
super.cancel();
oldState = currentState;
}
- if(oldState != null) oldState.cancel();
- onFailure(new InsertException(InsertException.CANCELLED), null);
+ if(persistent()) {
+ container.store(this);
+ if(oldState != null)
+ container.activate(oldState, 1);
+ }
+ if(oldState != null) oldState.cancel(container, context);
+		onFailure(new InsertException(InsertException.CANCELLED), null, container, context);
}
@Override
@@ -230,31 +293,36 @@
return uri;
}
-	public void onTransition(ClientPutState oldState, ClientPutState newState) {
+	public void onTransition(ClientPutState oldState, ClientPutState newState, ObjectContainer container) {
		if(newState == null) throw new NullPointerException();
+		// onTransition is *not* responsible for removing the old state, the caller is.
synchronized (this) {
if (currentState == oldState) {
currentState = newState;
+ if(persistent())
+ container.store(this);
return;
}
}
Logger.error(this, "onTransition: cur=" + currentState + ",
old=" + oldState + ", new=" + newState);
}
- public void onMetadata(Metadata m, ClientPutState state) {
+	public void onMetadata(Metadata m, ClientPutState state, ObjectContainer container, ClientContext context) {
		Logger.error(this, "Got metadata on "+this+" from "+state+" (this means the metadata won't be inserted)");
}
@Override
-	public void notifyClients() {
-		ctx.eventProducer.produceEvent(new SplitfileProgressEvent(this.totalBlocks, this.successfulBlocks, this.failedBlocks, this.fatallyFailedBlocks, this.minSuccessBlocks, this.blockSetFinalized));
+	public void notifyClients(ObjectContainer container, ClientContext context) {
+		if(persistent())
+			container.activate(ctx, 2);
+		ctx.eventProducer.produceEvent(new SplitfileProgressEvent(this.totalBlocks, this.successfulBlocks, this.failedBlocks, this.fatallyFailedBlocks, this.minSuccessBlocks, this.blockSetFinalized), container, context);
}
-	public void onBlockSetFinished(ClientPutState state) {
+	public void onBlockSetFinished(ClientPutState state, ObjectContainer container, ClientContext context) {
		if(Logger.shouldLog(Logger.MINOR, this))
			Logger.minor(this, "Set finished", new Exception("debug"));
-		blockSetFinalized();
+		blockSetFinalized(container, context);
}
public SimpleFieldSet getProgressFieldset() {
@@ -262,8 +330,10 @@
return currentState.getProgressFieldset();
}
-	public void onFetchable(ClientPutState state) {
-		client.onFetchable(this);
+	public void onFetchable(ClientPutState state, ObjectContainer container) {
+ if(persistent())
+ container.activate(client, 1);
+ client.onFetchable(this, container);
}
public boolean canRestart() {
@@ -275,13 +345,27 @@
return true;
}
-	public boolean restart(boolean earlyEncode) throws InsertException {
-		return start(earlyEncode, true);
+	public boolean restart(boolean earlyEncode, ObjectContainer container, ClientContext context) throws InsertException {
+		return start(earlyEncode, true, container, context);
}
@Override
-	public void onTransition(ClientGetState oldState, ClientGetState newState) {
+	public void onTransition(ClientGetState oldState, ClientGetState newState, ObjectContainer container) {
// Ignore, at the moment
}
+ @Override
+	public void removeFrom(ObjectContainer container, ClientContext context) {
+ container.activate(cm, 2);
+ cm.removeFrom(container);
+ container.activate(ctx, 1);
+ ctx.removeFrom(container);
+ container.activate(targetURI, 5);
+ targetURI.removeFrom(container);
+ if(uri != null) {
+ container.activate(uri, 5);
+ uri.removeFrom(container);
+ }
+ super.removeFrom(container, context);
+ }
}
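
The insert side mirrors the fetch side: the schedulers no longer come from the caller but from the ClientContext, and a transient insert passes a null container. A hypothetical transient insert against the revised constructor (the empty ClientMetadata is an assumption for illustration):

    // Hypothetical transient insert using the revised ClientPutter API.
    import freenet.client.ClientMetadata;
    import freenet.client.InsertContext;
    import freenet.client.InsertException;
    import freenet.keys.FreenetURI;
    import freenet.node.RequestClient;
    import freenet.node.RequestStarter;
    import freenet.support.api.Bucket;

    final class TransientInsertExample {
        static ClientPutter insert(ClientCallback callback, Bucket data, FreenetURI targetURI,
                InsertContext ictx, RequestClient requestClient, ClientContext clientContext)
                throws InsertException {
            ClientPutter putter = new ClientPutter(callback, data, targetURI,
                    new ClientMetadata(), ictx, RequestStarter.BULK_SPLITFILE_PRIORITY_CLASS,
                    false /* getCHKOnly */, false /* isMetadata */, requestClient,
                    null /* stored */, null /* targetFilename */, false /* binaryBlob */);
            putter.start(false /* earlyEncode */, null /* transient */, clientContext);
            return putter;
        }
    }
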
Modified: trunk/freenet/src/freenet/client/async/ClientRequestScheduler.java
===================================================================
--- trunk/freenet/src/freenet/client/async/ClientRequestScheduler.java
2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/client/async/ClientRequestScheduler.java
2009-04-01 20:34:09 UTC (rev 26322)
@@ -3,23 +3,28 @@
* http://www.gnu.org/ for further details of the GPL. */
package freenet.client.async;
-import java.lang.ref.WeakReference;
+import java.security.MessageDigest;
+import java.util.ArrayList;
import java.util.HashMap;
-import java.util.HashSet;
import java.util.LinkedList;
-import java.util.Map;
-import java.util.WeakHashMap;
+import com.db4o.ObjectContainer;
+import com.db4o.ObjectSet;
+
+import freenet.client.FECQueue;
+import freenet.client.FetchException;
import freenet.config.EnumerableOptionCallback;
import freenet.config.InvalidConfigValueException;
import freenet.config.SubConfig;
import freenet.crypt.RandomSource;
+import freenet.crypt.SHA256;
import freenet.keys.ClientKey;
-import freenet.keys.ClientKeyBlock;
import freenet.keys.Key;
import freenet.keys.KeyBlock;
-import freenet.keys.KeyVerifyException;
+import freenet.node.BaseSendableGet;
+import freenet.node.KeysFetchingLocally;
import freenet.node.LowLevelGetException;
+import freenet.node.LowLevelPutException;
import freenet.node.Node;
import freenet.node.NodeClientCore;
import freenet.node.RequestScheduler;
@@ -29,11 +34,9 @@
import freenet.node.SendableRequest;
import freenet.support.Logger;
import freenet.support.LogThresholdCallback;
-import freenet.support.RandomGrabArray;
-import freenet.support.SectoredRandomGrabArrayWithInt;
-import freenet.support.SectoredRandomGrabArrayWithObject;
-import freenet.support.SortedVectorByNumber;
+import freenet.support.PrioritizedSerialExecutor;
import freenet.support.api.StringCallback;
+import freenet.support.io.NativeThread;
/**
* Every X seconds, the RequestSender calls the ClientRequestScheduler to
@@ -42,18 +45,21 @@
*/
public class ClientRequestScheduler implements RequestScheduler {
- private static volatile boolean logMINOR;
-
- static {
- Logger.registerLogThresholdCallback(new LogThresholdCallback() {
-
- @Override
- public void shouldUpdate() {
- logMINOR = Logger.shouldLog(Logger.MINOR, this);
- }
- });
- }
+ private final ClientRequestSchedulerCore schedCore;
+ final ClientRequestSchedulerNonPersistent schedTransient;
+ private static volatile boolean logMINOR;
+
+ static {
+ Logger.registerLogThresholdCallback(new LogThresholdCallback() {
+
+ @Override
+ public void shouldUpdate() {
+ logMINOR = Logger.shouldLog(Logger.MINOR, this);
+ }
+ });
+ }
+
	public static class PrioritySchedulerCallback extends StringCallback implements EnumerableOptionCallback {
final ClientRequestScheduler cs;
		private final String[] possibleValues = new String[]{ ClientRequestScheduler.PRIORITY_HARD, ClientRequestScheduler.PRIORITY_SOFT };
@@ -89,100 +95,39 @@
}
}
-	/**
-	 * Structure:
-	 * array (by priority) -> // one element per possible priority
-	 * SortedVectorByNumber (by # retries) -> // contains each current #retries
-	 * RandomGrabArray // contains each element, allows fast fetch-and-drop-a-random-element
-	 *
-	 * To speed up fetching, a RGA or SVBN must only exist if it is non-empty.
-	 */
- private final SortedVectorByNumber[] priorities;
+ /** This DOES NOT PERSIST */
private final OfferedKeysList[] offeredKeys;
// we have one for inserts and one for requests
final boolean isInsertScheduler;
final boolean isSSKScheduler;
final RandomSource random;
- private final Map<ClientRequester, HashSet<SendableRequest>> allRequestsByClientRequest;
private final RequestStarter starter;
private final Node node;
public final String name;
- private final LinkedList<WeakReference<RandomGrabArray>> recentSuccesses = new LinkedList<WeakReference<RandomGrabArray>>();
- private final RequestCooldownQueue cooldownQueue;
- /** All pending gets by key. Used to automatically satisfy pending requests when either the key is fetched by
- * an overlapping request, or it is fetched by a request from another node. Operations on this are synchronized on
- * itself. */
- private final HashMap<Key, Object> pendingKeys;
+ private final CooldownQueue transientCooldownQueue;
+ private final CooldownQueue persistentCooldownQueue;
+ final PrioritizedSerialExecutor databaseExecutor;
+ final DatastoreChecker datastoreChecker;
+ public final ClientContext clientContext;
+ final DBJobRunner jobRunner;
public static final String PRIORITY_NONE = "NONE";
public static final String PRIORITY_SOFT = "SOFT";
public static final String PRIORITY_HARD = "HARD";
- /** Minimum number of retries at which we start to hold it against a request.
- * See the comments on fixRetryCount; we don't want many untried requests to prevent
- * us from trying requests which have only been tried once (e.g. USK checkers), from
- * other clients (and we DO want retries to take precedence over client round robin IF
- * the request has been tried many times already). */
- private static final int MIN_RETRY_COUNT = 3;
private String choosenPriorityScheduler;
- private final short[] tweakedPrioritySelector = {
- RequestStarter.MAXIMUM_PRIORITY_CLASS,
- RequestStarter.MAXIMUM_PRIORITY_CLASS,
- RequestStarter.MAXIMUM_PRIORITY_CLASS,
- RequestStarter.MAXIMUM_PRIORITY_CLASS,
- RequestStarter.MAXIMUM_PRIORITY_CLASS,
- RequestStarter.MAXIMUM_PRIORITY_CLASS,
- RequestStarter.MAXIMUM_PRIORITY_CLASS,
-
- RequestStarter.INTERACTIVE_PRIORITY_CLASS,
- RequestStarter.INTERACTIVE_PRIORITY_CLASS,
- RequestStarter.INTERACTIVE_PRIORITY_CLASS,
- RequestStarter.INTERACTIVE_PRIORITY_CLASS,
- RequestStarter.INTERACTIVE_PRIORITY_CLASS,
- RequestStarter.INTERACTIVE_PRIORITY_CLASS,
-
- RequestStarter.IMMEDIATE_SPLITFILE_PRIORITY_CLASS,
- RequestStarter.IMMEDIATE_SPLITFILE_PRIORITY_CLASS,
- RequestStarter.IMMEDIATE_SPLITFILE_PRIORITY_CLASS,
- RequestStarter.IMMEDIATE_SPLITFILE_PRIORITY_CLASS,
- RequestStarter.IMMEDIATE_SPLITFILE_PRIORITY_CLASS,
-
- RequestStarter.UPDATE_PRIORITY_CLASS,
- RequestStarter.UPDATE_PRIORITY_CLASS,
- RequestStarter.UPDATE_PRIORITY_CLASS,
- RequestStarter.UPDATE_PRIORITY_CLASS,
-
- RequestStarter.BULK_SPLITFILE_PRIORITY_CLASS,
- RequestStarter.BULK_SPLITFILE_PRIORITY_CLASS,
- RequestStarter.BULK_SPLITFILE_PRIORITY_CLASS,
-
- RequestStarter.PREFETCH_PRIORITY_CLASS,
- RequestStarter.PREFETCH_PRIORITY_CLASS,
-
- RequestStarter.MINIMUM_PRIORITY_CLASS
- };
- private final short[] prioritySelector = {
- RequestStarter.MAXIMUM_PRIORITY_CLASS,
- RequestStarter.INTERACTIVE_PRIORITY_CLASS,
- RequestStarter.IMMEDIATE_SPLITFILE_PRIORITY_CLASS,
- RequestStarter.UPDATE_PRIORITY_CLASS,
- RequestStarter.BULK_SPLITFILE_PRIORITY_CLASS,
- RequestStarter.PREFETCH_PRIORITY_CLASS,
- RequestStarter.MINIMUM_PRIORITY_CLASS
- };
-
- public ClientRequestScheduler(boolean forInserts, boolean forSSKs, RandomSource random, RequestStarter starter, Node node, NodeClientCore core, SubConfig sc, String name) {
+ public ClientRequestScheduler(boolean forInserts, boolean forSSKs, RandomSource random, RequestStarter starter, Node node, NodeClientCore core, SubConfig sc, String name, ClientContext context) {
+ this.isInsertScheduler = forInserts;
+ this.isSSKScheduler = forSSKs;
+ schedCore = ClientRequestSchedulerCore.create(node, forInserts, forSSKs, node.db, COOLDOWN_PERIOD, core.clientDatabaseExecutor, this, context);
+ schedTransient = new ClientRequestSchedulerNonPersistent(this, forInserts, forSSKs);
+ persistentCooldownQueue = schedCore.persistentCooldownQueue;
+ this.databaseExecutor = core.clientDatabaseExecutor;
+ this.datastoreChecker = core.storeChecker;
this.starter = starter;
this.random = random;
this.node = node;
- this.isInsertScheduler = forInserts;
- this.isSSKScheduler = forSSKs;
- priorities = new SortedVectorByNumber[RequestStarter.NUMBER_OF_PRIORITY_CLASSES];
- allRequestsByClientRequest = new WeakHashMap<ClientRequester, HashSet<SendableRequest>>();
- if(forInserts)
- pendingKeys = null;
- else
- pendingKeys = new HashMap<Key, Object>();
+ this.clientContext = context;
this.name = name;
sc.register(name+"_priority_policy", PRIORITY_HARD,
name.hashCode(), true, false,
@@ -194,16 +139,51 @@
if(!forInserts) {
offeredKeys = new OfferedKeysList[RequestStarter.NUMBER_OF_PRIORITY_CLASSES];
for(short i=0;i<RequestStarter.NUMBER_OF_PRIORITY_CLASSES;i++)
- offeredKeys[i] = new OfferedKeysList(core, random, i);
+ offeredKeys[i] = new OfferedKeysList(core, random, i, forSSKs);
} else {
offeredKeys = null;
}
if(!forInserts)
- cooldownQueue = new RequestCooldownQueue(COOLDOWN_PERIOD);
+ transientCooldownQueue = new RequestCooldownQueue(COOLDOWN_PERIOD);
else
- cooldownQueue = null;
+ transientCooldownQueue = null;
+ jobRunner = clientContext.jobRunner;
}
+ public static void loadKeyListeners(final ObjectContainer container, ClientContext context) {
+ ObjectSet<HasKeyListener> results =
+ Db4oBugs.query(container, HasKeyListener.class);
+ for(HasKeyListener l : results) {
+ container.activate(l, 1);
+ try {
+ if(l.isCancelled(container)) continue;
+ KeyListener listener = l.makeKeyListener(container, context);
+ if(listener != null) {
+ if(listener.isSSK())
+ context.getSskFetchScheduler().addPersistentPendingKeys(listener);
+ else
+ context.getChkFetchScheduler().addPersistentPendingKeys(listener);
+ System.err.println("Loaded request key listener: "+listener+" for "+l);
+ }
+ } catch (KeyListenerConstructionException e) {
+ System.err.println("FAILED TO LOAD REQUEST
BLOOM FILTERS:");
+ e.printStackTrace();
+ Logger.error(ClientRequestSchedulerCore.class,
"FAILED TO LOAD REQUEST BLOOM FILTERS: "+e, e);
+ } catch (Throwable t) {
+ // Probably an error on last startup???
+ Logger.error(ClientRequestSchedulerCore.class, "FAILED TO LOAD REQUEST: "+t, t);
+ System.err.println("FAILED TO LOAD REQUEST: "+t);
+ t.printStackTrace();
+ }
+ container.deactivate(l, 1);
+ }
+ }
+
+ public void start(NodeClientCore core) {
+ schedCore.start(core);
+ queueFillRequestStarterQueue();
+ }
+
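
Note: loadKeyListeners() above rebuilds the in-RAM key watch lists (the request Bloom filters) from HasKeyListener objects persisted in db4o, and routes each rebuilt listener to the SSK or CHK fetch scheduler. The contract it relies on, sketched with simplified stand-in interfaces (the real HasKeyListener/KeyListener take an ObjectContainer and ClientContext and have more methods):

    // Sketch only; names and signatures are deliberately simplified.
    interface HasKeyListenerSketch {
        boolean isCancelled();
        // Rebuild the in-RAM listener, e.g. a Bloom filter over the keys
        // this request still wants; null means nothing left to watch.
        KeyListenerSketch makeKeyListener() throws Exception;
    }

    interface KeyListenerSketch {
        boolean isSSK(); // routes the listener to the SSK or CHK scheduler
    }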
/** Called by the config. Callback
*
* @param val
@@ -212,505 +192,807 @@
choosenPriorityScheduler = val;
}
- public void register(SendableRequest req) {
- if(logMINOR) Logger.minor(this, "Registering "+req, new
Exception("debug"));
- if(isInsertScheduler != (req instanceof SendableInsert))
- throw new IllegalArgumentException("Expected a
SendableInsert: "+req);
- if(req instanceof SendableGet) {
- SendableGet getter = (SendableGet)req;
- if(!getter.ignoreStore()) {
- boolean anyValid = false;
- Object[] keyTokens = getter.sendableKeys();
- for(int i=0;i<keyTokens.length;i++) {
- Object tok = keyTokens[i];
- ClientKeyBlock block = null;
- try {
- ClientKey key =
getter.getKey(tok);
- if(key == null) {
- if(logMINOR)
-
Logger.minor(this, "No key for "+tok+" for "+getter+" - already finished?");
- continue;
- } else {
-
if(getter.getContext().blocks != null)
- block =
getter.getContext().blocks.get(key);
- if(block == null)
- block =
node.fetchKey(key, getter.dontCache());
- if(block == null) {
-
addPendingKey(key, getter);
- } else {
- if(logMINOR)
-
Logger.minor(this, "Got "+block+" for "+key.getURI());
+ public void registerInsert(final SendableRequest req, boolean persistent, boolean regmeOnly, ObjectContainer container) {
+ registerInsert(req, persistent, regmeOnly, databaseExecutor.onThread(), container);
+ }
+
+ static final int QUEUE_THRESHOLD = 100;
+
+ public void registerInsert(final SendableRequest req, boolean persistent, boolean regmeOnly, boolean onDatabaseThread, ObjectContainer container) {
+ if(!isInsertScheduler)
+ throw new IllegalArgumentException("Adding a
SendableInsert to a request scheduler!!");
+ if(persistent) {
+ if(onDatabaseThread) {
+ if(regmeOnly) {
+ long bootID = 0;
+ boolean queueFull = jobRunner.getQueueSize(NativeThread.NORM_PRIORITY) >= QUEUE_THRESHOLD;
+ if(!queueFull)
+ bootID = this.node.bootID;
+ final RegisterMe regme = new RegisterMe(req, req.getPriorityClass(container), schedCore, null, bootID);
+ container.store(regme);
+ if(logMINOR)
+ Logger.minor(this, "Added insert RegisterMe: "+regme);
+ if(!queueFull) {
+ jobRunner.queue(new DBJob() {
+
+ public void run(ObjectContainer
container, ClientContext context) {
+ container.delete(regme);
+
if(req.isCancelled(container)) {
+ if(logMINOR)
Logger.minor(this, "Request already cancelled");
+ return;
}
+
if(container.ext().isActive(req))
+
Logger.error(this, "ALREADY ACTIVE: "+req+" in delayed insert register");
+ container.activate(req,
1);
+ registerInsert(req,
true, false, true, container);
+
container.deactivate(req, 1);
}
- } catch (KeyVerifyException e) {
- // Verify exception, probably
bogus at source;
- // verifies at low-level, but
not at decode.
- if(logMINOR)
- Logger.minor(this,
"Decode failed: "+e, e);
- getter.onFailure(new
LowLevelGetException(LowLevelGetException.DECODE_FAILED), tok, this);
- continue; // other keys might
be valid
- }
- if(block != null) {
- if(logMINOR) Logger.minor(this,
"Can fulfill "+req+" ("+tok+") immediately from store");
- getter.onSuccess(block, true,
tok, this);
- // Even with working thread
priorities, we still get very high latency accessing
- // the datastore when
background threads are doing it in parallel.
- // So yield() here, unless
priority is very high.
- if(req.getPriorityClass() >
RequestStarter.IMMEDIATE_SPLITFILE_PRIORITY_CLASS)
- Thread.yield();
+
+ }, NativeThread.NORM_PRIORITY, false);
} else {
- anyValid = true;
+
schedCore.rerunRegisterMeRunner(jobRunner);
}
- }
- if(!anyValid) {
- if(logMINOR)
- Logger.minor(this, "No valid
keys, returning without registering for "+req);
+ container.deactivate(req, 1);
return;
}
+ schedCore.innerRegister(req, random, container,
null);
+ starter.wakeUp();
+ } else {
+ jobRunner.queue(new DBJob() {
+
+ public void run(ObjectContainer
container, ClientContext context) {
+
if(container.ext().isActive(req))
+ Logger.error(this,
"ALREADY ACTIVE: "+req+" in off-thread insert register");
+ container.activate(req, 1);
+ schedCore.innerRegister(req,
random, container, null);
+ container.deactivate(req, 1);
+ }
+
+ }, NativeThread.NORM_PRIORITY, false);
}
+ } else {
+ schedTransient.innerRegister(req, random, null, null);
+ starter.wakeUp();
}
- innerRegister(req);
- starter.wakeUp();
}
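
Note: persistent inserts are made crash-safe above by the RegisterMe pattern: a small marker object is stored first, then the real registration runs from a queued database job; if the job queue is saturated (QUEUE_THRESHOLD), bootID is left at 0 so the RegisterMeRunner sweep picks the marker up later. A rough sketch of the marker's shape, with assumed field names (the actual RegisterMe constructor takes more arguments):

    // Sketch only; field names are assumptions for illustration.
    class RegisterMeSketch {
        final short priorityClass;
        // bootID of the run that queued the follow-up job; 0 means "no
        // job was queued, a sweeper must pick this up on a later pass".
        final long bootID;

        RegisterMeSketch(short priorityClass, long bootID) {
            this.priorityClass = priorityClass;
            this.bootID = bootID;
        }
    }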
/**
- * Register a pending key to an already-registered request. This is necessary if we've
- * already registered a SendableGet, but we later add some more keys to it.
+ * Register a group of requests (not inserts): a GotKeyListener and/or one
+ * or more SendableGet's.
+ * @param listener Listeners for specific keys. Can be null if the listener
+ * is already registered e.g. most of the time with SplitFileFetcher*.
+ * @param getters The actual requests to register to the request sender queue.
+ * @param persistent True if the request is persistent.
+ * @param onDatabaseThread True if we are running on the database thread.
+ * NOTE: delayedStoreCheck/probablyNotInStore is unnecessary because we only
+ * register the listener once.
+ * @throws FetchException
*/
- void addPendingKey(ClientKey key, SendableGet getter) {
+ public void register(final HasKeyListener hasListener, final SendableGet[] getters, final boolean persistent, boolean onDatabaseThread, ObjectContainer container, final BlockSet blocks, final boolean noCheckStore) throws KeyListenerConstructionException {
if(logMINOR)
- Logger.minor(this, "Adding pending key "+key+" for
"+getter);
- Key nodeKey = key.getNodeKey();
- synchronized(pendingKeys) {
- Object o = pendingKeys.get(nodeKey);
- if(o == null) {
- pendingKeys.put(nodeKey, getter);
- } else if(o instanceof SendableGet) {
- SendableGet oldGet = (SendableGet) o;
- if(oldGet != getter) {
- pendingKeys.put(nodeKey, new
SendableGet[] { oldGet, getter });
- }
+ Logger.minor(this,
"register("+persistent+","+hasListener+","+getters);
+ if(isInsertScheduler) {
+ IllegalStateException e = new IllegalStateException("finishRegister on an insert scheduler");
+ throw e;
+ }
+ if(persistent) {
+ if(onDatabaseThread) {
+ innerRegister(hasListener, getters, blocks, noCheckStore, container);
} else {
- SendableGet[] gets = (SendableGet[]) o;
- boolean found = false;
- for(int j=0;j<gets.length;j++) {
- if(gets[j] == getter) {
- found = true;
- break;
+ jobRunner.queue(new DBJob() {
+
+ public void run(ObjectContainer
container, ClientContext context) {
+ // registerOffThread would be
pointless because this is a separate job.
+ if(hasListener != null) {
+
if(container.ext().isActive(hasListener))
+
Logger.error(this, "ALREADY ACTIVE in delayed register: "+hasListener);
+
container.activate(hasListener, 1);
+ }
+ if(getters != null) {
+ for(int
i=0;i<getters.length;i++) {
+
if(container.ext().isActive(getters[i]))
+
Logger.error(this, "ALREADY ACTIVE in delayed register: "+getters[i]);
+
container.activate(getters[i], 1);
+ }
+ }
+ try {
+
innerRegister(hasListener, getters, blocks, noCheckStore, container);
+ } catch
(KeyListenerConstructionException e) {
+ Logger.error(this,
"Registration failed to create Bloom filters: "+e+" on "+hasListener, e);
+ }
+ if(hasListener != null)
+
container.deactivate(hasListener, 1);
+ if(getters != null) {
+ for(int
i=0;i<getters.length;i++)
+
container.deactivate(getters[i], 1);
+ }
}
+
+ }, NativeThread.NORM_PRIORITY, false);
+ }
+ } else {
+ final KeyListener listener;
+ if(hasListener != null) {
+ listener = hasListener.makeKeyListener(container, clientContext);
+ if(listener != null)
+ schedTransient.addPendingKeys(listener);
+ else
+ Logger.normal(this, "No KeyListener for "+listener);
+ } else
+ listener = null;
+ if(getters != null && !noCheckStore) {
+ for(SendableGet getter : getters)
+ datastoreChecker.queueTransientRequest(getter, blocks);
+ } else {
+ boolean anyValid = false;
+ for(int i=0;i<getters.length;i++) {
+ if(!(getters[i].isCancelled(null) || getters[i].isEmpty(null)))
+ anyValid = true;
}
- if(!found) {
- SendableGet[] newGets = new
SendableGet[gets.length+1];
- System.arraycopy(gets, 0, newGets, 0,
gets.length);
- newGets[gets.length] = getter;
- pendingKeys.put(nodeKey, newGets);
+ finishRegister(getters, false, onDatabaseThread, container, anyValid, null);
+ }
+ }
+ }
+
+
+ private void innerRegister(final HasKeyListener hasListener, final SendableGet[] getters, final BlockSet blocks, boolean noCheckStore, ObjectContainer container) throws KeyListenerConstructionException {
+ final KeyListener listener;
+ if(hasListener != null) {
+ listener = hasListener.makeKeyListener(container, clientContext);
+ schedCore.addPendingKeys(listener);
+ container.store(hasListener);
+ } else
+ listener = null;
+
+ // Avoid NPEs due to deactivation.
+ if(getters != null) {
+ for(SendableGet getter : getters) {
+ container.activate(getter, 1);
+ container.store(getter);
+ }
+ }
+
+ if(isInsertScheduler) {
+ IllegalStateException e = new IllegalStateException("finishRegister on an insert scheduler");
+ throw e;
+ }
+ if(!noCheckStore) {
+ // Check the datastore before proceeding.
+ for(SendableGet getter : getters) {
+ container.activate(getter, 1);
+ datastoreChecker.queuePersistentRequest(getter, blocks, container);
+ container.deactivate(getter, 1);
+ }
+
+ } else {
+ // We have already checked the datastore, this is a retry, the listener hasn't been unregistered.
+ short prio = RequestStarter.MINIMUM_PRIORITY_CLASS;
+ for(int i=0;i<getters.length;i++) {
+ short p = getters[i].getPriorityClass(container);
+ if(p < prio) prio = p;
+ }
+ this.finishRegister(getters, true, true, container, true, null);
+ }
+ }
+
+ void finishRegister(final SendableGet[] getters, boolean persistent, boolean onDatabaseThread, ObjectContainer container, final boolean anyValid, final DatastoreCheckerItem reg) {
+ if(isInsertScheduler && getters != null) {
+ IllegalStateException e = new IllegalStateException("finishRegister on an insert scheduler");
+ if(onDatabaseThread || !persistent) {
+ for(int i=0;i<getters.length;i++) {
+ if(persistent)
+ container.activate(getters[i],
1);
+ getters[i].internalError(e, this,
container, clientContext, persistent);
+ if(persistent)
+
container.deactivate(getters[i], 1);
}
}
+ throw e;
}
+ if(persistent) {
+ // Add to the persistent registration queue
+ if(onDatabaseThread) {
+ if(!databaseExecutor.onThread()) {
+ throw new IllegalStateException("Not on
database thread!");
+ }
+ if(persistent)
+ container.activate(getters, 1);
+ if(logMINOR)
+ Logger.minor(this, "finishRegister()
for "+getters);
+ if(anyValid) {
+ boolean wereAnyValid = false;
+ for(int i=0;i<getters.length;i++) {
+ SendableGet getter = getters[i];
+ container.activate(getter, 1);
+ if(!(getter.isCancelled(container) || getter.isEmpty(container))) {
+ wereAnyValid = true;
+ schedCore.innerRegister(getter, random, container, getters);
+ }
+ }
+ if(!wereAnyValid) {
+ Logger.normal(this, "No requests valid: "+getters);
+ }
+ } else {
+ Logger.normal(this, "No valid requests passed in: "+getters);
+ }
+ if(reg != null)
+ container.delete(reg);
+ maybeFillStarterQueue(container, clientContext, getters);
+ starter.wakeUp();
+ } else {
+ jobRunner.queue(new DBJob() {
+
+ public void run(ObjectContainer
container, ClientContext context) {
+ if(logMINOR)
+ Logger.minor(this,
"finishRegister() for "+getters);
+ boolean wereAnyValid = false;
+ for(SendableGet getter :
getters) {
+
if(container.ext().isActive(getter))
+
Logger.error(this, "ALREADY ACTIVE in delayed finishRegister: "+getter);
+
container.activate(getter, 1);
+
if(!(getter.isCancelled(container) || getter.isEmpty(container))) {
+ wereAnyValid =
true;
+
schedCore.innerRegister(getter, random, container, getters);
+ }
+
container.deactivate(getter, 1);
+ }
+ if(!wereAnyValid) {
+ Logger.normal(this, "No
requests valid: "+getters);
+ }
+ if(reg != null)
+ container.delete(reg);
+
maybeFillStarterQueue(container, context, getters);
+ starter.wakeUp();
+ }
+
+ }, NativeThread.NORM_PRIORITY+1, false);
+ }
+ } else {
+ if(!anyValid) return;
+ // Register immediately.
+ for(int i=0;i<getters.length;i++)
+ schedTransient.innerRegister(getters[i],
random, null, getters);
+ starter.wakeUp();
+ }
}
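
Note: registerInsert(), register() and finishRegister() above all share one idiom: if we are already on the database thread, do the work inline; otherwise wrap it in a DBJob and queue it. Reduced to a sketch (DBJob is from this patch; runOrQueue and the queue interface are inventions for illustration):

    // Sketch of the on-thread-or-queue idiom; not the real API.
    interface DBJobSketch {
        void run(Object container, Object context);
    }

    interface JobQueueSketch {
        void queue(DBJobSketch job);
    }

    final class DualPathSketch {
        static void runOrQueue(boolean onDatabaseThread, DBJobSketch job,
                Object container, Object context, JobQueueSketch queue) {
            if(onDatabaseThread)
                job.run(container, context); // safe: we hold the database thread
            else
                queue.queue(job);            // defer to the database thread
        }
    }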
- private synchronized void innerRegister(SendableRequest req) {
- if(logMINOR) Logger.minor(this, "Still registering "+req+" at
prio "+req.getPriorityClass()+" retry "+req.getRetryCount()+" for
"+req.getClientRequest());
- addToGrabArray(req.getPriorityClass(), req.getRetryCount(),
req.getClient(), req.getClientRequest(), req);
- HashSet<SendableRequest> v =
allRequestsByClientRequest.get(req.getClientRequest());
- if(v == null) {
- v = new HashSet<SendableRequest>();
- allRequestsByClientRequest.put(req.getClientRequest(),
v);
+ private void maybeFillStarterQueue(ObjectContainer container,
ClientContext context, SendableRequest[] mightBeActive) {
+ synchronized(this) {
+ if(starterQueue.size() > MAX_STARTER_QUEUE_SIZE / 2)
+ return;
}
- v.add(req);
- if(logMINOR) Logger.minor(this, "Registered "+req+" on
prioclass="+req.getPriorityClass()+", retrycount="+req.getRetryCount()+"
v.size()="+v.size());
+ fillRequestStarterQueue(container, context, mightBeActive);
}
+
+ public ChosenBlock getBetterNonPersistentRequest(short prio, int retryCount) {
+ short fuzz = -1;
+ if(PRIORITY_SOFT.equals(choosenPriorityScheduler))
+ fuzz = -1;
+ else if(PRIORITY_HARD.equals(choosenPriorityScheduler))
+ fuzz = 0;
+ return schedCore.removeFirst(fuzz, random, offeredKeys, starter, schedTransient, true, false, prio, retryCount, clientContext, null);
+ }
- private synchronized void addToGrabArray(short priorityClass, int retryCount, Object client, ClientRequester cr, SendableRequest req) {
- if((priorityClass > RequestStarter.MINIMUM_PRIORITY_CLASS) || (priorityClass < RequestStarter.MAXIMUM_PRIORITY_CLASS))
- throw new IllegalStateException("Invalid priority: "+priorityClass+" - range is "+RequestStarter.MAXIMUM_PRIORITY_CLASS+" (most important) to "+RequestStarter.MINIMUM_PRIORITY_CLASS+" (least important)");
- // Priority
- SortedVectorByNumber prio = priorities[priorityClass];
- if(prio == null) {
- prio = new SortedVectorByNumber();
- priorities[priorityClass] = prio;
+ /**
+ * All the persistent SendableRequest's currently running (either actually in flight, just chosen,
+ * awaiting the callbacks being executed etc). Note that this is an ArrayList because we *must*
+ * compare by pointer: these objects may well implement hashCode() etc for use by other code, but
+ * if they are deactivated, they will be unreliable. Fortunately, this will be fairly small most
+ * of the time, since a single SendableRequest might include 256 actual requests.
+ *
+ * SYNCHRONIZATION: Synched on starterQueue.
+ */
+ private final transient ArrayList<SendableRequest> runningPersistentRequests = new ArrayList<SendableRequest>();
+
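
Note: the comment above explains why runningPersistentRequests is an ArrayList scanned by reference rather than a HashSet: equals()/hashCode() on a deactivated db4o object cannot be trusted. A standalone illustration of identity-based membership, with a stand-in element type:

    import java.util.ArrayList;
    import java.util.List;

    final class IdentityListSketch {
        private final List<Object> running = new ArrayList<Object>();

        boolean containsByIdentity(Object req) {
            // Reference equality only; never rely on equals() here.
            for(int i = 0; i < running.size(); i++)
                if(running.get(i) == req)
                    return true;
            return false;
        }
    }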
+ public void removeRunningRequest(SendableRequest request) {
+ synchronized(starterQueue) {
+ for(int i=0;i<runningPersistentRequests.size();i++) {
+ if(runningPersistentRequests.get(i) == request) {
+ runningPersistentRequests.remove(i);
+ i--;
+ if(logMINOR)
+ Logger.minor(this, "Removed running request "+request+" size now "+runningPersistentRequests.size());
+ }
+ }
}
- // Client
- int rc = fixRetryCount(retryCount);
- SectoredRandomGrabArrayWithInt clientGrabber =
(SectoredRandomGrabArrayWithInt) prio.get(rc);
- if(clientGrabber == null) {
- clientGrabber = new
SectoredRandomGrabArrayWithInt(random, rc);
- prio.add(clientGrabber);
- if(logMINOR) Logger.minor(this, "Registering retry
count "+rc+" with prioclass "+priorityClass+" on "+clientGrabber+" for "+prio);
- }
- // SectoredRandomGrabArrayWithInt and lower down have
hierarchical locking and auto-remove.
- // To avoid a race condition it is essential to mirror that
here.
- synchronized(clientGrabber) {
- // Request
- SectoredRandomGrabArrayWithObject requestGrabber =
(SectoredRandomGrabArrayWithObject) clientGrabber.getGrabber(client);
- if(requestGrabber == null) {
- requestGrabber = new
SectoredRandomGrabArrayWithObject(client, random);
- if(logMINOR)
- Logger.minor(this, "Creating new
grabber: "+requestGrabber+" for "+client+" from "+clientGrabber+" : "+prio+" :
prio="+priorityClass+", rc="+rc);
- clientGrabber.addGrabber(client,
requestGrabber);
+ }
+
+ public boolean isRunningOrQueuedPersistentRequest(SendableRequest request) {
+ synchronized(starterQueue) {
+ for(int i=0;i<runningPersistentRequests.size();i++) {
+ if(runningPersistentRequests.get(i) == request)
+ return true;
}
- requestGrabber.add(cr, req);
+ for(PersistentChosenRequest req : starterQueue) {
+ if(req.request == request) return true;
+ }
}
+ return false;
}
-
+
+ /** The maximum number of requests that we will keep on the in-RAM
request
+ * starter queue. */
+ static final int MAX_STARTER_QUEUE_SIZE = 512; // two full segments
+
+ /** The above doesn't include in-flight requests. In-flight requests will
+ * of course still have PersistentChosenRequest's in the database (on disk)
+ * even though they are not on the starter queue and so don't count towards
+ * the above limit. So we have a higher limit before we complain that
+ * something odd is happening.. (e.g. leaking PersistentChosenRequest's). */
+ static final int WARNING_STARTER_QUEUE_SIZE = 800;
+ private static final long WAIT_AFTER_NOTHING_TO_START = 60*1000;
+
+ private transient LinkedList<PersistentChosenRequest> starterQueue = new LinkedList<PersistentChosenRequest>();
+
/**
- * Mangle the retry count.
- * Below a certain number of attempts, we don't prefer one request to another just because
- * it's been tried more times. The reason for this is to prevent floods of low-retry-count
- * requests from starving other clients' requests which need to be retried. The other
- * solution would be to sort by client before retry count, but that would be excessive
- * IMHO; we DO want to avoid rerequesting keys we've tried many times before.
+ * Called by RequestStarter to find a request to run.
*/
- private int fixRetryCount(int retryCount) {
- return Math.max(0, retryCount-MIN_RETRY_COUNT);
+ public ChosenBlock grabRequest() {
+ while(true) {
+ PersistentChosenRequest reqGroup = null;
+ synchronized(starterQueue) {
+ short bestPriority = Short.MAX_VALUE;
+ int bestRetryCount = Integer.MAX_VALUE;
+ for(PersistentChosenRequest req : starterQueue) {
+ if(req.prio < bestPriority ||
+ (req.prio == bestPriority && req.retryCount < bestRetryCount)) {
+ bestPriority = req.prio;
+ bestRetryCount = req.retryCount;
+ reqGroup = req;
+ }
+ }
+ }
+ if(reqGroup != null) {
+ // Try to find a better non-persistent request
+ if(logMINOR) Logger.minor(this, "Persistent request: "+reqGroup+" prio "+reqGroup.prio+" retryCount "+reqGroup.retryCount);
+ ChosenBlock better = getBetterNonPersistentRequest(reqGroup.prio, reqGroup.retryCount);
+ if(better != null) {
+ if(better.getPriority() > reqGroup.prio) {
+ Logger.error(this, "Selected "+better+" as better than "+reqGroup+" but isn't better!");
+ }
+ return better;
+ }
+ }
+ if(reqGroup == null) {
+ queueFillRequestStarterQueue();
+ return getBetterNonPersistentRequest(Short.MAX_VALUE, Integer.MAX_VALUE);
+ }
+ ChosenBlock block;
+ int finalLength = 0;
+ synchronized(starterQueue) {
+ block = reqGroup.grabNotStarted(clientContext.fastWeakRandom, this);
+ if(block == null) {
+ for(int i=0;i<starterQueue.size();i++) {
+ if(starterQueue.get(i) == reqGroup) {
+ starterQueue.remove(i);
+ if(logMINOR)
+ Logger.minor(this, "Removed "+reqGroup+" from starter queue because is empty");
+ i--;
+ } else {
+ finalLength += starterQueue.get(i).sizeNotStarted();
+ }
+ }
+ continue;
+ } else {
+ // Prevent this request being selected, even though we may remove the PCR from the starter queue
+ // in the very near future. When the PCR finishes, the requests will be un-blocked.
+ if(!runningPersistentRequests.contains(reqGroup.request))
+ runningPersistentRequests.add(reqGroup.request);
+ }
+ }
+ if(finalLength < MAX_STARTER_QUEUE_SIZE)
+ queueFillRequestStarterQueue();
+ if(logMINOR)
+ Logger.minor(this, "grabRequest() returning
"+block+" for "+reqGroup);
+ return block;
+ }
}
+
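
Note: grabRequest() above always prefers the numerically lowest priority class, breaking ties by the lowest retry count, and then gives a transient request the chance to beat the best persistent group. The ordering rule, factored out as a comparator sketch (Req is a stand-in for PersistentChosenRequest, which per this patch really does expose prio and retryCount):

    import java.util.Comparator;

    final class BestFirstSketch {
        static class Req {
            short prio;     // lower = more urgent priority class
            int retryCount; // lower = tried fewer times
        }

        static final Comparator<Req> BEST_FIRST = new Comparator<Req>() {
            public int compare(Req a, Req b) {
                if(a.prio != b.prio)
                    return a.prio - b.prio;         // priority class first
                return a.retryCount - b.retryCount; // then fewest retries
            }
        };
    }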
+ /** Don't fill the starter queue until this point. Used to implement a 60 second
+ * cooldown after failing to fill the queue: if there was nothing queued, and since
+ * we know that any newly started requests will be added to the queue, this is
+ * an acceptable optimisation to reduce the database load from the idle schedulers... */
+ private long nextQueueFillRequestStarterQueue = -1;
+
+ public void queueFillRequestStarterQueue() {
+ if(System.currentTimeMillis() < nextQueueFillRequestStarterQueue)
+ return;
+ if(starterQueueLength() > MAX_STARTER_QUEUE_SIZE / 2)
+ return;
+ jobRunner.queue(requestStarterQueueFiller, NativeThread.MAX_PRIORITY, true);
+ }
- private int removeFirstAccordingToPriorities(boolean tryOfferedKeys){
- SortedVectorByNumber result = null;
-
- short fuzz = -1, iteration = 0, priority;
- synchronized (this) {
- if(PRIORITY_SOFT.equals(choosenPriorityScheduler))
- fuzz = -1;
- else if(PRIORITY_HARD.equals(choosenPriorityScheduler))
- fuzz = 0;
+ private int starterQueueLength() {
+ int length = 0;
+ synchronized(starterQueue) {
+ for(PersistentChosenRequest request : starterQueue)
+ length += request.sizeNotStarted();
}
- // we loop to ensure we try every possibilities ( n + 1)
- //
- // PRIO will do 0,1,2,3,4,5,6,0
- // TWEAKED will do rand%6,0,1,2,3,4,5,6
- while(iteration++ < RequestStarter.NUMBER_OF_PRIORITY_CLASSES +
1){
- priority = fuzz<0 ?
tweakedPrioritySelector[random.nextInt(tweakedPrioritySelector.length)] :
prioritySelector[Math.abs(fuzz % prioritySelector.length)];
- result = priorities[priority];
- if((result != null) &&
- (!result.isEmpty()) || (tryOfferedKeys
&& !offeredKeys[priority].isEmpty())) {
- if(logMINOR) Logger.minor(this, "using priority
: "+priority);
- return priority;
+ return length;
+ }
+
+ /**
+ * @param request
+ * @param container
+ * @return True if the queue is now full/over-full.
+ */
+ boolean addToStarterQueue(SendableRequest request, ObjectContainer container) {
+ if(logMINOR)
+ Logger.minor(this, "Adding to starter queue: "+request);
+ container.activate(request, 1);
+ PersistentChosenRequest chosen;
+ try {
+ chosen = new PersistentChosenRequest(request, request.getPriorityClass(container), request.getRetryCount(), container, ClientRequestScheduler.this, clientContext);
+ } catch (NoValidBlocksException e) {
+ return false;
+ }
+ if(logMINOR)
+ Logger.minor(this, "Created PCR: "+chosen);
+ container.deactivate(request, 1);
+ boolean dumpNew = false;
+ synchronized(starterQueue) {
+ for(PersistentChosenRequest req : starterQueue) {
+ if(req.request == request) {
+ Logger.error(this, "Already on starter
queue: "+req+" for "+request, new Exception("debug"));
+ dumpNew = true;
+ break;
+ }
}
-
- if(logMINOR) Logger.minor(this, "Priority "+priority+"
is null (fuzz = "+fuzz+ ')');
- fuzz++;
+ if(!dumpNew) {
+ starterQueue.add(chosen);
+ int length = starterQueueLength();
+ length += chosen.sizeNotStarted();
+ runningPersistentRequests.add(request);
+ if(logMINOR)
+ Logger.minor(this, "Added to running
persistent requests, size now "+runningPersistentRequests.size()+" : "+request);
+ return length > MAX_STARTER_QUEUE_SIZE;
+ }
}
-
- //FIXME: implement NONE
- return -1;
+ if(dumpNew)
+ chosen.onDumped(schedCore, container, false);
+ return false;
}
- // LOCKING: Life is a good deal simpler if we just synchronize on
(this).
- // We prevent a number of race conditions (e.g. adding a retry count
and then another
- // thread removes it cos its empty) ... and in addToGrabArray etc we
already sync on this.
- // The worry is ... is there any nested locking outside of the
hierarchy?
- public synchronized SendableRequest removeFirst() {
- // Priorities start at 0
- if(logMINOR) Logger.minor(this, "removeFirst()");
- boolean tryOfferedKeys = offeredKeys != null &&
node.random.nextBoolean();
- int choosenPriorityClass =
removeFirstAccordingToPriorities(tryOfferedKeys);
- if(choosenPriorityClass == -1 && offeredKeys != null &&
!tryOfferedKeys) {
- tryOfferedKeys = true;
- choosenPriorityClass =
removeFirstAccordingToPriorities(tryOfferedKeys);
+ void removeFromStarterQueue(SendableRequest req, ObjectContainer
container, boolean reqAlreadyActive) {
+ PersistentChosenRequest dumped = null;
+ synchronized(starterQueue) {
+ for(int i=0;i<starterQueue.size();i++) {
+ PersistentChosenRequest pcr =
starterQueue.get(i);
+ if(pcr.request == req) {
+ starterQueue.remove(i);
+ dumped = pcr;
+ break;
+ }
+ }
}
- if(choosenPriorityClass == -1) {
- if(logMINOR)
- Logger.minor(this, "Nothing to do");
- return null;
+ if(dumped != null)
+ dumped.onDumped(schedCore, container, reqAlreadyActive);
+ }
+
+ int starterQueueSize() {
+ synchronized(starterQueue) {
+ return starterQueue.size();
}
- for(;choosenPriorityClass <=
RequestStarter.MINIMUM_PRIORITY_CLASS;choosenPriorityClass++) {
- if(logMINOR) Logger.minor(this, "Using priority
"+choosenPriorityClass);
- if(tryOfferedKeys) {
-
if(offeredKeys[choosenPriorityClass].hasValidKeys(starter))
- return offeredKeys[choosenPriorityClass];
+ }
+
+ /** Maximum number of requests to select from a single SendableRequest
*/
+ final int MAX_CONSECUTIVE_SAME_REQ = 50;
+
+ private DBJob requestStarterQueueFiller = new DBJob() {
+ public void run(ObjectContainer container, ClientContext
context) {
+ fillRequestStarterQueue(container, context, null);
}
- SortedVectorByNumber s = priorities[choosenPriorityClass];
- if(s != null){
- for(int retryIndex=0;retryIndex<s.count();retryIndex++)
{
- SectoredRandomGrabArrayWithInt retryTracker =
(SectoredRandomGrabArrayWithInt) s.getByIndex(retryIndex);
- if(retryTracker == null) {
- if(logMINOR) Logger.minor(this, "No
retrycount's left");
- break;
+ };
+
+ private void fillRequestStarterQueue(ObjectContainer container, ClientContext context, SendableRequest[] mightBeActive) {
+ if(logMINOR) Logger.minor(this, "Filling request queue... (SSK="+isSSKScheduler+" insert="+isInsertScheduler);
+ long noLaterThan = Long.MAX_VALUE;
+ if(!isInsertScheduler) {
+ noLaterThan = moveKeysFromCooldownQueue(persistentCooldownQueue, true, container);
+ noLaterThan = Math.min(noLaterThan, moveKeysFromCooldownQueue(transientCooldownQueue, false, container));
+ }
+ // If anything has been re-added, the request starter will have
been woken up.
+ short fuzz = -1;
+ if(PRIORITY_SOFT.equals(choosenPriorityScheduler))
+ fuzz = -1;
+ else if(PRIORITY_HARD.equals(choosenPriorityScheduler))
+ fuzz = 0;
+ boolean added = false;
+ synchronized(starterQueue) {
+ if(logMINOR && (!isSSKScheduler) &&
(!isInsertScheduler)) {
+ Logger.minor(this, "Scheduling CHK fetches...");
+ for(SendableRequest req :
runningPersistentRequests) {
+ boolean wasActive =
container.ext().isActive(req);
+ if(!wasActive) container.activate(req,
1);
+ Logger.minor(this, "Running persistent
request: "+req);
+ if(!wasActive)
container.deactivate(req, 1);
}
- while(true) {
- if(logMINOR)
- Logger.minor(this, "Got retry
count tracker "+retryTracker);
- SendableRequest req = (SendableRequest)
retryTracker.removeRandom(starter);
- if(retryTracker.isEmpty()) {
- if(logMINOR) Logger.minor(this,
"Removing retrycount "+retryTracker.getNumber()+" : "+retryTracker);
-
s.remove(retryTracker.getNumber());
- if(s.isEmpty()) {
- if(logMINOR)
Logger.minor(this, "Should remove priority ");
- }
+ }
+ // Recompute starterQueueLength
+ int length = 0;
+ PersistentChosenRequest old = null;
+ for(PersistentChosenRequest req : starterQueue) {
+ if(old == req)
+ Logger.error(this, "DUPLICATE CHOSEN
REQUESTS ON QUEUE: "+req);
+ if(old != null && old.request == req.request)
+ Logger.error(this, "DUPLICATE REQUEST
ON QUEUE: "+old+" vs "+req+" both "+req.request);
+ boolean ignoreActive = false;
+ if(mightBeActive != null) {
+ for(SendableRequest tmp : mightBeActive)
+ if(tmp == req.request)
ignoreActive = true;
+ }
+ if(!ignoreActive) {
+
if(container.ext().isActive(req.request))
+ Logger.error(this, "REQUEST
ALREADY ACTIVATED: "+req.request+" for "+req+" while checking request queue in
filling request queue");
+ else if(logMINOR)
+ Logger.minor(this, "Not already
activated for "+req+" in while checking request queue in filling request
queue");
+ } else if(logMINOR)
+ Logger.minor(this, "Ignoring active
because just registered: "+req.request);
+
req.pruneDuplicates(ClientRequestScheduler.this);
+ old = req;
+ length += req.sizeNotStarted();
+ }
+ if(logMINOR) Logger.minor(this, "Queue size: "+length+"
SSK="+isSSKScheduler+" insert="+isInsertScheduler);
+ if(length >= MAX_STARTER_QUEUE_SIZE) {
+ if(length >= WARNING_STARTER_QUEUE_SIZE)
+ Logger.error(this, "Queue already full:
"+length);
+ return;
+ }
+ if(length > MAX_STARTER_QUEUE_SIZE * 3 / 4) {
+ return;
+ }
+ }
+
+ if((!isSSKScheduler) && (!isInsertScheduler)) {
+ Logger.minor(this, "Scheduling CHK fetches...");
+ }
+ boolean addedMore = false;
+ while(true) {
+ SendableRequest request =
schedCore.removeFirstInner(fuzz, random, offeredKeys, starter, schedTransient,
false, true, Short.MAX_VALUE, Integer.MAX_VALUE, context, container);
+ if(request == null) {
+ synchronized(ClientRequestScheduler.this) {
+ // Don't wake up for a while, but no
later than the time we expect the next item to come off the cooldown queue
+ if(!added) {
+
nextQueueFillRequestStarterQueue =
+
System.currentTimeMillis() + WAIT_AFTER_NOTHING_TO_START;
+
if(nextQueueFillRequestStarterQueue > noLaterThan)
+
nextQueueFillRequestStarterQueue = noLaterThan + 1;
}
- if(req == null) {
- if(logMINOR) Logger.minor(this,
"No requests, adjusted retrycount "+retryTracker.getNumber()+" ("+retryTracker+
')');
- break; // Try next retry count.
- } else if(req.getPriorityClass() !=
choosenPriorityClass) {
- // Reinsert it : shouldn't
happen if we are calling reregisterAll,
- // maybe we should ask people
to report that error if seen
- Logger.normal(this, "In wrong
priority class: "+req+" (req.prio="+req.getPriorityClass()+" but
chosen="+choosenPriorityClass+ ')');
- // Remove it.
-
SectoredRandomGrabArrayWithObject clientGrabber =
(SectoredRandomGrabArrayWithObject) retryTracker.getGrabber(req.getClient());
- if(clientGrabber != null) {
- RandomGrabArray baseRGA
= (RandomGrabArray) clientGrabber.getGrabber(req.getClientRequest());
- if(baseRGA != null) {
-
baseRGA.remove(req);
- }
- } else {
- Logger.error(this,
"Could not find client grabber for client "+req.getClient()+" from
"+retryTracker);
- }
- innerRegister(req);
- continue; // Try the next one
on this retry count.
- }
-
- RandomGrabArray altRGA = null;
- synchronized(this) {
- if(!recentSuccesses.isEmpty()) {
-
if(random.nextBoolean()) {
-
WeakReference<RandomGrabArray> ref = recentSuccesses.removeLast();
- altRGA =
ref.get();
- }
- }
- }
- if(altRGA != null) {
- SendableRequest altReq =
(SendableRequest) (altRGA.removeRandom(starter));
- if(altReq != null &&
altReq.getPriorityClass() <= choosenPriorityClass &&
-
fixRetryCount(altReq.getRetryCount()) <= retryTracker.getNumber()) {
- // Use the recent one
instead
- if(logMINOR)
-
Logger.minor(this, "Recently succeeded req "+altReq+" is better, using that,
reregistering chosen "+req);
- innerRegister(req);
- req = altReq;
- } else {
- if(altReq != null) {
-
synchronized(this) {
-
recentSuccesses.addLast(new WeakReference<RandomGrabArray>(altRGA));
- }
- if(logMINOR)
-
Logger.minor(this, "Chosen req "+req+" is better, reregistering recently
succeeded "+altReq);
-
innerRegister(altReq);
- }
- }
- }
-
- if(logMINOR) Logger.debug(this,
"removeFirst() returning "+req+" ("+retryTracker.getNumber()+", prio "+
-
req.getPriorityClass()+", retries "+req.getRetryCount()+", client
"+req.getClient()+", client-req "+req.getClientRequest()+ ')');
- ClientRequester cr =
req.getClientRequest();
- if(req.canRemove()) {
- synchronized(this) {
-
HashSet<SendableRequest> v = allRequestsByClientRequest.get(cr);
- if(v == null) {
-
Logger.error(this, "No HashSet registered for "+cr);
- } else {
- boolean removed
= v.remove(req);
- if(v.isEmpty())
-
allRequestsByClientRequest.remove(cr);
- if(logMINOR)
Logger.minor(this, (removed ? "" : "Not ") + "Removed from HashSet for "+cr+"
which now has "+v.size()+" elements");
- }
- }
- // Do not remove from the
pendingKeys list.
- // Whether it is running a
request, waiting to execute, or waiting on the
- // cooldown queue, ULPRs and
backdoor coalescing should still be active.
- }
- if(logMINOR) Logger.minor(this,
"removeFirst() returning "+req+" of "+req.getClientRequest());
- return req;
}
+ if(addedMore) starter.wakeUp();
+ return;
}
+ boolean full = addToStarterQueue(request, container);
+ container.deactivate(request, 1);
+ if(!added) starter.wakeUp();
+ else addedMore = true;
+ added = true;
+ if(full) {
+ if(addedMore) starter.wakeUp();
+ return;
+ }
}
+ }
+
+ /**
+ * Compare a recently registered SendableRequest to what is already on the
+ * starter queue. If it is better, kick out stuff from the queue until we
+ * are just over the limit.
+ * @param req
+ * @param container
+ */
+ public void maybeAddToStarterQueue(SendableRequest req, ObjectContainer
container, SendableRequest[] mightBeActive) {
+ short prio = req.getPriorityClass(container);
+ int retryCount = req.getRetryCount();
+ if(logMINOR)
+ Logger.minor(this, "Maybe adding to starter queue:
prio="+prio+" retry count="+retryCount);
+ boolean logDEBUG = Logger.shouldLog(Logger.DEBUG, this);
+ synchronized(starterQueue) {
+ boolean betterThanSome = false;
+ int size = 0;
+ PersistentChosenRequest prev = null;
+ for(PersistentChosenRequest old : starterQueue) {
+ if(old.request == req) {
+ // Wait for a reselect. Otherwise we
can starve other
+ // requests. Note that this happens
with persistent SBI's:
+ // they are added at the new retry
count before being
+ // removed at the old retry count.
+ if(logMINOR) Logger.minor(this,
"Already on starter queue: "+old+" for "+req);
+ return;
+ }
+ if(prev == old)
+ Logger.error(this, "ON STARTER QUEUE
TWICE: "+prev+" for "+prev.request);
+ if(prev != null && prev.request == old.request)
+ Logger.error(this, "REQUEST ON STARTER
QUEUE TWICE: "+prev+" for "+prev.request+" vs "+old+" for "+old.request);
+ boolean ignoreActive = false;
+ if(mightBeActive != null) {
+ for(SendableRequest tmp : mightBeActive)
+ if(tmp == old.request)
ignoreActive = true;
+ }
+ if(!ignoreActive) {
+
if(container.ext().isActive(old.request))
+ Logger.error(this, "REQUEST
ALREADY ACTIVATED: "+old.request+" for "+old+" while checking request queue in
maybeAddToStarterQueue for "+req);
+ else if(logDEBUG)
+ Logger.debug(this, "Not already
activated for "+old+" in while checking request queue in maybeAddToStarterQueue
for "+req);
+ } else if(logMINOR)
+ Logger.minor(this, "Ignoring active
because just registered: "+old.request+" in maybeAddToStarterQueue for "+req);
+ size += old.sizeNotStarted();
+ if(old.prio > prio || old.prio == prio &&
old.retryCount > retryCount)
+ betterThanSome = true;
+ if(old.request == req) return;
+ prev = old;
+ }
+ if(size >= MAX_STARTER_QUEUE_SIZE && !betterThanSome) {
+ if(logMINOR)
+ Logger.minor(this, "Not adding to
starter queue: over limit and req not better than any queued requests");
+ return;
+ }
}
- if(logMINOR) Logger.minor(this, "No requests to run");
- return null;
+ addToStarterQueue(req, container);
+ trimStarterQueue(container);
}
- public void removePendingKey(SendableGet getter, boolean complain, Key
key) {
- boolean dropped = false;
- Object o;
- synchronized(pendingKeys) {
- o = pendingKeys.get(key);
- if(o == null) {
- if(complain)
- Logger.normal(this, "Not found:
"+getter+" for "+key+" removing (no such key)");
- } else if(o instanceof SendableGet) {
- SendableGet oldGet = (SendableGet) o;
- if(oldGet != getter) {
- if(complain)
- Logger.normal(this, "Not found:
"+getter+" for "+key+" removing (1 getter)");
- } else {
- dropped = true;
- pendingKeys.remove(key);
+ private void trimStarterQueue(ObjectContainer container) {
+ ArrayList<PersistentChosenRequest> dumped = null;
+ synchronized(starterQueue) {
+ int length = starterQueueLength();
+ while(length > MAX_STARTER_QUEUE_SIZE) {
+ // Find the lowest priority/retry count request.
+ // If we can dump it without going below the
limit, then do so.
+ // If we can't, return.
+ PersistentChosenRequest worst = null;
+ short worstPrio = -1;
+ int worstRetryCount = -1;
+ int worstIndex = -1;
+ int worstLength = -1;
+ if(starterQueue.isEmpty()) {
+ break;
}
- } else {
- SendableGet[] gets = (SendableGet[]) o;
- final int getsLength = gets.length;
- SendableGet[] newGets = new
SendableGet[getsLength > 1 ? getsLength-1 : 0];
- boolean found = false;
- int x = 0;
- for(int j=0;j<getsLength;j++) {
- if(gets[j] == getter) {
- found = true;
- dropped = true;
+ length = 0;
+ for(int i=0;i<starterQueue.size();i++) {
+ PersistentChosenRequest req =
starterQueue.get(i);
+ short prio = req.prio;
+ int retryCount = req.retryCount;
+ int size = req.sizeNotStarted();
+ length += size;
+ if(prio > worstPrio ||
+ (prio == worstPrio &&
retryCount > worstRetryCount)) {
+ worstPrio = prio;
+ worstRetryCount = retryCount;
+ worst = req;
+ worstIndex = i;
+ worstLength = size;
continue;
}
- if(x == newGets.length) {
- if(!found) {
- if(complain)
-
Logger.normal(this, "Not found: "+getter+" for "+key+" removing ("+getsLength+"
getters)");
- return; // not here
- } // else is a contradiction,
let it get an ArrayIndexOutOfBounds.
- }
- if(gets[j] == null ||
gets[j].isCancelled()) continue;
- newGets[x++] = gets[j];
}
- if(x == 0) {
- pendingKeys.remove(key);
- } else if(x == 1) {
- pendingKeys.put(key, newGets[0]);
+ int lengthAfter = length - worstLength;
+ if(lengthAfter >= MAX_STARTER_QUEUE_SIZE) {
+ if(dumped == null)
+ dumped = new
ArrayList<PersistentChosenRequest>(2);
+ dumped.add(worst);
+ starterQueue.remove(worstIndex);
+ if(lengthAfter ==
MAX_STARTER_QUEUE_SIZE) break;
} else {
- if(x != getsLength-1) {
- SendableGet[] newNewGets = new
SendableGet[x];
- System.arraycopy(newGets, 0,
newNewGets, 0, x);
- newGets = newNewGets;
- }
- pendingKeys.put(key, newGets);
+ // Can't remove any more.
+ break;
}
}
}
- if(dropped && offeredKeys != null && !node.peersWantKey(key)) {
- for(int i=0;i<offeredKeys.length;i++)
- offeredKeys[i].remove(key);
+ if(dumped == null) return;
+ for(PersistentChosenRequest req : dumped) {
+ req.onDumped(schedCore, container, false);
}
- if(cooldownQueue != null)
- cooldownQueue.removeKey(key, getter,
getter.getCooldownWakeupByKey(key));
}
-
+
/**
- * Remove a SendableGet from the list of getters we maintain for each
key, indicating that we are no longer interested
- * in that key.
+ * Remove a KeyListener from the list of KeyListeners.
* @param getter
* @param complain
*/
- public void removePendingKeys(SendableGet getter, boolean complain) {
- Object[] keyTokens = getter.allKeys();
- for(int i=0;i<keyTokens.length;i++) {
- Object tok = keyTokens[i];
- ClientKey ckey = getter.getKey(tok);
- if(ckey == null) {
- if(complain)
- Logger.error(this, "Key "+tok+" is null
for "+getter, new Exception("debug"));
- continue;
- }
- removePendingKey(getter, complain, ckey.getNodeKey());
- }
+ public void removePendingKeys(KeyListener getter, boolean complain) {
+ boolean found = schedTransient.removePendingKeys(getter);
+ found |= schedCore.removePendingKeys(getter);
+ if(complain && !found)
+ Logger.error(this, "Listener not found when removing:
"+getter);
}
- public void reregisterAll(ClientRequester request) {
- SendableRequest[] reqs;
- synchronized(this) {
- HashSet<SendableRequest> h =
allRequestsByClientRequest.get(request);
- if(h == null) return;
- reqs = h.toArray(new SendableRequest[h.size()]);
- }
-
- for(int i=0;i<reqs.length;i++) {
- SendableRequest req = reqs[i];
- // Unregister from the RGA's, but keep the pendingKeys
and cooldown queue data.
- req.unregister(true);
- // Then can do innerRegister() (not register()).
- innerRegister(req);
- }
+ /**
+ * Remove a KeyListener from the list of KeyListeners.
+ * @param getter
+ * @param complain
+ */
+ public void removePendingKeys(HasKeyListener getter, boolean complain) {
+ boolean found = schedTransient.removePendingKeys(getter);
+ found |= schedCore.removePendingKeys(getter);
+ if(complain && !found)
+ Logger.error(this, "Listener not found when removing:
"+getter);
+ }
+
+ public void reregisterAll(final ClientRequester request, ObjectContainer container) {
+ schedTransient.reregisterAll(request, random, this, null, clientContext);
+ schedCore.reregisterAll(request, random, this, container, clientContext);
starter.wakeUp();
}
-
+
public String getChoosenPriorityScheduler() {
return choosenPriorityScheduler;
}
- public void succeeded(RandomGrabArray parentGrabArray) {
- synchronized(this) {
- if(logMINOR)
- Logger.minor(this, "Recording successful fetch
from "+parentGrabArray);
- recentSuccesses.addFirst(new
WeakReference<RandomGrabArray>(parentGrabArray));
- while(recentSuccesses.size() > 8)
- recentSuccesses.removeLast();
- }
+ /*
+ * tripPendingKey() callbacks must run quickly, since we've found a block.
+ * succeeded() must run quickly, since we delete the PersistentChosenRequest.
+ * tripPendingKey() must run before succeeded() so we don't choose the same
+ * request again, then remove it from pendingKeys before it completes!
+ */
+ static final short TRIP_PENDING_PRIORITY = NativeThread.HIGH_PRIORITY-1;
+
+ public synchronized void succeeded(final BaseSendableGet succeeded, final ChosenBlock req) {
+ if(req.isPersistent()) {
+ jobRunner.queue(new DBJob() {
+
+ public void run(ObjectContainer container, ClientContext context) {
+ if(container.ext().isActive(succeeded))
+ Logger.error(this, "ALREADY ACTIVE in succeeded(): "+succeeded);
+ container.activate(succeeded, 1);
+ schedCore.succeeded(succeeded, container);
+ container.deactivate(succeeded, 1);
+ }
+
+ }, TRIP_PENDING_PRIORITY, false);
+ // Boost the priority so the PersistentChosenRequest gets deleted reasonably quickly.
+ } else
+ schedTransient.succeeded(succeeded, null);
}
public void tripPendingKey(final KeyBlock block) {
if(logMINOR) Logger.minor(this,
"tripPendingKey("+block.getKey()+")");
+
if(offeredKeys != null) {
for(int i=0;i<offeredKeys.length;i++) {
offeredKeys[i].remove(block.getKey());
}
}
final Key key = block.getKey();
- final SendableGet[] gets;
- Object o;
- synchronized(pendingKeys) {
- o = pendingKeys.get(key);
- pendingKeys.remove(key);
- }
- if(o == null) return;
- if(o instanceof SendableGet) {
- gets = new SendableGet[] { (SendableGet) o };
- if(cooldownQueue != null)
- cooldownQueue.removeKey(key, (SendableGet)o,
((SendableGet)o).getCooldownWakeupByKey(key));
- } else {
- gets = (SendableGet[]) o;
- if(cooldownQueue != null)
- for(int i=0;i<gets.length;i++)
- cooldownQueue.removeKey(key, gets[i],
gets[i].getCooldownWakeupByKey(key));
-
- }
- if(gets == null) return;
- Runnable r = new Runnable() {
- public void run() {
- if(logMINOR) Logger.minor(this, "Running
"+gets.length+" callbacks off-thread for "+block.getKey());
- for(SendableGet get : gets) {
- try {
- if(logMINOR) Logger.minor(this,
"Calling callback for "+get+" for "+key);
- get.onGotKey(key, block,
ClientRequestScheduler.this);
- } catch (Throwable t) {
- Logger.error(this, "Caught
"+t+" running callback "+get+" for "+key, t);
- }
+ schedTransient.tripPendingKey(key, block, null, clientContext);
+ if(schedCore.anyProbablyWantKey(key, clientContext)) {
+ jobRunner.queue(new DBJob() {
+
+ public void run(ObjectContainer container, ClientContext context) {
+ if(logMINOR) Logger.minor(this, "tripPendingKey for "+key);
+ schedCore.tripPendingKey(key, block, container, clientContext);
+ }
- if(logMINOR) Logger.minor(this, "Finished
running callbacks");
- }
- };
- node.executor.execute(r, "Callbacks for "+name);
+ }, TRIP_PENDING_PRIORITY, false);
+ } else schedCore.countNegative();
}
- public boolean anyWantKey(Key key) {
- synchronized(pendingKeys) {
- return pendingKeys.get(key) != null;
- }
- }
-
/** If we want the offered key, or if force is enabled, queue it */
- public void maybeQueueOfferedKey(Key key, boolean force) {
+ public void maybeQueueOfferedKey(final Key key, boolean force) {
if(logMINOR)
Logger.minor(this, "maybeQueueOfferedKey("+key+","+force);
short priority = Short.MAX_VALUE;
- synchronized(pendingKeys) {
- if(force) {
- // FIXME what priority???
- priority = RequestStarter.BULK_SPLITFILE_PRIORITY_CLASS;
+ if(force) {
+ // FIXME what priority???
+ priority = RequestStarter.BULK_SPLITFILE_PRIORITY_CLASS;
+ }
+ priority = schedTransient.getKeyPrio(key, priority, null, clientContext);
+ if(priority < Short.MAX_VALUE) {
+ offeredKeys[priority].queueKey(key);
+ starter.wakeUp();
+ }
+
+ final short oldPrio = priority;
+
+ jobRunner.queue(new DBJob() {
+
+ public void run(ObjectContainer container, ClientContext context) {
+ // Don't activate/deactivate the key, because it's not persistent in the first place!!
+ short priority = schedCore.getKeyPrio(key, oldPrio, container, context);
+ if(priority >= oldPrio) return; // already on list at >= priority
+ offeredKeys[priority].queueKey(key.cloneKey());
+ starter.wakeUp();
+ }
- Object o = pendingKeys.get(key);
- if(o == null) {
- // Blah
- } else if(o instanceof SendableGet) {
- short p = ((SendableGet)o).getPriorityClass();
- if(p < priority) priority = p;
- } else { // if(o instanceof SendableGet[]) {
- SendableGet[] gets = (SendableGet[]) o;
- for(int i=0;i<gets.length;i++) {
- short p = gets[i].getPriorityClass();
- if(p < priority) priority = p;
- }
- }
- }
- if(priority == Short.MAX_VALUE) return;
- if(logMINOR)
- Logger.minor(this, "Priority: "+priority);
- offeredKeys[priority].queueKey(key);
- starter.wakeUp();
+
+ }, NativeThread.NORM_PRIORITY, false);
}
public void dequeueOfferedKey(Key key) {
@@ -719,38 +1001,211 @@
}
}
- public long queueCooldown(ClientKey key, SendableGet getter) {
- return cooldownQueue.add(key.getNodeKey(), getter);
+ /**
+ * MUST be called from database thread!
+ */
+ public long queueCooldown(ClientKey key, SendableGet getter, ObjectContainer container) {
+ if(getter.persistent())
+ return persistentCooldownQueue.add(key.getNodeKey(), getter, container);
+ else
+ return transientCooldownQueue.add(key.getNodeKey(), getter, null);
}
- public void moveKeysFromCooldownQueue() {
- if(cooldownQueue == null) return;
+ /**
+ * Restore keys from the given cooldown queue. Find any keys that are due to be
+ * restored, restore all requests both persistent and non-persistent for those keys.
+ * @param queue
+ * @param persistent
+ * @param container
+ * @return Long.MAX_VALUE if nothing is queued in the next WAIT_AFTER_NOTHING_TO_START
+ * millis, otherwise the time at which the next key is due to be restored, if there
+ * are keys queued to be restarted in the near future.
+ */
+ private long moveKeysFromCooldownQueue(CooldownQueue queue, boolean persistent, ObjectContainer container) {
+ if(queue == null) return Long.MAX_VALUE;
long now = System.currentTimeMillis();
- Key key;
- while((key = cooldownQueue.removeKeyBefore(now)) != null) {
+ /*
+ * Only go around once. We will be called again. If there are keys to move, then RequestStarter will not
+ * sleep, because it will start them. Then it will come back here. If we are off-thread i.e. on the database
+ * thread, then we will wake it up if we find keys... and we'll be scheduled again.
+ *
+ * FIXME: I think we need to restore all the listeners for a single key
+ * simultaneously to avoid some kind of race condition? Or could we just
+ * restore the one request on the queue? Maybe it's just a misguided
+ * optimisation? IIRC we had some severe problems when we didn't have
+ * this, related to requests somehow being lost altogether... Is it
+ * essential? We can save a query if it's not... Is this about requests
+ * or about keys? Should we limit all requests across any
+ * SendableRequest's to 3 every half hour for a specific key? Probably
+ * yes...? In which case, can the cooldown queue be entirely in RAM,
+ * and would it be useful for it to be? Less disk, more RAM... for fast
+ * nodes with little RAM it would be bad...
+ */
+ final int MAX_KEYS = 20;
+ Object ret = queue.removeKeyBefore(now, WAIT_AFTER_NOTHING_TO_START, container, MAX_KEYS);
+ if(ret == null) return Long.MAX_VALUE;
+ if(ret instanceof Long) {
+ return (Long) ret;
+ }
+ Key[] keys = (Key[]) ret;
+ for(int j=0;j<keys.length;j++) {
+ Key key = keys[j];
+ if(persistent)
+ container.activate(key, 5);
if(logMINOR) Logger.minor(this, "Restoring key: "+key);
- Object o;
- synchronized(pendingKeys) {
- o = pendingKeys.get(key);
- }
- if(o == null) {
+ SendableGet[] reqs = schedCore.requestsForKey(key, container, clientContext);
+ SendableGet[] transientReqs = schedTransient.requestsForKey(key, container, clientContext);
+ if(reqs == null && transientReqs == null) {
// Not an error as this can happen due to race conditions etc.
if(logMINOR) Logger.minor(this, "Restoring key but no keys queued?? for "+key);
- continue;
- } else if(o instanceof SendableGet) {
- SendableGet get = (SendableGet) o;
- get.requeueAfterCooldown(key, now);
- } else {
- SendableGet[] gets = (SendableGet[]) o;
- for(int i=0;i<gets.length;i++)
- gets[i].requeueAfterCooldown(key, now);
}
+ if(reqs != null) {
+ for(int i=0;i<reqs.length;i++) {
+ // Requests may or may not be returned activated from requestsForKey(), so don't check
+ // But do deactivate them once we're done with them.
+ container.activate(reqs[i], 1);
+ reqs[i].requeueAfterCooldown(key, now, container, clientContext);
+ container.deactivate(reqs[i], 1);
+ }
+ }
+ if(transientReqs != null) {
+ for(int i=0;i<transientReqs.length;i++)
+ transientReqs[i].requeueAfterCooldown(key, now, container, clientContext);
+ }
+ if(persistent)
+ container.deactivate(key, 5);
}
+ return Long.MAX_VALUE;
}
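
Note: removeKeyBefore() above has a deliberately overloaded return: null (nothing due within the window), a Long (when the next key becomes due), or a Key[] of keys due now. A standalone sketch of the same contract over a plain array of deadlines (the names and the deadline representation are inventions, not the real CooldownQueue API):

    import java.util.ArrayList;

    final class CooldownSketch {
        // Returns null, a Long (next due time), or a Long[] of due items.
        static Object removeKeyBefore(long now, long window, long[] deadlines) {
            ArrayList<Long> due = new ArrayList<Long>();
            long next = Long.MAX_VALUE;
            for(long d : deadlines) {
                if(d <= now) due.add(d);
                else if(d < next) next = d;
            }
            if(!due.isEmpty()) return due.toArray(new Long[due.size()]);
            if(next < now + window) return Long.valueOf(next);
            return null;
        }
    }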
- public long countQueuedRequests() {
- if(pendingKeys != null)
- return pendingKeys.size();
- else return 0;
+ public long countTransientQueuedRequests() {
+ return schedTransient.countQueuedRequests(null, clientContext);
}
+
+ public KeysFetchingLocally fetchingKeys() {
+ return schedCore;
+ }
+
+ public void removeFetchingKey(Key key) {
+ schedCore.removeFetchingKey(key);
+ }
+
+	public void removeTransientInsertFetching(SendableInsert insert, Object token) {
+		schedCore.removeTransientInsertFetching(insert, token);
+	}
+
+	/**
+	 * Map from a SendableGet implementing SupportsBulkCallFailure to BulkCallFailureItem[].
+ */
+ private transient HashMap bulkFailureLookupItems = new HashMap();
+ private transient HashMap bulkFailureLookupJob = new HashMap();
+
+	public void callFailure(final SendableGet get, final LowLevelGetException e, int prio, boolean persistent) {
+		if(!persistent) {
+			get.onFailure(e, null, null, clientContext);
+		} else {
+			jobRunner.queue(new DBJob() {
+
+				public void run(ObjectContainer container, ClientContext context) {
+					if(container.ext().isActive(get))
+						Logger.error(this, "ALREADY ACTIVE: "+get+" in callFailure(request)");
+					container.activate(get, 1);
+					get.onFailure(e, null, container, clientContext);
+					container.deactivate(get, 1);
+				}
+
+			}, prio, false);
+		}
+	}
+
+	public void callFailure(final SendableInsert insert, final LowLevelPutException e, int prio, boolean persistent) {
+		if(!persistent) {
+			insert.onFailure(e, null, null, clientContext);
+		} else {
+			jobRunner.queue(new DBJob() {
+
+				public void run(ObjectContainer container, ClientContext context) {
+					if(container.ext().isActive(insert))
+						Logger.error(this, "ALREADY ACTIVE: "+insert+" in callFailure(insert)");
+					container.activate(insert, 1);
+					insert.onFailure(e, null, container, context);
+					container.deactivate(insert, 1);
+				}
+
+			}, prio, false);
+		}
+	}
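Both callFailure() overloads use the same persistent-callback idiom: queue a DBJob, activate the object on the database thread, invoke the callback, then deactivate to keep the db4o reference cache small. A generic sketch of that idiom, reusing the DBJob/DBJobRunner types from this commit (the Callback interface here is hypothetical, for illustration only):

    abstract class DatabaseCallbacks {
        // Hypothetical callback type, not part of Freenet.
        interface Callback<T> { void call(T obj, ObjectContainer container, ClientContext context); }

        static <T> void runOnDatabaseThread(final T obj, final Callback<T> cb, DBJobRunner jobRunner, int prio) {
            jobRunner.queue(new DBJob() {
                public void run(ObjectContainer container, ClientContext context) {
                    boolean wasActive = container.ext().isActive(obj);
                    if(!wasActive) container.activate(obj, 1); // pull fields in from disk
                    cb.call(obj, container, context);
                    if(!wasActive) container.deactivate(obj, 1); // keep the db4o cache small
                }
            }, prio, false);
        }
    }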
+
+ public FECQueue getFECQueue() {
+ return clientContext.fecQueue;
+ }
+
+ public ClientContext getContext() {
+ return clientContext;
+ }
+
+ /**
+ * @return True unless the key was already present.
+ */
+ public boolean addToFetching(Key key) {
+ return schedCore.addToFetching(key);
+ }
+
+ public boolean addTransientInsertFetching(SendableInsert insert, Object
token) {
+ return schedCore.addTransientInsertFetching(insert, token);
+ }
+
+ public boolean hasFetchingKey(Key key) {
+ return schedCore.hasKey(key);
+ }
+
+ public long countPersistentWaitingKeys(ObjectContainer container) {
+ return schedCore.countWaitingKeys(container);
+ }
+
+ public long countPersistentQueuedRequests(ObjectContainer container) {
+ return schedCore.countQueuedRequests(container, clientContext);
+ }
+
+ public boolean isQueueAlmostEmpty() {
+ return starterQueueSize() < MAX_STARTER_QUEUE_SIZE / 4;
+ }
+
+ public boolean isInsertScheduler() {
+ return isInsertScheduler;
+ }
+
+	public void removeFromAllRequestsByClientRequest(ClientRequester clientRequest, SendableRequest get, boolean dontComplain, ObjectContainer container) {
+		if(get.persistent())
+			schedCore.removeFromAllRequestsByClientRequest(get, clientRequest, dontComplain, container);
+		else
+			schedTransient.removeFromAllRequestsByClientRequest(get, clientRequest, dontComplain, null);
+	}
+
+ public byte[] saltKey(Key key) {
+ MessageDigest md = SHA256.getMessageDigest();
+ md.update(key.getRoutingKey());
+ md.update(schedCore.globalSalt);
+ byte[] ret = md.digest();
+ SHA256.returnMessageDigest(md);
+ return ret;
+ }
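saltKey() above hashes the routing key together with the scheduler's random per-node globalSalt; the salted value is what the Bloom-filter-backed KeyListeners are keyed on, presumably so that the on-disk filters cannot be probed with known routing keys and so that false-positive patterns differ between nodes. A standalone sketch of the same construction (plain JDK MessageDigest; the real code pools digests via freenet.crypt.SHA256):

    import java.security.MessageDigest;
    import java.security.NoSuchAlgorithmException;

    final class KeySalter {
        private final byte[] globalSalt; // 32 random bytes, generated once per node

        KeySalter(byte[] globalSalt) {
            this.globalSalt = globalSalt;
        }

        byte[] saltKey(byte[] routingKey) {
            try {
                MessageDigest md = MessageDigest.getInstance("SHA-256");
                md.update(routingKey);
                md.update(globalSalt);
                return md.digest();
            } catch (NoSuchAlgorithmException e) {
                throw new Error("JVM lacks SHA-256", e);
            }
        }
    }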
+
+ void addPersistentPendingKeys(KeyListener listener) {
+ schedCore.addPendingKeys(listener);
+ }
+
+ public boolean objectCanNew(ObjectContainer container) {
+ Logger.error(this, "Not storing ClientRequestScheduler in
database", new Exception("error"));
+ return false;
+ }
+
+ public void wakeStarter() {
+ starter.wakeUp();
+ }
+
+ public boolean cacheInserts() {
+ return this.node.clientCore.cacheInserts();
+ }
+
}
Copied: trunk/freenet/src/freenet/client/async/ClientRequestSchedulerBase.java
(from rev 26320,
branches/db4o/freenet/src/freenet/client/async/ClientRequestSchedulerBase.java)
===================================================================
--- trunk/freenet/src/freenet/client/async/ClientRequestSchedulerBase.java
(rev 0)
+++ trunk/freenet/src/freenet/client/async/ClientRequestSchedulerBase.java
2009-04-01 20:34:09 UTC (rev 26322)
@@ -0,0 +1,484 @@
+/* This code is part of Freenet. It is distributed under the GNU General
+ * Public License, version 2 (or at your option any later version). See
+ * http://www.gnu.org/ for further details of the GPL. */
+package freenet.client.async;
+
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import com.db4o.ObjectContainer;
+
+import freenet.crypt.RandomSource;
+import freenet.keys.Key;
+import freenet.keys.KeyBlock;
+import freenet.keys.NodeSSK;
+import freenet.node.BaseSendableGet;
+import freenet.node.RequestClient;
+import freenet.node.RequestScheduler;
+import freenet.node.RequestStarter;
+import freenet.node.SendableGet;
+import freenet.node.SendableInsert;
+import freenet.node.SendableRequest;
+import freenet.support.LogThresholdCallback;
+import freenet.support.Logger;
+import freenet.support.RandomGrabArray;
+import freenet.support.SectoredRandomGrabArrayWithInt;
+import freenet.support.SectoredRandomGrabArrayWithObject;
+import freenet.support.SortedVectorByNumber;
+
+/**
+ * Base class for ClientRequestSchedulerCore and ClientRequestSchedulerNonPersistent;
+ * contains some of the methods and most of the variables. In particular, it contains all
+ * the methods that deal primarily with pendingKeys.
+ * @author toad
+ */
+abstract class ClientRequestSchedulerBase {
+
+ private static volatile boolean logMINOR;
+
+ static {
+ Logger.registerLogThresholdCallback(new LogThresholdCallback() {
+
+ @Override
+ public void shouldUpdate() {
+ logMINOR = Logger.shouldLog(Logger.MINOR, this);
+ }
+ });
+ }
+
+	/** Minimum number of retries at which we start to hold it against a request.
+	 * See the comments on fixRetryCount; we don't want many untried requests to prevent
+	 * us from trying requests which have only been tried once (e.g. USK checkers), from
+	 * other clients (and we DO want retries to take precedence over client round robin IF
+	 * the request has been tried many times already). */
+	private static final int MIN_RETRY_COUNT = 3;
+
+ final boolean isInsertScheduler;
+ final boolean isSSKScheduler;
+
+	/**
+	 * Structure:
+	 * array (by priority) ->                    // one element per possible priority
+	 *   SortedVectorByNumber (by # retries) ->  // contains each current #retries
+	 *     RandomGrabArray                       // contains each element, allows fast fetch-and-drop-a-random-element
+	 *
+	 * To speed up fetching, an RGA or SVBN must only exist if it is non-empty.
+	 */
+	protected final SortedVectorByNumber[] priorities;
+ protected transient ClientRequestScheduler sched;
+ /** Transient even for persistent scheduler. */
+ protected transient Set<KeyListener> keyListeners;
+
+ abstract boolean persistent();
+
+	protected ClientRequestSchedulerBase(boolean forInserts, boolean forSSKs) {
+		this.isInsertScheduler = forInserts;
+		this.isSSKScheduler = forSSKs;
+		keyListeners = new HashSet<KeyListener>();
+		priorities = new SortedVectorByNumber[RequestStarter.NUMBER_OF_PRIORITY_CLASSES];
+ }
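A stripped-down, transient model of the three-level structure documented above (plain JDK collections; the per-client sector level is omitted here, and the real SortedVectorByNumber/RandomGrabArray carry persistence plumbing):

    import java.util.ArrayList;
    import java.util.List;
    import java.util.Random;
    import java.util.TreeMap;

    class TinyScheduler {
        // priority -> (fixed retry count -> bucket of requests)
        private final List<TreeMap<Integer, List<Object>>> priorities;
        private final Random random = new Random();

        TinyScheduler(int numPriorities) {
            priorities = new ArrayList<TreeMap<Integer, List<Object>>>();
            for(int i = 0; i < numPriorities; i++)
                priorities.add(new TreeMap<Integer, List<Object>>());
        }

        void register(Object req, int priority, int fixedRetryCount) {
            TreeMap<Integer, List<Object>> byRetry = priorities.get(priority);
            List<Object> bucket = byRetry.get(fixedRetryCount);
            if(bucket == null) byRetry.put(fixedRetryCount, bucket = new ArrayList<Object>());
            bucket.add(req);
        }

        /** Highest priority first, fewest (fixed) retries first, random within a bucket. */
        Object removeFirst() {
            for(TreeMap<Integer, List<Object>> byRetry : priorities) {
                if(byRetry.isEmpty()) continue;
                List<Object> bucket = byRetry.firstEntry().getValue();
                Object req = bucket.remove(random.nextInt(bucket.size()));
                if(bucket.isEmpty()) byRetry.remove(byRetry.firstKey()); // empty buckets must not exist
                return req;
            }
            return null;
        }
    }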
+
+	/**
+	 * @param req
+	 * @param random
+	 * @param container
+	 * @param maybeActive Array of requests, can be null, which are being registered
+	 * in this group. These will be ignored for purposes of checking whether stuff
+	 * is activated when it shouldn't be. It is perfectly okay to have req be a
+	 * member of maybeActive.
+	 *
+	 * FIXME: Either get rid of the debugging code and therefore get rid of maybeActive,
+	 * or make req a SendableRequest[] and register them all at once.
+	 */
+	void innerRegister(SendableRequest req, RandomSource random, ObjectContainer container, SendableRequest[] maybeActive) {
+		if(isInsertScheduler && req instanceof BaseSendableGet)
+			throw new IllegalArgumentException("Adding a SendableGet to an insert scheduler!!");
+		if((!isInsertScheduler) && req instanceof SendableInsert)
+			throw new IllegalArgumentException("Adding a SendableInsert to a request scheduler!!");
+		if(isInsertScheduler != req.isInsert())
+			throw new IllegalArgumentException("Request isInsert="+req.isInsert()+" but my isInsertScheduler="+isInsertScheduler+"!!");
+		if(req.persistent() != persistent())
+			throw new IllegalArgumentException("innerRegister for persistence="+req.persistent()+" but our persistence is "+persistent());
+		if(req.getPriorityClass(container) == 0) {
+			Logger.normal(this, "Something weird...");
+			Logger.normal(this, "Priority "+req.getPriorityClass(container));
+		}
+		int retryCount = req.getRetryCount();
+		short prio = req.getPriorityClass(container);
+		if(logMINOR) Logger.minor(this, "Still registering "+req+" at prio "+prio+" retry "+retryCount+" for "+req.getClientRequest());
+		addToRequestsByClientRequest(req.getClientRequest(), req, container);
+		addToGrabArray(prio, retryCount, fixRetryCount(retryCount), req.getClient(container), req.getClientRequest(), req, random, container);
+		if(logMINOR) Logger.minor(this, "Registered "+req+" on prioclass="+prio+", retrycount="+retryCount);
+		if(persistent())
+			sched.maybeAddToStarterQueue(req, container, maybeActive);
+	}
+
+	protected void addToRequestsByClientRequest(ClientRequester clientRequest, SendableRequest req, ObjectContainer container) {
+		if(clientRequest != null || persistent()) { // Client request null is only legal for transient requests
+			boolean deactivate = false;
+			if(persistent()) {
+				deactivate = !container.ext().isActive(clientRequest);
+				if(deactivate) container.activate(clientRequest, 1);
+			}
+			clientRequest.addToRequests(req, container);
+			if(deactivate) container.deactivate(clientRequest, 1);
+		}
+	}
+
+	synchronized void addToGrabArray(short priorityClass, int retryCount, int rc, RequestClient client, ClientRequester cr, SendableRequest req, RandomSource random, ObjectContainer container) {
+		if((priorityClass > RequestStarter.MINIMUM_PRIORITY_CLASS) || (priorityClass < RequestStarter.MAXIMUM_PRIORITY_CLASS))
+			throw new IllegalStateException("Invalid priority: "+priorityClass+" - range is "+RequestStarter.MAXIMUM_PRIORITY_CLASS+" (most important) to "+RequestStarter.MINIMUM_PRIORITY_CLASS+" (least important)");
+		// Priority
+		SortedVectorByNumber prio = priorities[priorityClass];
+		if(prio == null) {
+			prio = new SortedVectorByNumber(persistent());
+			priorities[priorityClass] = prio;
+			if(persistent())
+				container.store(this);
+		}
+		// Client
+		SectoredRandomGrabArrayWithInt clientGrabber = (SectoredRandomGrabArrayWithInt) prio.get(rc, container);
+		if(persistent()) container.activate(clientGrabber, 1);
+		if(clientGrabber == null) {
+			clientGrabber = new SectoredRandomGrabArrayWithInt(rc, persistent(), container, null);
+			prio.add(clientGrabber, container);
+			if(logMINOR) Logger.minor(this, "Registering retry count "+rc+" with prioclass "+priorityClass+" on "+clientGrabber+" for "+prio);
+		}
+		// SectoredRandomGrabArrayWithInt and lower down have hierarchical locking and auto-remove.
+		// To avoid a race condition it is essential to mirror that here.
+		synchronized(clientGrabber) {
+			// Request
+			SectoredRandomGrabArrayWithObject requestGrabber = (SectoredRandomGrabArrayWithObject) clientGrabber.getGrabber(client);
+			if(persistent()) container.activate(requestGrabber, 1);
+			if(requestGrabber == null) {
+				requestGrabber = new SectoredRandomGrabArrayWithObject(client, persistent(), container, clientGrabber);
+				if(logMINOR)
+					Logger.minor(this, "Creating new grabber: "+requestGrabber+" for "+client+" from "+clientGrabber+" : "+prio+" : prio="+priorityClass+", rc="+rc);
+				clientGrabber.addGrabber(client, requestGrabber, container);
+			}
+			requestGrabber.add(cr, req, container);
+		}
+	}
+
+	/**
+	 * Mangle the retry count.
+	 * Below a certain number of attempts, we don't prefer one request to another just because
+	 * it's been tried more times. The reason for this is to prevent floods of low-retry-count
+	 * requests from starving other clients' requests which need to be retried. The other
+	 * solution would be to sort by client before retry count, but that would be excessive
+	 * IMHO; we DO want to avoid rerequesting keys we've tried many times before.
+	 */
+	protected int fixRetryCount(int retryCount) {
+		return Math.max(0, retryCount-MIN_RETRY_COUNT);
+	}
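With MIN_RETRY_COUNT = 3, raw retry counts 0 through 3 all land in bucket 0, so brand-new requests and requests tried a few times compete as equals; only beyond that do retries start to count against a request. A quick check of the mapping (fragment):

    // fixRetryCount(rc) == Math.max(0, rc - MIN_RETRY_COUNT), with MIN_RETRY_COUNT == 3
    for(int rc : new int[] { 0, 1, 2, 3, 4, 10 })
        System.out.println(rc + " -> " + Math.max(0, rc - 3));
    // prints: 0 -> 0, 1 -> 0, 2 -> 0, 3 -> 0, 4 -> 1, 10 -> 7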
+
+	/**
+	 * Get SendableRequests for a given ClientRequester.
+	 * Note that this will return all kinds of requests, so the caller will have
+	 * to filter them according to isInsert and isSSKScheduler.
+	 */
+	protected SendableRequest[] getSendableRequests(ClientRequester request, ObjectContainer container) {
+		if(request != null || persistent()) // Client request null is only legal for transient requests
+			return request.getSendableRequests(container);
+		else return null;
+	}
+
+	void removeFromAllRequestsByClientRequest(SendableRequest req, ClientRequester cr, boolean dontComplain, ObjectContainer container) {
+		if(cr != null || persistent()) // Client request null is only legal for transient requests
+			cr.removeFromRequests(req, container, dontComplain);
+	}
+
+	public void reregisterAll(ClientRequester request, RandomSource random, RequestScheduler lock, ObjectContainer container, ClientContext context) {
+		if(request.persistent() != persistent()) return;
+		SendableRequest[] reqs = getSendableRequests(request, container);
+
+		if(reqs == null) return;
+		for(int i=0;i<reqs.length;i++) {
+			SendableRequest req = reqs[i];
+			if(persistent())
+				container.activate(req, 1);
+			// FIXME call getSendableRequests() and do the sorting in ClientRequestScheduler.reregisterAll().
+			if(req.isInsert() != isInsertScheduler || req.isSSK() != isSSKScheduler) {
+				container.deactivate(req, 1);
+				continue;
+			}
+			// Unregister from the RGAs, but keep the pendingKeys and cooldown queue data.
+			req.unregister(container, context);
+			// Then can do innerRegister() (not register()).
+			innerRegister(req, random, container, null);
+			if(persistent())
+				container.deactivate(req, 1);
+		}
+	}
+
+ public void succeeded(BaseSendableGet succeeded, ObjectContainer
container) {
+ // Do nothing.
+ }
+
+	public synchronized void addPendingKeys(KeyListener listener) {
+		if(listener == null) throw new NullPointerException();
+		keyListeners.add(listener);
+		Logger.normal(this, "Added pending keys to "+this+" : size now "+keyListeners.size()+" : "+listener);
+	}
+
+	public synchronized boolean removePendingKeys(KeyListener listener) {
+		boolean ret = keyListeners.remove(listener);
+		listener.onRemove();
+		Logger.normal(this, "Removed pending keys from "+this+" : size now "+keyListeners.size()+" : "+listener);
+		return ret;
+	}
+
+	public synchronized boolean removePendingKeys(HasKeyListener hasListener) {
+		boolean found = false;
+		for(Iterator<KeyListener> i = keyListeners.iterator();i.hasNext();) {
+			KeyListener listener = i.next();
+			if(listener == null) {
+				i.remove();
+				Logger.error(this, "Null KeyListener in removePendingKeys()");
+				continue;
+			}
+			if(listener.getHasKeyListener() == hasListener) {
+				found = true;
+				i.remove();
+				listener.onRemove();
+				Logger.normal(this, "Removed pending keys from "+this+" : size now "+keyListeners.size()+" : "+listener);
+			}
+		}
+		return found;
+	}
+
+	public short getKeyPrio(Key key, short priority, ObjectContainer container, ClientContext context) {
+		byte[] saltedKey = ((key instanceof NodeSSK) ? context.getSskFetchScheduler() : context.getChkFetchScheduler()).saltKey(key);
+		ArrayList<KeyListener> matches = null;
+		synchronized(this) {
+			for(KeyListener listener : keyListeners) {
+				if(!listener.probablyWantKey(key, saltedKey)) continue;
+				if(matches == null) matches = new ArrayList<KeyListener>();
+				matches.add(listener);
+			}
+		}
+		if(matches == null) return priority;
+		for(KeyListener listener : matches) {
+			short prio = listener.definitelyWantKey(key, saltedKey, container, sched.clientContext);
+			if(prio == -1) continue;
+			if(prio < priority) priority = prio;
+		}
+		return priority;
+	}
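getKeyPrio(), anyWantKey() and tripPendingKey() all follow the same two-phase check: a cheap probablyWantKey() pass over the Bloom-filter-backed listeners is done under the lock to collect candidates, then the expensive definitelyWantKey() calls (which may hit the database) run outside it. A generic sketch of that pattern (the Filter interface here is hypothetical):

    import java.util.ArrayList;
    import java.util.List;
    import java.util.Set;

    // Hypothetical two-phase lookup: probabilistic test under the lock, exact test outside it.
    interface Filter<K> {
        boolean probablyWants(K key);   // Bloom filter: false positives possible, no false negatives
        boolean definitelyWants(K key); // exact, potentially slow
    }

    class TwoPhaseLookup {
        static <K> boolean anyWant(Set<Filter<K>> filters, K key) {
            List<Filter<K>> candidates = new ArrayList<Filter<K>>();
            synchronized(filters) { // cheap pass: never blocks on slow checks
                for(Filter<K> f : filters)
                    if(f.probablyWants(key)) candidates.add(f);
            }
            for(Filter<K> f : candidates) // slow pass, lock released
                if(f.definitelyWants(key)) return true;
            return false;
        }
    }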
+
+ public synchronized long countWaitingKeys(ObjectContainer container) {
+ long count = 0;
+ for(KeyListener listener : keyListeners)
+ count += listener.countKeys();
+ return count;
+ }
+
+	public boolean anyWantKey(Key key, ObjectContainer container, ClientContext context) {
+		byte[] saltedKey = ((key instanceof NodeSSK) ? context.getSskFetchScheduler() : context.getChkFetchScheduler()).saltKey(key);
+		ArrayList<KeyListener> matches = null;
+		synchronized(this) {
+			for(KeyListener listener : keyListeners) {
+				if(!listener.probablyWantKey(key, saltedKey)) continue;
+				if(matches == null) matches = new ArrayList<KeyListener>();
+				matches.add(listener);
+			}
+		}
+		if(matches != null) {
+			for(KeyListener listener : matches) {
+				if(listener.definitelyWantKey(key, saltedKey, container, sched.clientContext) >= 0)
+					return true;
+			}
+		}
+		return false;
+	}
+
+	public synchronized boolean anyProbablyWantKey(Key key, ClientContext context) {
+		byte[] saltedKey = ((key instanceof NodeSSK) ? context.getSskFetchScheduler() : context.getChkFetchScheduler()).saltKey(key);
+		for(KeyListener listener : keyListeners) {
+			if(listener.probablyWantKey(key, saltedKey))
+				return true;
+		}
+		return false;
+	}
+
+ private long persistentTruePositives;
+ private long persistentFalsePositives;
+ private long persistentNegatives;
+
+	public boolean tripPendingKey(Key key, KeyBlock block, ObjectContainer container, ClientContext context) {
+		if((key instanceof NodeSSK) != isSSKScheduler) {
+			Logger.error(this, "Key "+key+" on scheduler ssk="+isSSKScheduler, new Exception("debug"));
+		}
+		byte[] saltedKey = ((key instanceof NodeSSK) ? context.getSskFetchScheduler() : context.getChkFetchScheduler()).saltKey(key);
+		ArrayList<KeyListener> matches = null;
+		synchronized(this) {
+			for(KeyListener listener : keyListeners) {
+				if(!listener.probablyWantKey(key, saltedKey)) continue;
+				if(matches == null) matches = new ArrayList<KeyListener>();
+				if(matches.contains(listener)) {
+					Logger.error(this, "In matches twice, presumably in keyListeners twice?: "+listener);
+					continue;
+				}
+				matches.add(listener);
+			}
+		}
+		boolean ret = false;
+		if(matches != null) {
+			for(KeyListener listener : matches) {
+				if(listener.handleBlock(key, saltedKey, block, container, context))
+					ret = true;
+				if(listener.isEmpty()) {
+					synchronized(this) {
+						keyListeners.remove(listener);
+					}
+					listener.onRemove();
+				}
+			}
+		} else return false;
+		if(ret) {
+			// True positive
+			synchronized(this) {
+				persistentTruePositives++;
+				logFalsePositives("hit");
+			}
+		} else {
+			synchronized(this) {
+				persistentFalsePositives++;
+				logFalsePositives("false");
+			}
+		}
+		return ret;
+	}
+
+ synchronized void countNegative() {
+ persistentNegatives++;
+ if(persistentNegatives % 32 == 0)
+ logFalsePositives("neg");
+ }
+
+	private synchronized void logFalsePositives(String phase) {
+		long totalPositives = persistentFalsePositives + persistentTruePositives;
+		double percent;
+		if(totalPositives > 0)
+			// Scale to a percentage: the thresholds below (2, 10) and the "%" in the output expect 0-100.
+			percent = ((double) persistentFalsePositives) * 100.0 / totalPositives;
+		else
+			percent = 0;
+		if(!(percent > 2 || logMINOR)) return;
+		StringBuilder buf = new StringBuilder();
+		if(persistent())
+			buf.append("Persistent ");
+		else
+			buf.append("Transient ");
+		buf.append("false positives ");
+		buf.append(phase);
+		buf.append(": ");
+
+		if(totalPositives != 0) {
+			buf.append(percent);
+			buf.append("% ");
+		}
+		buf.append("(false=");
+		buf.append(persistentFalsePositives);
+		buf.append(" true=");
+		buf.append(persistentTruePositives);
+		buf.append(" negatives=");
+		buf.append(persistentNegatives);
+		buf.append(')');
+		if(percent > 10)
+			Logger.error(this, buf.toString());
+		else if(percent > 2)
+			Logger.normal(this, buf.toString());
+		else
+			Logger.minor(this, buf.toString());
+	}
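For example, 5 false positives out of 100 total positives is 100 * 5 / 100 = 5.0%: above the 2% threshold, so the line is logged at NORMAL; past 10% the same line escalates to ERROR, flagging a Bloom filter that is badly over-full for its contents.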
+
+	public SendableGet[] requestsForKey(Key key, ObjectContainer container, ClientContext context) {
+		ArrayList<SendableGet> list = null;
+		byte[] saltedKey = ((key instanceof NodeSSK) ? context.getSskFetchScheduler() : context.getChkFetchScheduler()).saltKey(key);
+		synchronized(this) {
+			for(KeyListener listener : keyListeners) {
+				if(!listener.probablyWantKey(key, saltedKey)) continue;
+				SendableGet[] reqs = listener.getRequestsForKey(key, saltedKey, container, context);
+				if(reqs == null) continue;
+				if(list == null) list = new ArrayList<SendableGet>();
+				for(int i=0;i<reqs.length;i++) list.add(reqs[i]);
+			}
+		}
+		if(list == null) return null;
+		else return list.toArray(new SendableGet[list.size()]);
+	}
+
+ public void onStarted() {
+ keyListeners = new HashSet<KeyListener>();
+ }
+
+ public String toString() {
+ StringBuffer sb = new StringBuffer();
+ sb.append(super.toString());
+ sb.append(':');
+ if(isInsertScheduler)
+ sb.append("insert:");
+ if(isSSKScheduler)
+ sb.append("SSK");
+ else
+ sb.append("CHK");
+ return sb.toString();
+ }
+
+	public synchronized long countQueuedRequests(ObjectContainer container, ClientContext context) {
+		long total = 0;
+		for(int i=0;i<priorities.length;i++) {
+			SortedVectorByNumber prio = priorities[i];
+			if(prio == null || prio.isEmpty())
+				System.out.println("Priority "+i+" : empty");
+			else {
+				System.out.println("Priority "+i+" : "+prio.count());
+				for(int j=0;j<prio.count();j++) {
+					int frc = prio.getNumberByIndex(j);
+					System.out.println("Fixed retry count: "+frc);
+					SectoredRandomGrabArrayWithInt clientGrabber = (SectoredRandomGrabArrayWithInt) prio.get(frc, container);
+					container.activate(clientGrabber, 1);
+					System.out.println("Clients: "+clientGrabber.size()+" for "+clientGrabber);
+					for(int k=0;k<clientGrabber.size();k++) {
+						Object client = clientGrabber.getClient(k);
+						container.activate(client, 1);
+						System.out.println("Client "+k+" : "+client);
+						container.deactivate(client, 1);
+						SectoredRandomGrabArrayWithObject requestGrabber = (SectoredRandomGrabArrayWithObject) clientGrabber.getGrabber(client);
+						container.activate(requestGrabber, 1);
+						System.out.println("SRGA for client: "+requestGrabber);
+						for(int l=0;l<requestGrabber.size();l++) {
+							client = requestGrabber.getClient(l);
+							container.activate(client, 1);
+							System.out.println("Request "+l+" : "+client);
+							container.deactivate(client, 1);
+							RandomGrabArray rga = (RandomGrabArray) requestGrabber.getGrabber(client);
+							container.activate(rga, 1);
+							System.out.println("Queued SendableRequests: "+rga.size()+" on "+rga);
+							long sendable = 0;
+							long all = 0;
+							for(int m=0;m<rga.size();m++) {
+								SendableRequest req = (SendableRequest) rga.get(m, container);
+								if(req == null) continue;
+								container.activate(req, 1);
+								sendable += req.sendableKeys(container, context).length;
+								all += req.allKeys(container, context).length;
+								container.deactivate(req, 1);
+							}
+							System.out.println("Sendable keys: "+sendable+" all keys "+all+" diff "+(all-sendable));
+							total += all;
+							container.deactivate(rga, 1);
+						}
+						container.deactivate(requestGrabber, 1);
+					}
+					container.deactivate(clientGrabber, 1);
+				}
+			}
+		}
+		return total;
+	}
+}
Copied: trunk/freenet/src/freenet/client/async/ClientRequestSchedulerCore.java
(from rev 26320,
branches/db4o/freenet/src/freenet/client/async/ClientRequestSchedulerCore.java)
===================================================================
--- trunk/freenet/src/freenet/client/async/ClientRequestSchedulerCore.java
(rev 0)
+++ trunk/freenet/src/freenet/client/async/ClientRequestSchedulerCore.java
2009-04-01 20:34:09 UTC (rev 26322)
@@ -0,0 +1,822 @@
+/* This code is part of Freenet. It is distributed under the GNU General
+ * Public License, version 2 (or at your option any later version). See
+ * http://www.gnu.org/ for further details of the GPL. */
+package freenet.client.async;
+
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.LinkedList;
+import java.util.List;
+
+import com.db4o.ObjectContainer;
+import com.db4o.ObjectSet;
+import com.db4o.query.Predicate;
+import com.db4o.query.Query;
+
+import freenet.client.FetchContext;
+import freenet.crypt.RandomSource;
+import freenet.keys.ClientKey;
+import freenet.keys.Key;
+import freenet.node.BaseSendableGet;
+import freenet.node.KeysFetchingLocally;
+import freenet.node.Node;
+import freenet.node.RequestStarter;
+import freenet.node.SendableGet;
+import freenet.node.SendableInsert;
+import freenet.node.SendableRequest;
+import freenet.node.SendableRequestItem;
+import freenet.support.LogThresholdCallback;
+import freenet.support.Logger;
+import freenet.support.PrioritizedSerialExecutor;
+import freenet.support.RandomGrabArray;
+import freenet.support.SectoredRandomGrabArrayWithInt;
+import freenet.support.SectoredRandomGrabArrayWithObject;
+import freenet.support.SortedVectorByNumber;
+import freenet.support.io.NativeThread;
+
+/**
+ * A persistent class that functions as the core of the ClientRequestScheduler.
+ * Does not refer to any non-persistable classes as member variables: Node must always
+ * be passed in if we need to use it!
+ * @author toad
+ */
+class ClientRequestSchedulerCore extends ClientRequestSchedulerBase implements KeysFetchingLocally {
+
+ /** Identifier in the database for the node we are attached to */
+ private final long nodeDBHandle;
+ final PersistentCooldownQueue persistentCooldownQueue;
+ private transient long initTime;
+
+ private static volatile boolean logMINOR;
+
+ static {
+ Logger.registerLogThresholdCallback(new LogThresholdCallback() {
+
+ @Override
+ public void shouldUpdate() {
+ logMINOR = Logger.shouldLog(Logger.MINOR, this);
+ }
+ });
+ }
+
+	/**
+	 * All Keys we are currently fetching.
+	 * Locally originated requests only; this avoids some complications with HTL,
+	 * and also has the benefit that we can see stuff that's been scheduled on a SenderThread
+	 * but that thread hasn't started yet. FIXME: Both issues can be avoided: first we'd get
+	 * rid of the SenderThread and start the requests directly and asynchronously, secondly
+	 * we'd move this to Node but only track keys we are fetching at max HTL.
+	 * LOCKING: Always lock this LAST.
+	 */
+	private transient HashSet keysFetching;
+
+ private class RunningTransientInsert {
+
+ final SendableInsert insert;
+ final Object token;
+
+ RunningTransientInsert(SendableInsert i, Object t) {
+ insert = i;
+ token = t;
+ }
+
+ public int hashCode() {
+ return insert.hashCode() ^ token.hashCode();
+ }
+
+		public boolean equals(Object o) {
+			if(!(o instanceof RunningTransientInsert)) return false;
+			RunningTransientInsert r = (RunningTransientInsert) o;
+			return r.insert == insert && (r.token == token || r.token.equals(token));
+		}
+
+ }
+
+	private transient HashSet<RunningTransientInsert> runningTransientInserts;
+
+ public final byte[] globalSalt;
+
+ private transient List<RandomGrabArray> recentSuccesses;
+
+	/**
+	 * Fetch a ClientRequestSchedulerCore from the database, or create a new one.
+	 * @param node
+	 * @param forInserts
+	 * @param forSSKs
+	 * @param selectorContainer
+	 * @param executor
+	 * @return
+	 */
+	public static ClientRequestSchedulerCore create(Node node, final boolean forInserts, final boolean forSSKs, ObjectContainer selectorContainer, long cooldownTime, PrioritizedSerialExecutor databaseExecutor, ClientRequestScheduler sched, ClientContext context) {
+		final long nodeDBHandle = node.nodeDBHandle;
+		ObjectSet<ClientRequestSchedulerCore> results = selectorContainer.query(new Predicate<ClientRequestSchedulerCore>() {
+			public boolean match(ClientRequestSchedulerCore core) {
+				if(core.nodeDBHandle != nodeDBHandle) return false;
+				if(core.isInsertScheduler != forInserts) return false;
+				if(core.isSSKScheduler != forSSKs) return false;
+				return true;
+			}
+		});
+		ClientRequestSchedulerCore core;
+		if(results.hasNext()) {
+			core = results.next();
+			selectorContainer.activate(core, 2);
+			System.err.println("Loaded core...");
+			if(core.nodeDBHandle != nodeDBHandle) throw new IllegalStateException("Wrong nodeDBHandle");
+			if(core.isInsertScheduler != forInserts) throw new IllegalStateException("Wrong isInsertScheduler");
+			if(core.isSSKScheduler != forSSKs) throw new IllegalStateException("Wrong forSSKs");
+		} else {
+			core = new ClientRequestSchedulerCore(node, forInserts, forSSKs, selectorContainer, cooldownTime);
+			selectorContainer.store(core);
+			System.err.println("Created new core...");
+		}
+		core.onStarted(selectorContainer, cooldownTime, sched, context);
+		return core;
+	}
+
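create() uses db4o's native-query API: a Predicate subclass whose match() method filters candidate instances of the queried class. A minimal standalone example of the same API (the Config class here is hypothetical, for illustration only):

    import com.db4o.ObjectContainer;
    import com.db4o.ObjectSet;
    import com.db4o.query.Predicate;

    // Hypothetical persistent class, for illustration only.
    class Config {
        long nodeDBHandle;
        boolean forInserts;
    }

    class ConfigLookup {
        static Config find(ObjectContainer db, final long handle, final boolean inserts) {
            ObjectSet<Config> results = db.query(new Predicate<Config>() {
                public boolean match(Config c) {
                    return c.nodeDBHandle == handle && c.forInserts == inserts;
                }
            });
            return results.hasNext() ? results.next() : null;
        }
    }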
+	ClientRequestSchedulerCore(Node node, boolean forInserts, boolean forSSKs, ObjectContainer selectorContainer, long cooldownTime) {
+		super(forInserts, forSSKs);
+		this.nodeDBHandle = node.nodeDBHandle;
+		if(!forInserts) {
+			this.persistentCooldownQueue = new PersistentCooldownQueue();
+		} else {
+			this.persistentCooldownQueue = null;
+		}
+		globalSalt = new byte[32];
+		node.random.nextBytes(globalSalt);
+	}
+
+	private void onStarted(ObjectContainer container, long cooldownTime, ClientRequestScheduler sched, ClientContext context) {
+		super.onStarted();
+		System.err.println("insert scheduler: "+isInsertScheduler);
+		if(!isInsertScheduler) {
+			persistentCooldownQueue.setCooldownTime(cooldownTime);
+		}
+		this.sched = sched;
+		this.initTime = System.currentTimeMillis();
+		// We DO NOT want to rerun the query after consuming the initial set...
+		if(!isInsertScheduler) {
+			keysFetching = new HashSet();
+			runningTransientInserts = null;
+			this.recentSuccesses = new ArrayList<RandomGrabArray>();
+		} else {
+			keysFetching = null;
+			runningTransientInserts = new HashSet<RunningTransientInsert>();
+		}
+ if(isInsertScheduler) {
+ preRegisterMeRunner = new DBJob() {
+
+ public void run(ObjectContainer container,
ClientContext context) {
+ synchronized(ClientRequestSchedulerCore.this) {
+ if(registerMeSet != null) return;
+ }
+ long tStart = System.currentTimeMillis();
+ // FIXME REDFLAG EVIL DB4O BUG!!!
+ // FIXME verify and file a bug
+ // This code doesn't check the first bit!
+ // I think this is related to the comparator...
+// registerMeSet = container.query(new Predicate()
{
+// public boolean match(RegisterMe reg) {
+// if(reg.core !=
ClientRequestSchedulerCore.this) return false;
+// if(reg.key.addedTime >
initTime) return false;
+// return true;
+// }
+// }, new Comparator() {
+//
+// public int compare(Object arg0, Object
arg1) {
+// RegisterMe reg0 = (RegisterMe)
arg0;
+// RegisterMe reg1 = (RegisterMe)
arg1;
+// RegisterMeSortKey key0 =
reg0.key;
+// RegisterMeSortKey key1 =
reg1.key;
+// return key0.compareTo(key1);
+// }
+//
+// });
+ ObjectSet results = null;
+ for(int
i=RequestStarter.MAXIMUM_PRIORITY_CLASS;i<=RequestStarter.MINIMUM_PRIORITY_CLASS;i++)
{
+ Query query = container.query();
+ query.constrain(RegisterMe.class);
+
query.descend("core").constrain(ClientRequestSchedulerCore.this).and(query.descend("priority").constrain(i));
+ results = query.execute();
+ if(results.hasNext()) {
+ break;
+ } else results = null;
+ }
+ if(results == null)
+ return;
+ // This throws NotSupported.
+//
query.descend("core").constrain(this).identity().
+//
and(query.descend("key").descend("addedTime").constrain(new
Long(initTime)).smaller());
+ /**
+ * FIXME DB4O
+ * db4o says it has indexed core. But then when
we try to query, it produces a diagnostic
+ * suggesting we index it. And of course the
query takes ages and uses tons of RAM. So don't
+ * try to filter by core at this point, deal
with that later.
+ */
+//
query.descend("core").constrain(ClientRequestSchedulerCore.this);
+// Evaluation eval = new Evaluation() {
+//
+// public void evaluate(Candidate
candidate) {
+// RegisterMe reg = (RegisterMe)
candidate.getObject();
+// if(reg.key.addedTime > initTime
|| reg.core != ClientRequestSchedulerCore.this) {
+//
candidate.include(false);
+//
candidate.objectContainer().deactivate(reg.key, 1);
+//
candidate.objectContainer().deactivate(reg, 1);
+// } else {
+// candidate.include(true);
+// }
+// }
+//
+// };
+// query.constrain(eval);
+//
query.descend("key").descend("priority").orderAscending();
+//
query.descend("key").descend("addedTime").orderAscending();
+ synchronized(ClientRequestSchedulerCore.this) {
+ registerMeSet = results;
+ }
+ long tEnd = System.currentTimeMillis();
+ if(logMINOR)
+ Logger.minor(this, "RegisterMe query took
"+(tEnd-tStart)+" hasNext="+registerMeSet.hasNext()+" for
insert="+isInsertScheduler+" ssk="+isSSKScheduler);
+// if(logMINOR)
+// Logger.minor(this, "RegisterMe query
returned: "+registerMeSet.size());
+ boolean boost =
ClientRequestSchedulerCore.this.sched.isQueueAlmostEmpty();
+
+ context.jobRunner.queue(registerMeRunner,
(NativeThread.NORM_PRIORITY-1) + (boost ? 1 : 0), true);
+ }
+
+ };
+ registerMeRunner = new RegisterMeRunner();
+ }
+ }
+
+ private transient DBJob preRegisterMeRunner;
+
+ void start(DBJobRunner runner) {
+ startRegisterMeRunner(runner);
+ }
+
+ private final void startRegisterMeRunner(DBJobRunner runner) {
+ if(isInsertScheduler)
+ runner.queue(preRegisterMeRunner,
NativeThread.NORM_PRIORITY, true);
+ }
+
+ // We pass in the schedTransient to the next two methods so that we can
select between either of them.
+
+	private int removeFirstAccordingToPriorities(boolean tryOfferedKeys, int fuzz, RandomSource random, OfferedKeysList[] offeredKeys, ClientRequestSchedulerNonPersistent schedTransient, boolean transientOnly, short maxPrio, ObjectContainer container){
+		SortedVectorByNumber result = null;
+
+		short iteration = 0, priority;
+		// we loop to ensure we try every possibility (n + 1)
+		//
+		// PRIO will do 0,1,2,3,4,5,6,0
+		// TWEAKED will do rand%6,0,1,2,3,4,5,6
+		while(iteration++ < RequestStarter.NUMBER_OF_PRIORITY_CLASSES + 1){
+			priority = fuzz<0 ? tweakedPrioritySelector[random.nextInt(tweakedPrioritySelector.length)] : prioritySelector[Math.abs(fuzz % prioritySelector.length)];
+			if(transientOnly)
+				result = null;
+			else
+				result = priorities[priority];
+			if(result == null)
+				result = schedTransient.priorities[priority];
+			if(priority > maxPrio) {
+				fuzz++;
+				continue; // Don't return because first round may be higher with soft scheduling
+			}
+			if(((result != null) && (!result.isEmpty())) || (tryOfferedKeys && !offeredKeys[priority].isEmpty(container))) {
+				if(logMINOR) Logger.minor(this, "using priority : "+priority);
+				return priority;
+			}
+
+			if(logMINOR) Logger.minor(this, "Priority "+priority+" is null (fuzz = "+fuzz+ ')');
+			fuzz++;
+		}
+
+		//FIXME: implement NONE
+		return -1;
+	}
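The loop above probes each priority level at most once per call: fuzz < 0 picks a random (biased) priority from tweakedPrioritySelector, fuzz >= 0 walks prioritySelector deterministically, and fuzz advances on every miss so the next iteration tries the next level. A compact model of the deterministic branch (illustrative only; booleans stand in for non-empty priority queues):

    class PriorityProbe {
        // Probe priorities 0..n-1 once each, starting from fuzz % n,
        // returning the first non-empty level, or -1 if all are empty.
        static int firstNonEmpty(boolean[] nonEmpty, int fuzz) {
            int n = nonEmpty.length;
            for(int i = 0; i <= n; i++) { // n + 1 iterations, as in the real loop
                int priority = Math.abs(fuzz % n);
                if(nonEmpty[priority]) return priority;
                fuzz++;
            }
            return -1;
        }
    }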
+
+	// LOCKING: ClientRequestScheduler locks on (this) before calling.
+	// We prevent a number of race conditions (e.g. adding a retry count and then another
+	// thread removes it cos it's empty) ... and in addToGrabArray etc. we already sync on this.
+	// The worry is ... is there any nested locking outside of the hierarchy?
+	ChosenBlock removeFirst(int fuzz, RandomSource random, OfferedKeysList[] offeredKeys, RequestStarter starter, ClientRequestSchedulerNonPersistent schedTransient, boolean transientOnly, boolean notTransient, short maxPrio, int retryCount, ClientContext context, ObjectContainer container) {
+		SendableRequest req = removeFirstInner(fuzz, random, offeredKeys, starter, schedTransient, transientOnly, notTransient, maxPrio, retryCount, context, container);
+		if(isInsertScheduler && req instanceof SendableGet) {
+			IllegalStateException e = new IllegalStateException("removeFirstInner returned a SendableGet on an insert scheduler!!");
+			req.internalError(e, sched, container, context, req.persistent());
+			throw e;
+		}
+		return maybeMakeChosenRequest(req, container, context);
+	}
+
+ private int ctr;
+
+	public ChosenBlock maybeMakeChosenRequest(SendableRequest req, ObjectContainer container, ClientContext context) {
+		if(req == null) return null;
+		if(req.isEmpty(container) || req.isCancelled(container)) return null;
+		SendableRequestItem token = req.chooseKey(this, req.persistent() ? container : null, context);
+		if(token == null) {
+			return null;
+		} else {
+			Key key;
+			ClientKey ckey;
+			if(isInsertScheduler) {
+				key = null;
+				ckey = null;
+			} else {
+				key = ((BaseSendableGet)req).getNodeKey(token, persistent() ? container : null);
+				if(req instanceof SendableGet)
+					ckey = ((SendableGet)req).getKey(token, persistent() ? container : null);
+				else
+					ckey = null;
+			}
+			ChosenBlock ret;
+			assert(!req.persistent());
+			if(key != null && key.getRoutingKey() == null)
+				throw new NullPointerException();
+			boolean localRequestOnly;
+			boolean cacheLocalRequests;
+			boolean ignoreStore;
+			if(req instanceof SendableGet) {
+				SendableGet sg = (SendableGet) req;
+				FetchContext ctx = sg.getContext();
+				if(container != null)
+					container.activate(ctx, 1);
+				localRequestOnly = ctx.localRequestOnly;
+				cacheLocalRequests = ctx.cacheLocalRequests;
+				ignoreStore = ctx.ignoreStore;
+			} else {
+				localRequestOnly = false;
+				cacheLocalRequests = false;
+				ignoreStore = false;
+			}
+			ret = new TransientChosenBlock(req, token, key, ckey, localRequestOnly, cacheLocalRequests, ignoreStore, sched);
+			return ret;
+		}
+	}
+
+ SendableRequest removeFirstInner(int fuzz, RandomSource random,
OfferedKeysList[] offeredKeys, RequestStarter starter,
ClientRequestSchedulerNonPersistent schedTransient, boolean transientOnly,
boolean notTransient, short maxPrio, int retryCount, ClientContext context,
ObjectContainer container) {
+ // Priorities start at 0
+ if(logMINOR) Logger.minor(this, "removeFirst()");
+ boolean tryOfferedKeys = offeredKeys != null && (!notTransient)
&& random.nextBoolean();
+ int choosenPriorityClass =
removeFirstAccordingToPriorities(tryOfferedKeys, fuzz, random, offeredKeys,
schedTransient, transientOnly, maxPrio, container);
+ if(choosenPriorityClass == -1 && offeredKeys != null &&
(!tryOfferedKeys) && (!notTransient)) {
+ tryOfferedKeys = true;
+ choosenPriorityClass =
removeFirstAccordingToPriorities(tryOfferedKeys, fuzz, random, offeredKeys,
schedTransient, transientOnly, maxPrio, container);
+ }
+ if(choosenPriorityClass == -1) {
+ if(logMINOR)
+ Logger.minor(this, "Nothing to do");
+ return null;
+ }
+ if(maxPrio >= RequestStarter.MINIMUM_PRIORITY_CLASS)
+ maxPrio = RequestStarter.MINIMUM_PRIORITY_CLASS;
+ for(;choosenPriorityClass <= maxPrio;choosenPriorityClass++) {
+ if(logMINOR) Logger.minor(this, "Using priority
"+choosenPriorityClass);
+ if(tryOfferedKeys) {
+ if(offeredKeys[choosenPriorityClass].hasValidKeys(this,
null, context))
+ return offeredKeys[choosenPriorityClass];
+ }
+ SortedVectorByNumber perm = null;
+ if(!transientOnly)
+ perm = priorities[choosenPriorityClass];
+ SortedVectorByNumber trans = null;
+ if(!notTransient)
+ trans = schedTransient.priorities[choosenPriorityClass];
+ if(perm == null && trans == null) {
+ if(logMINOR) Logger.minor(this, "No requests to run:
chosen priority empty");
+ continue; // Try next priority
+ }
+ int permRetryIndex = 0;
+ int transRetryIndex = 0;
+ while(true) {
+ int permRetryCount = perm == null ? Integer.MAX_VALUE :
perm.getNumberByIndex(permRetryIndex);
+ int transRetryCount = trans == null ? Integer.MAX_VALUE
: trans.getNumberByIndex(transRetryIndex);
+ if(choosenPriorityClass == maxPrio) {
+ if(permRetryCount >= retryCount) {
+ permRetryCount = Integer.MAX_VALUE;
+ }
+ if(transRetryCount >= retryCount) {
+ transRetryCount = Integer.MAX_VALUE;
+ }
+ }
+ if(permRetryCount == Integer.MAX_VALUE &&
transRetryCount == Integer.MAX_VALUE) {
+ if(logMINOR) Logger.minor(this, "No requests to
run: ran out of retrycounts on chosen priority");
+ break; // Try next priority
+ }
+ SectoredRandomGrabArrayWithInt chosenTracker = null;
+ SortedVectorByNumber trackerParent = null;
+ if(permRetryCount == transRetryCount) {
+ // Choose between them.
+ SectoredRandomGrabArrayWithInt permRetryTracker
= (SectoredRandomGrabArrayWithInt) perm.getByIndex(permRetryIndex);
+ if(persistent() && permRetryTracker != null)
+ container.activate(permRetryTracker, 1);
+ SectoredRandomGrabArrayWithInt
transRetryTracker = (SectoredRandomGrabArrayWithInt)
trans.getByIndex(transRetryIndex);
+ int permTrackerSize = permRetryTracker.size();
+ int transTrackerSize = transRetryTracker.size();
+ if(permTrackerSize + transTrackerSize == 0) {
+ permRetryIndex++;
+ transRetryIndex++;
+ continue;
+ }
+ if(random.nextInt(permTrackerSize +
transTrackerSize) > permTrackerSize) {
+ chosenTracker = permRetryTracker;
+ trackerParent = perm;
+ permRetryIndex++;
+ } else {
+ chosenTracker = transRetryTracker;
+ trackerParent = trans;
+ transRetryIndex++;
+ }
+ } else if(permRetryCount < transRetryCount) {
+ chosenTracker =
(SectoredRandomGrabArrayWithInt) perm.getByIndex(permRetryIndex);
+ if(persistent() && chosenTracker != null)
+ container.activate(chosenTracker, 1);
+ trackerParent = perm;
+ permRetryIndex++;
+ } else {
+ chosenTracker =
(SectoredRandomGrabArrayWithInt) trans.getByIndex(transRetryIndex);
+ trackerParent = trans;
+ transRetryIndex++;
+ }
+ if(logMINOR)
+ Logger.minor(this, "Got retry count tracker
"+chosenTracker);
+ SendableRequest req = (SendableRequest)
chosenTracker.removeRandom(starter, container, context);
+ if(chosenTracker.isEmpty()) {
+ trackerParent.remove(chosenTracker.getNumber(),
container);
+ if(chosenTracker.persistent())
+ chosenTracker.removeFrom(container);
+ if(trackerParent.isEmpty()) {
+ if(logMINOR) Logger.minor(this, "Should
remove priority");
+ }
+ }
+ if(req == null) {
+ if(logMINOR) Logger.minor(this, "No requests,
adjusted retrycount "+chosenTracker.getNumber()+" ("+chosenTracker+") of
priority "+choosenPriorityClass);
+ continue; // Try next retry count.
+ }
+ if(chosenTracker.persistent())
+ container.activate(req, 1); // FIXME
+ if(req.persistent() != trackerParent.persistent()) {
+ Logger.error(this,
"Request.persistent()="+req.persistent()+" but is in the queue for
persistent="+trackerParent.persistent()+" for "+req);
+ // FIXME fix it
+ }
+ if(req.getPriorityClass(container) !=
choosenPriorityClass) {
+ // Reinsert it : shouldn't happen if we are
calling reregisterAll,
+ // maybe we should ask people to report that
error if seen
+ Logger.normal(this, "In wrong priority class:
"+req+" (req.prio="+req.getPriorityClass(container)+" but
chosen="+choosenPriorityClass+ ')');
+ // Remove it.
+ SectoredRandomGrabArrayWithObject clientGrabber
= (SectoredRandomGrabArrayWithObject)
chosenTracker.getGrabber(req.getClient(container));
+ if(clientGrabber != null) {
+ if(chosenTracker.persistent())
+
container.activate(clientGrabber, 1);
+ RandomGrabArray baseRGA =
(RandomGrabArray) clientGrabber.getGrabber(req.getClientRequest());
+ if(baseRGA != null) {
+ baseRGA.remove(req, container);
+ } else {
+ // Okay, it's been removed
already. Cool.
+ }
+ } else {
+ Logger.error(this, "Could not find
client grabber for client "+req.getClient(container)+" from "+chosenTracker);
+ }
+ if(req.persistent())
+ innerRegister(req, random, container,
null);
+ else
+ schedTransient.innerRegister(req,
random, container, null);
+ continue; // Try the next one on this retry
count.
+ }
+
+ // Check recentSuccesses
+ /** Choose a recently succeeded request.
+ * 50% chance of using a recently succeeded request, if
there is one.
+ * For transient requests, we keep a list of recently
succeeded BaseSendableGet's,
+ * because transient requests are chosen individually.
+ * But for persistent requests, we keep a list of
RandomGrabArray's, because
+ * persistent requests are chosen a whole
SendableRequest at a time.
+ *
+ * FIXME: Only replaces persistent requests with
persistent requests (of similar priority and retry count), or transient with
transient.
+ * Probably this is acceptable.
+ */
+ if(!req.persistent()) {
+ List recent = schedTransient.recentSuccesses;
+ SendableRequest altReq = null;
+ if(!recent.isEmpty()) {
+ if(random.nextBoolean()) {
+ altReq = (BaseSendableGet)
recent.remove(recent.size()-1);
+ }
+ }
+ if(altReq != null &&
(altReq.isCancelled(container) || altReq.isEmpty(container))) {
+ if(logMINOR)
+ Logger.minor(this, "Ignoring
cancelled recently succeeded item "+altReq);
+ altReq = null;
+ }
+ if(altReq != null &&
altReq.getPriorityClass(container) <= choosenPriorityClass &&
+
fixRetryCount(altReq.getRetryCount()) <= chosenTracker.getNumber() &&
!altReq.isEmpty(container) && altReq != req) {
+ // Use the recent one instead
+ if(logMINOR)
+ Logger.minor(this, "Recently
succeeded (transient) req "+altReq+"
(prio="+altReq.getPriorityClass(container)+" retry count
"+altReq.getRetryCount()+") is better than "+req+"
(prio="+req.getPriorityClass(container)+" retry "+req.getRetryCount()+"), using
that");
+ // Don't need to reregister, because
removeRandom doesn't actually remove!
+ req = altReq;
+ } else if(altReq != null) {
+ // Don't use the recent one
+ if(logMINOR)
+ Logger.minor(this, "Chosen req
"+req+" is better, reregistering recently succeeded "+altReq);
+ recent.add(altReq);
+ }
+ } else {
+ RandomGrabArray altRGA = null;
+ synchronized(recentSuccesses) {
+ if(!(recentSuccesses.isEmpty() ||
random.nextBoolean())) {
+ altRGA =
recentSuccesses.remove(0);
+ }
+ }
+ container.activate(altRGA, 1);
+ if(altRGA != null &&
container.ext().isStored(altRGA) && !altRGA.isEmpty()) {
+ container.activate(altRGA, 1);
+ if(logMINOR)
+ Logger.minor(this, "Maybe using
recently succeeded item from "+altRGA);
+ SendableRequest altReq =
(SendableRequest) altRGA.removeRandom(starter, container, context);
+ container.activate(altReq, 1);
+ if(altReq != null) {
+
if(altReq.getPriorityClass(container) <= choosenPriorityClass &&
+
fixRetryCount(altReq.getRetryCount()) <= chosenTracker.getNumber() &&
!altReq.isEmpty(container) && altReq != req) {
+ // Use the recent one
instead
+ if(logMINOR)
+
Logger.minor(this, "Recently succeeded (persistent) req "+altReq+"
(prio="+altReq.getPriorityClass(container)+" retry count
"+altReq.getRetryCount()+") is better than "+req+"
(prio="+req.getPriorityClass(container)+" retry "+req.getRetryCount()+"), using
that");
+ // Don't need to
reregister, because removeRandom doesn't actually remove!
+ req = altReq;
+ } else if(altReq != null) {
+ if(logMINOR)
+
Logger.minor(this, "Chosen (persistent) req "+req+" is better, reregistering
recently succeeded "+altRGA+" for "+altReq);
+
synchronized(recentSuccesses) {
+
recentSuccesses.add(altRGA);
+ }
+ }
+ }
+ }
+ }
+
+ // Now we have chosen a request.
+ if(logMINOR) Logger.minor(this, "removeFirst()
returning "+req+" ("+chosenTracker.getNumber()+", prio "+
+ req.getPriorityClass(container)+",
retries "+req.getRetryCount()+", client "+req.getClient(container)+",
client-req "+req.getClientRequest()+ ')');
+ if(logMINOR) Logger.minor(this, "removeFirst()
returning "+req+" of "+req.getClientRequest());
+ return req;
+
+ }
+ }
+ if(logMINOR) Logger.minor(this, "No requests to run");
+ return null;
+ }
+
+ private static final short[] tweakedPrioritySelector = {
+ RequestStarter.MAXIMUM_PRIORITY_CLASS,
+ RequestStarter.MAXIMUM_PRIORITY_CLASS,
+ RequestStarter.MAXIMUM_PRIORITY_CLASS,
+ RequestStarter.MAXIMUM_PRIORITY_CLASS,
+ RequestStarter.MAXIMUM_PRIORITY_CLASS,
+ RequestStarter.MAXIMUM_PRIORITY_CLASS,
+ RequestStarter.MAXIMUM_PRIORITY_CLASS,
+
+ RequestStarter.INTERACTIVE_PRIORITY_CLASS,
+ RequestStarter.INTERACTIVE_PRIORITY_CLASS,
+ RequestStarter.INTERACTIVE_PRIORITY_CLASS,
+ RequestStarter.INTERACTIVE_PRIORITY_CLASS,
+ RequestStarter.INTERACTIVE_PRIORITY_CLASS,
+ RequestStarter.INTERACTIVE_PRIORITY_CLASS,
+
+ RequestStarter.IMMEDIATE_SPLITFILE_PRIORITY_CLASS,
+ RequestStarter.IMMEDIATE_SPLITFILE_PRIORITY_CLASS,
+ RequestStarter.IMMEDIATE_SPLITFILE_PRIORITY_CLASS,
+ RequestStarter.IMMEDIATE_SPLITFILE_PRIORITY_CLASS,
+ RequestStarter.IMMEDIATE_SPLITFILE_PRIORITY_CLASS,
+
+ RequestStarter.UPDATE_PRIORITY_CLASS,
+ RequestStarter.UPDATE_PRIORITY_CLASS,
+ RequestStarter.UPDATE_PRIORITY_CLASS,
+ RequestStarter.UPDATE_PRIORITY_CLASS,
+
+ RequestStarter.BULK_SPLITFILE_PRIORITY_CLASS,
+ RequestStarter.BULK_SPLITFILE_PRIORITY_CLASS,
+ RequestStarter.BULK_SPLITFILE_PRIORITY_CLASS,
+
+ RequestStarter.PREFETCH_PRIORITY_CLASS,
+ RequestStarter.PREFETCH_PRIORITY_CLASS,
+
+ RequestStarter.MINIMUM_PRIORITY_CLASS
+ };
+ private static final short[] prioritySelector = {
+ RequestStarter.MAXIMUM_PRIORITY_CLASS,
+ RequestStarter.INTERACTIVE_PRIORITY_CLASS,
+ RequestStarter.IMMEDIATE_SPLITFILE_PRIORITY_CLASS,
+ RequestStarter.UPDATE_PRIORITY_CLASS,
+ RequestStarter.BULK_SPLITFILE_PRIORITY_CLASS,
+ RequestStarter.PREFETCH_PRIORITY_CLASS,
+ RequestStarter.MINIMUM_PRIORITY_CLASS
+ };
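tweakedPrioritySelector implements weighted random selection by plain repetition: MAXIMUM appears 7 times, INTERACTIVE 6, and so on down to MINIMUM once, so a single nextInt() over the table yields a distribution skewed towards urgent work. The same trick in isolation (illustrative; small ints stand in for the priority-class constants):

    import java.util.Random;

    class WeightedPick {
        // Weighted choice by repetition: a weight of k is expressed as k duplicate entries.
        static final short[] WEIGHTED = {
            0, 0, 0, 0, 0, 0, 0, // priority 0, weight 7
            1, 1, 1, 1, 1, 1,    // priority 1, weight 6
            2, 2, 2, 2, 2,       // priority 2, weight 5
            3, 3, 3, 3,          // priority 3, weight 4
            4, 4, 4,             // priority 4, weight 3
            5, 5,                // priority 5, weight 2
            6                    // priority 6, weight 1
        };

        static short pick(Random random) {
            return WEIGHTED[random.nextInt(WEIGHTED.length)];
        }
    }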
+
+ boolean persistent() {
+ return true;
+ }
+
+ private transient ObjectSet registerMeSet;
+
+ private transient RegisterMeRunner registerMeRunner;
+
+ private transient boolean shouldReRunRegisterMeRunner;
+
+ class RegisterMeRunner implements DBJob {
+
+ public void run(ObjectContainer container, ClientContext
context) {
+
if(sched.databaseExecutor.getQueueSize(NativeThread.NORM_PRIORITY) > 100) {
+ // If the queue isn't empty, reschedule at
NORM-1
+ if(!sched.isQueueAlmostEmpty()) {
+
context.jobRunner.queue(registerMeRunner, NativeThread.NORM_PRIORITY-1, false);
+ return;
+ }
+ }
+ long deadline = System.currentTimeMillis() + 10*1000;
+ if(registerMeSet == null) {
+ Logger.error(this, "registerMeSet is null for
"+ClientRequestSchedulerCore.this+" ( "+this+" )");
+ return;
+ }
+ for(int i=0;i < 1000; i++) {
+ try {
+ if(!registerMeSet.hasNext()) break;
+ } catch (NullPointerException t) {
+ Logger.error(this, "DB4O thew NPE in
hasNext(): "+t, t);
+ // FIXME find some way to get a
reproducible test case... I suspect it won't be easy :<
+
context.jobRunner.queue(preRegisterMeRunner, NativeThread.NORM_PRIORITY, true);
+ return;
+ } catch (ClassCastException t) {
+ // WTF?!?!?!?!?!
+ Logger.error(this, "DB4O thew
ClassCastException in hasNext(): "+t, t);
+ // FIXME find some way to get a
reproducible test case... I suspect it won't be easy :<
+
context.jobRunner.queue(preRegisterMeRunner, NativeThread.NORM_PRIORITY, true);
+ return;
+ }
+ long startNext = System.currentTimeMillis();
+ RegisterMe reg = (RegisterMe)
registerMeSet.next();
+ container.activate(reg, 1);
+ if(reg.bootID == context.bootID) {
+ if(logMINOR) Logger.minor(this, "Not
registering block "+reg+" as was added to the queue");
+ continue;
+ }
+ // FIXME remove the leftover/old core handling
at some point, an NPE is acceptable long-term.
+ if(reg.core != ClientRequestSchedulerCore.this)
{
+ if(!container.ext().isStored(reg)) {
+ if(logMINOR) Logger.minor(this,
"Already deleted RegisterMe "+reg+" - skipping");
+ continue;
+ }
+ if(reg.core == null) {
+ Logger.error(this, "Leftover
RegisterMe "+reg+" : core already deleted. THIS IS AN ERROR unless you have
seen \"Old core not active\" messages before this point.");
+ container.delete(reg);
+ continue;
+ }
+ if(!container.ext().isActive(reg.core))
{
+ Logger.error(this, "Old core
not active in RegisterMe "+reg+" - duplicated cores????");
+ container.delete(reg.core);
+ container.delete(reg);
+ continue;
+ }
+ if(logMINOR)
+ Logger.minor(this, "Ignoring
RegisterMe "+reg+" as doesn't belong to me: my insert="+isInsertScheduler+" my
ssk="+isSSKScheduler+" his insert="+reg.core.isInsertScheduler+" his
ssk="+reg.core.isSSKScheduler);
+ container.deactivate(reg, 1);
+ continue; // Don't delete.
+ }
+// if(reg.key.addedTime > initTime) {
+// if(logMINOR) Logger.minor(this,
"Ignoring RegisterMe as created since startup");
+// container.deactivate(reg.key, 1);
+// container.deactivate(reg, 1);
+// continue; // Don't delete
+// }
+ long endNext = System.currentTimeMillis();
+ if(logMINOR)
+ Logger.minor(this, "RegisterMe: next()
took "+(endNext-startNext));
+
+ if(logMINOR)
+ Logger.minor(this, "Running RegisterMe
"+reg+" for "+reg.nonGetRequest+" : "+reg.addedTime+" : "+reg.priority);
+ // Don't need to activate, fields should exist?
FIXME
+ if(reg.nonGetRequest != null) {
+ container.activate(reg.nonGetRequest,
1);
+
if(reg.nonGetRequest.isCancelled(container)) {
+ Logger.normal(this,
"RegisterMe: request cancelled: "+reg.nonGetRequest);
+ } else {
+ if(logMINOR)
+ Logger.minor(this,
"Registering RegisterMe for insert: "+reg.nonGetRequest);
+
sched.registerInsert(reg.nonGetRequest, true, false, container);
+ }
+ container.delete(reg);
+ container.deactivate(reg.nonGetRequest,
1);
+ }
+ container.deactivate(reg, 1);
+ if(System.currentTimeMillis() > deadline) break;
+ }
+ boolean boost = sched.isQueueAlmostEmpty();
+ if(registerMeSet.hasNext())
+ context.jobRunner.queue(registerMeRunner,
(NativeThread.NORM_PRIORITY-1) + (boost ? 1 : 0), true);
+ else {
+ if(logMINOR) Logger.minor(this,
"RegisterMeRunner finished");
+ synchronized(ClientRequestSchedulerCore.this) {
+ shouldReRunRegisterMeRunner = false;
+ registerMeSet = null;
+ }
+ // Always re-run the query. If there is nothing
to register, it won't call back to us.
+ preRegisterMeRunner.run(container, context);
+ }
+ }
+
+ }
+ /**
+ * @return True unless the key was already present.
+ */
+	public boolean addToFetching(Key key) {
+		synchronized(keysFetching) {
+			boolean retval = keysFetching.add(key);
+			if(!retval) {
+				Logger.normal(this, "Already in keysFetching: "+key);
+			} else {
+				if(logMINOR)
+					Logger.minor(this, "Added to keysFetching: "+key);
+			}
+			return retval;
+		}
+	}
+
+ public boolean hasKey(Key key) {
+ if(keysFetching == null) {
+ throw new NullPointerException();
+ }
+ synchronized(keysFetching) {
+ return keysFetching.contains(key);
+ }
+ }
+
+ public void removeFetchingKey(final Key key) {
+ if(logMINOR)
+ Logger.minor(this, "Removing from keysFetching: "+key);
+ if(key != null) {
+ synchronized(keysFetching) {
+ keysFetching.remove(key);
+ }
+ }
+ }
+
+ public void rerunRegisterMeRunner(DBJobRunner runner) {
+ synchronized(this) {
+ shouldReRunRegisterMeRunner = true;
+ if(registerMeSet != null) return;
+ }
+ startRegisterMeRunner(runner);
+ }
+
+	public synchronized long countQueuedRequests(ObjectContainer container, ClientContext context) {
+		long ret = super.countQueuedRequests(container, context);
+		long cooldown = persistentCooldownQueue.size(container);
+		System.out.println("Cooldown queue size: "+cooldown);
+		return ret + cooldown;
+	}
+
+	public boolean hasTransientInsert(SendableInsert insert, Object token) {
+		RunningTransientInsert tmp = new RunningTransientInsert(insert, token);
+		synchronized(runningTransientInserts) {
+			return runningTransientInserts.contains(tmp);
+		}
+	}
+
+	public boolean addTransientInsertFetching(SendableInsert insert, Object token) {
+		RunningTransientInsert tmp = new RunningTransientInsert(insert, token);
+		synchronized(runningTransientInserts) {
+			boolean retval = runningTransientInserts.add(tmp);
+			if(!retval) {
+				Logger.normal(this, "Already in runningTransientInserts: "+insert+" : "+token);
+			} else {
+				if(logMINOR)
+					Logger.minor(this, "Added to runningTransientInserts: "+insert+" : "+token);
+			}
+			return retval;
+		}
+	}
+
+	public void removeTransientInsertFetching(SendableInsert insert, Object token) {
+		RunningTransientInsert tmp = new RunningTransientInsert(insert, token);
+		if(logMINOR)
+			Logger.minor(this, "Removing from runningTransientInserts: "+insert+" : "+token);
+		synchronized(runningTransientInserts) {
+			runningTransientInserts.remove(tmp);
+		}
+	}
+
+	public void succeeded(BaseSendableGet succeeded, ObjectContainer container) {
+		RandomGrabArray array = succeeded.getParentGrabArray();
+		if(array == null) return; // Unregistered already? Check before activating.
+		container.activate(array, 1);
+		synchronized(recentSuccesses) {
+			if(recentSuccesses.contains(array)) return;
+			recentSuccesses.add(array);
+			while(recentSuccesses.size() > 8)
+				recentSuccesses.remove(0);
+		}
+	}
+
+}
+
Copied:
trunk/freenet/src/freenet/client/async/ClientRequestSchedulerNonPersistent.java
(from rev 26320,
branches/db4o/freenet/src/freenet/client/async/ClientRequestSchedulerNonPersistent.java)
===================================================================
---
trunk/freenet/src/freenet/client/async/ClientRequestSchedulerNonPersistent.java
(rev 0)
+++
trunk/freenet/src/freenet/client/async/ClientRequestSchedulerNonPersistent.java
2009-04-01 20:34:09 UTC (rev 26322)
@@ -0,0 +1,62 @@
+/* This code is part of Freenet. It is distributed under the GNU General
+ * Public License, version 2 (or at your option any later version). See
+ * http://www.gnu.org/ for further details of the GPL. */
+package freenet.client.async;
+
+import java.util.HashSet;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Set;
+
+import com.db4o.ObjectContainer;
+
+import freenet.node.BaseSendableGet;
+import freenet.support.Logger;
+
+/**
+ * Parallel scheduler structures for non-persistent requests.
+ * @author toad
+ */
+class ClientRequestSchedulerNonPersistent extends ClientRequestSchedulerBase {
+
+ private boolean logMINOR;
+
+	protected final List<BaseSendableGet> recentSuccesses;
+
+	ClientRequestSchedulerNonPersistent(ClientRequestScheduler sched, boolean forInserts, boolean forSSKs) {
+		super(forInserts, forSSKs);
+		this.sched = sched;
+		recentSuccesses = new LinkedList<BaseSendableGet>();
+		logMINOR = Logger.shouldLog(Logger.MINOR, this);
+	}
+
+ boolean persistent() {
+ return false;
+ }
+
+ ObjectContainer container() {
+ return null;
+ }
+
+	public void succeeded(BaseSendableGet succeeded, ObjectContainer container) {
+		// FIXME: Keep a list of recently succeeded ClientRequester's.
+		if(isInsertScheduler) return;
+		if(persistent()) {
+			container.activate(succeeded, 1);
+		}
+		if(succeeded.isEmpty(container)) return;
+		if(logMINOR)
+			Logger.minor(this, "Recording successful fetch from "+succeeded);
+		recentSuccesses.add(succeeded);
+		while(recentSuccesses.size() > 8)
+			recentSuccesses.remove(0);
+	}
+
+ public boolean objectCanNew(ObjectContainer container) {
+ Logger.error(this, "Not storing
ClientRequestSchedulerNonPersistent in database", new Exception("error"));
+ return false;
+ }
+
+
+}
Modified: trunk/freenet/src/freenet/client/async/ClientRequester.java
===================================================================
--- trunk/freenet/src/freenet/client/async/ClientRequester.java 2009-04-01
20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/client/async/ClientRequester.java 2009-04-01
20:34:09 UTC (rev 26322)
@@ -3,7 +3,11 @@
* http://www.gnu.org/ for further details of the GPL. */
package freenet.client.async;
+import com.db4o.ObjectContainer;
+
import freenet.keys.FreenetURI;
+import freenet.node.RequestClient;
+import freenet.node.SendableRequest;
import freenet.support.Logger;
/** A high level client request. A request (either fetch or put) started
@@ -13,29 +17,34 @@
*/
public abstract class ClientRequester {
- public abstract void onTransition(ClientGetState oldState,
ClientGetState newState);
+ public abstract void onTransition(ClientGetState oldState,
ClientGetState newState, ObjectContainer container);
// FIXME move the priority classes from RequestStarter here
protected short priorityClass;
protected boolean cancelled;
- public final ClientRequestScheduler chkScheduler;
- public final ClientRequestScheduler sskScheduler;
- protected final Object client;
+ protected final RequestClient client;
+ protected final SendableRequestSet requests;
public short getPriorityClass() {
return priorityClass;
}
- protected ClientRequester(short priorityClass, ClientRequestScheduler
chkScheduler, ClientRequestScheduler sskScheduler, Object client) {
+ protected ClientRequester(short priorityClass, RequestClient client) {
this.priorityClass = priorityClass;
- this.chkScheduler = chkScheduler;
- this.sskScheduler = sskScheduler;
this.client = client;
+ if(client == null)
+ throw new NullPointerException();
+ hashCode = super.hashCode(); // the old object id will do fine,
as long as we ensure it doesn't change!
+ requests = persistent() ? new PersistentSendableRequestSet() :
new TransientSendableRequestSet();
}
- public synchronized void cancel() {
+ synchronized boolean cancel() {
+ boolean ret = cancelled;
cancelled = true;
+ return ret;
}
+
+ public abstract void cancel(ObjectContainer container, ClientContext
context);
public boolean isCancelled() {
return cancelled;
@@ -44,6 +53,15 @@
public abstract FreenetURI getURI();
public abstract boolean isFinished();
+
+ private final int hashCode;
+
+ /**
+ * We need a hash code that persists across restarts.
+ */
+ public int hashCode() {
+ return hashCode;
+ }
/** Total number of blocks this request has tried to fetch/put. */
protected int totalBlocks;
@@ -58,17 +76,19 @@
/** Has totalBlocks stopped growing? */
protected boolean blockSetFinalized;
- public void blockSetFinalized() {
+ public void blockSetFinalized(ObjectContainer container, ClientContext
context) {
synchronized(this) {
if(blockSetFinalized) return;
blockSetFinalized = true;
}
if(Logger.shouldLog(Logger.MINOR, this))
Logger.minor(this, "Finalized set of blocks for "+this,
new Exception("debug"));
- notifyClients();
+ if(persistent())
+ container.store(this);
+ notifyClients(container, context);
}
- public void addBlock() {
+ public void addBlock(ObjectContainer container) {
boolean wasFinalized;
synchronized (this) {
totalBlocks++;
@@ -82,65 +102,118 @@
Logger.error(this, "addBlock() but set
finalized! on " + this, new Exception("error"));
}
- if(Logger.shouldLog(Logger.MINOR, this)) Logger.minor(this,
"addBlock(): total="+totalBlocks+" successful="+successfulBlocks+"
failed="+failedBlocks+" required="+minSuccessBlocks);
+ if(Logger.shouldLog(Logger.MINOR, this)) Logger.minor(this,
"addBlock(): total="+totalBlocks+" successful="+successfulBlocks+"
failed="+failedBlocks+" required="+minSuccessBlocks);
+ if(persistent()) container.store(this);
}
- public void addBlocks(int num) {
+ public void addBlocks(int num, ObjectContainer container) {
boolean wasFinalized;
synchronized (this) {
totalBlocks += num;
wasFinalized = blockSetFinalized;
}
- if (wasFinalized)
+ if (wasFinalized) {
if(Logger.globalGetThreshold() > Logger.MINOR)
Logger.error(this, "addBlocks() but set
finalized! on "+this);
else
Logger.error(this, "addBlocks() but set
finalized! on "+this, new Exception("error"));
+ }
if(Logger.shouldLog(Logger.MINOR, this)) Logger.minor(this,
"addBlocks("+num+"): total="+totalBlocks+" successful="+successfulBlocks+"
failed="+failedBlocks+" required="+minSuccessBlocks);
+ if(persistent()) container.store(this);
}
- public void completedBlock(boolean dontNotify) {
+ public void completedBlock(boolean dontNotify, ObjectContainer
container, ClientContext context) {
if(Logger.shouldLog(Logger.MINOR, this))
Logger.minor(this, "Completed block ("+dontNotify+ "):
total="+totalBlocks+" success="+successfulBlocks+" failed="+failedBlocks+"
fatally="+fatallyFailedBlocks+" finalised="+blockSetFinalized+"
required="+minSuccessBlocks+" on "+this);
synchronized(this) {
+ if(cancelled) return;
successfulBlocks++;
- if(dontNotify) return;
}
- notifyClients();
+ if(persistent()) container.store(this);
+ if(dontNotify) return;
+ notifyClients(container, context);
}
- public void failedBlock() {
+ public void failedBlock(ObjectContainer container, ClientContext
context) {
synchronized(this) {
failedBlocks++;
}
- notifyClients();
+ if(persistent()) container.store(this);
+ notifyClients(container, context);
}
- public void fatallyFailedBlock() {
+ public void fatallyFailedBlock(ObjectContainer container, ClientContext
context) {
synchronized(this) {
fatallyFailedBlocks++;
}
- notifyClients();
+ if(persistent()) container.store(this);
+ notifyClients(container, context);
}
- public synchronized void addMustSucceedBlocks(int blocks) {
+ public synchronized void addMustSucceedBlocks(int blocks,
ObjectContainer container) {
minSuccessBlocks += blocks;
+ if(persistent()) container.store(this);
if(Logger.shouldLog(Logger.MINOR, this)) Logger.minor(this,
"addMustSucceedBlocks("+blocks+"): total="+totalBlocks+"
successful="+successfulBlocks+" failed="+failedBlocks+"
required="+minSuccessBlocks);
}
- public abstract void notifyClients();
+ public abstract void notifyClients(ObjectContainer container,
ClientContext context);
/** Get client context object */
- public Object getClient() {
+ public RequestClient getClient() {
return client;
}
- public void setPriorityClass(short newPriorityClass) {
+ public void setPriorityClass(short newPriorityClass, ClientContext ctx,
ObjectContainer container) {
this.priorityClass = newPriorityClass;
- chkScheduler.reregisterAll(this);
- sskScheduler.reregisterAll(this);
+ ctx.getChkFetchScheduler().reregisterAll(this, container);
+ ctx.getChkInsertScheduler().reregisterAll(this, container);
+ ctx.getSskFetchScheduler().reregisterAll(this, container);
+ ctx.getSskInsertScheduler().reregisterAll(this, container);
+ if(persistent()) container.store(this);
}
+ public boolean persistent() {
+ return client.persistent();
+ }
+
+
+ public void removeFrom(ObjectContainer container, ClientContext
context) {
+ container.activate(requests, 1);
+ requests.removeFrom(container);
+ container.delete(this);
+ }
+
+ public void objectOnActivate(ObjectContainer container) {
+ container.activate(client, 1);
+ }
+
+ public void addToRequests(SendableRequest req, ObjectContainer
container) {
+ if(persistent())
+ container.activate(requests, 1);
+ requests.addRequest(req, container);
+ if(persistent())
+ container.deactivate(requests, 1);
+ }
+
+ public SendableRequest[] getSendableRequests(ObjectContainer container)
{
+ if(persistent())
+ container.activate(requests, 1);
+ SendableRequest[] reqs = requests.listRequests(container);
+ if(persistent())
+ container.deactivate(requests, 1);
+ return reqs;
+ }
+
+ public void removeFromRequests(SendableRequest req, ObjectContainer
container, boolean dontComplain) {
+ if(persistent())
+ container.activate(requests, 1);
+ if(!requests.removeRequest(req, container) && !dontComplain) {
+ Logger.error(this, "Not in request list for "+this+":
"+req);
+ }
+ if(persistent())
+ container.deactivate(requests, 1);
+ }
+
}
Copied: trunk/freenet/src/freenet/client/async/CooldownQueue.java (from rev
26320, branches/db4o/freenet/src/freenet/client/async/CooldownQueue.java)
===================================================================
--- trunk/freenet/src/freenet/client/async/CooldownQueue.java
(rev 0)
+++ trunk/freenet/src/freenet/client/async/CooldownQueue.java 2009-04-01
20:34:09 UTC (rev 26322)
@@ -0,0 +1,33 @@
+/* This code is part of Freenet. It is distributed under the GNU General
+ * Public License, version 2 (or at your option any later version). See
+ * http://www.gnu.org/ for further details of the GPL. */
+package freenet.client.async;
+
+import com.db4o.ObjectContainer;
+
+import freenet.keys.Key;
+import freenet.node.SendableGet;
+
+public interface CooldownQueue {
+
+ /**
+ * Add a key to the end of the queue. Returns the time at which it will
be valid again.
+ */
+ public abstract long add(Key key, SendableGet client, ObjectContainer
container);
+
+ /**
+ * Remove a key whose cooldown time has passed.
+ * @param dontCareAfter If the next item to come out of the cooldown
+ * queue is more than this many millis after now, return null.
+ * @return Either an array of Key's or a Long indicating the time at
+ * which the next key will be removed from the cooldown, or null if
+ * no keys have passed their cooldown time.
+ */
+ public abstract Object removeKeyBefore(long now, long dontCareAfter,
ObjectContainer container, int maxKeys);
+
+ /**
+ * @return True if the key was found.
+ */
+ public abstract boolean removeKey(Key key, SendableGet client, long
time, ObjectContainer container);
+
+}
\ No newline at end of file
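Since removeKeyBefore() multiplexes its result as either a Key[] or a Long, callers must dispatch on the runtime type. A sketch of the expected calling pattern (variable names are illustrative):

    long now = System.currentTimeMillis();
    Object o = queue.removeKeyBefore(now, 5 * 60 * 1000, container, 100);
    if(o == null) {
        // No key is due within the next five minutes.
    } else if(o instanceof Long) {
        // The earliest time at which the next key will leave cooldown:
        long nextWakeup = ((Long) o).longValue();
        // ... reschedule a check for nextWakeup ...
    } else {
        // These keys have passed their cooldown and may be retried now.
        Key[] keys = (Key[]) o;
    }
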
Copied: trunk/freenet/src/freenet/client/async/DBJob.java (from rev 26320,
branches/db4o/freenet/src/freenet/client/async/DBJob.java)
===================================================================
--- trunk/freenet/src/freenet/client/async/DBJob.java
(rev 0)
+++ trunk/freenet/src/freenet/client/async/DBJob.java 2009-04-01 20:34:09 UTC
(rev 26322)
@@ -0,0 +1,17 @@
+/* This code is part of Freenet. It is distributed under the GNU General
+ * Public License, version 2 (or at your option any later version). See
+ * http://www.gnu.org/ for further details of the GPL. */
+package freenet.client.async;
+
+import com.db4o.ObjectContainer;
+
+/**
+ * A job to be run on the database thread. We will pass in a transactional
context
+ * (the ObjectContainer) and a ClientContext.
+ * @author toad
+ */
+public interface DBJob {
+
+ void run(ObjectContainer container, ClientContext context);
+
+}
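A DBJob is typically supplied as a small named or anonymous class; the DatastoreChecker below does exactly this with its loader field. A stand-alone sketch (the class and its behaviour are illustrative only):

    import com.db4o.ObjectContainer;

    public class LoggingDBJob implements DBJob {

        private final String label;

        public LoggingDBJob(String label) {
            this.label = label;
        }

        public void run(ObjectContainer container, ClientContext context) {
            // Runs on the database thread: the only place where it is safe
            // to activate, store or delete persistent objects.
            System.out.println("Database job running: " + label);
        }
    }
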
Copied: trunk/freenet/src/freenet/client/async/DBJobRunner.java (from rev
26320, branches/db4o/freenet/src/freenet/client/async/DBJobRunner.java)
===================================================================
--- trunk/freenet/src/freenet/client/async/DBJobRunner.java
(rev 0)
+++ trunk/freenet/src/freenet/client/async/DBJobRunner.java 2009-04-01
20:34:09 UTC (rev 26322)
@@ -0,0 +1,34 @@
+/* This code is part of Freenet. It is distributed under the GNU General
+ * Public License, version 2 (or at your option any later version). See
+ * http://www.gnu.org/ for further details of the GPL. */
+package freenet.client.async;
+
+import com.db4o.ObjectContainer;
+
+/**
+ * Interface for an object which queues and runs DBJob's.
+ * @author toad
+ */
+public interface DBJobRunner {
+
+ public void queue(DBJob job, int priority, boolean checkDupes);
+
+ /** Run this database job blocking. If we are already on the database
thread,
+ * run it inline, otherwise schedule it at the specified priority and
wait for
+ * it to finish. */
+ public void runBlocking(DBJob job, int priority);
+
+ public boolean onDatabaseThread();
+
+ public int getQueueSize(int priority);
+
+ /** Queue a database job to be executed just after restart.
+ * @param early If true, the job will be run just after startup, at
HIGH priority; the priority
+ * given determines the order of such jobs. If false, it will be queued
to the database job
+ * scheduler at the given priority. Late jobs are responsible for
removing themselves! */
+ public void queueRestartJob(DBJob job, int priority, ObjectContainer
container, boolean early);
+
+ /** Remove a queued on-restart database job. */
+ public void removeRestartJob(DBJob job, int priority, ObjectContainer
container);
+
+}
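How the queueing methods combine in practice, reusing the LoggingDBJob sketch above (the checkDupes flag presumably skips the job if an identical one is already queued; the priorities are the NativeThread constants used throughout this commit):

    void queueExamples(DBJobRunner jobRunner) {
        DBJob job = new LoggingDBJob("example");
        // Fire-and-forget, de-duplicated against identical queued jobs:
        jobRunner.queue(job, NativeThread.NORM_PRIORITY, true);
        // Synchronous: runs inline if we are already on the database
        // thread, otherwise queues at the given priority and waits:
        jobRunner.runBlocking(job, NativeThread.HIGH_PRIORITY);
    }
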
Copied: trunk/freenet/src/freenet/client/async/DatastoreChecker.java (from rev
26320, branches/db4o/freenet/src/freenet/client/async/DatastoreChecker.java)
===================================================================
--- trunk/freenet/src/freenet/client/async/DatastoreChecker.java
(rev 0)
+++ trunk/freenet/src/freenet/client/async/DatastoreChecker.java
2009-04-01 20:34:09 UTC (rev 26322)
@@ -0,0 +1,431 @@
+package freenet.client.async;
+
+import java.util.ArrayList;
+
+import com.db4o.ObjectContainer;
+import com.db4o.ObjectSet;
+import com.db4o.query.Query;
+
+import freenet.keys.Key;
+import freenet.keys.KeyBlock;
+import freenet.keys.NodeSSK;
+import freenet.node.Node;
+import freenet.node.PrioRunnable;
+import freenet.node.RequestStarter;
+import freenet.node.SendableGet;
+import freenet.support.Executor;
+import freenet.support.Logger;
+import freenet.support.io.NativeThread;
+
+/**
+ * @author Matthew Toseland <toad at amphibian.dyndns.org> (0xE43DA450)
+ */
+public class DatastoreChecker implements PrioRunnable {
+
+ static final int MAX_PERSISTENT_KEYS = 1024;
+
+ /** List of arrays of keys to check for persistent requests. PARTIAL:
+ * When we run out we will look up some more DatastoreCheckerItem's. */
+ private final ArrayList<Key[]>[] persistentKeys;
+ /** List of persistent requests which we will call finishRegister() for
+ * when we have checked the keys lists. PARTIAL: When we run out we
+ * will look up some more DatastoreCheckerItem's. Deactivated. */
+ private final ArrayList<SendableGet>[] persistentGetters;
+ private final ArrayList<Boolean>[] persistentDontCache;
+ private final ArrayList<ClientRequestScheduler>[] persistentSchedulers;
+ private final ArrayList<DatastoreCheckerItem>[] persistentCheckerItems;
+ private final ArrayList<BlockSet>[] persistentBlockSets;
+
+ /** List of arrays of keys to check for transient requests. */
+ private final ArrayList<Key[]>[] transientKeys;
+ /** List of transient requests which we will call finishRegister() for
+ * when we have checked the keys lists. */
+ private final ArrayList<SendableGet>[] transientGetters;
+ private final ArrayList<BlockSet>[] transientBlockSets;
+
+ private ClientContext context;
+ private final Node node;
+
+ public synchronized void setContext(ClientContext context) {
+ this.context = context;
+ }
+
+ public DatastoreChecker(Node node) {
+ this.node = node;
+ int priorities = RequestStarter.NUMBER_OF_PRIORITY_CLASSES;
+ persistentKeys = new ArrayList[priorities];
+ for(int i=0;i<priorities;i++)
+ persistentKeys[i] = new ArrayList<Key[]>();
+ persistentGetters = new ArrayList[priorities];
+ for(int i=0;i<priorities;i++)
+ persistentGetters[i] = new ArrayList<SendableGet>();
+ persistentDontCache = new ArrayList[priorities];
+ for(int i=0;i<priorities;i++)
+ persistentDontCache[i] = new ArrayList<Boolean>();
+ persistentSchedulers = new ArrayList[priorities];
+ for(int i=0;i<priorities;i++)
+ persistentSchedulers[i] = new
ArrayList<ClientRequestScheduler>();
+ persistentCheckerItems = new ArrayList[priorities];
+ for(int i=0;i<priorities;i++)
+ persistentCheckerItems[i] = new
ArrayList<DatastoreCheckerItem>();
+ persistentBlockSets = new ArrayList[priorities];
+ for(int i=0;i<priorities;i++)
+ persistentBlockSets[i] = new ArrayList<BlockSet>();
+ transientKeys = new ArrayList[priorities];
+ for(int i=0;i<priorities;i++)
+ transientKeys[i] = new ArrayList<Key[]>();
+ transientGetters = new ArrayList[priorities];
+ for(int i=0;i<priorities;i++)
+ transientGetters[i] = new ArrayList<SendableGet>();
+ transientBlockSets = new ArrayList[priorities];
+ for(int i=0;i<priorities;i++)
+ transientBlockSets[i] = new ArrayList<BlockSet>();
+ }
+
+ private final DBJob loader = new DBJob() {
+
+ public void run(ObjectContainer container, ClientContext
context) {
+ loadPersistentRequests(container, context);
+ }
+
+ };
+
+ public void loadPersistentRequests(ObjectContainer container, final
ClientContext context) {
+ boolean logMINOR = Logger.shouldLog(Logger.MINOR, this);
+ int totalSize = 0;
+ synchronized(this) {
+ for(int i=0;i<persistentKeys.length;i++) {
+ for(int j=0;j<persistentKeys[i].size();j++)
+ totalSize +=
persistentKeys[i].get(j).length;
+ }
+ if(totalSize > MAX_PERSISTENT_KEYS) {
+ if(logMINOR) Logger.minor(this, "Persistent
datastore checker queue already full");
+ return;
+ }
+ }
+ for(short p = RequestStarter.MAXIMUM_PRIORITY_CLASS; p <=
RequestStarter.MINIMUM_PRIORITY_CLASS; p++) {
+ final short prio = p;
+ Query query = container.query();
+ query.constrain(DatastoreCheckerItem.class);
+
query.descend("nodeDBHandle").constrain(context.nodeDBHandle).
+ and(query.descend("prio").constrain(prio));
+ ObjectSet<DatastoreCheckerItem> results =
query.execute();
+ for(DatastoreCheckerItem item : results) {
+ if(item.chosenBy == context.bootID) continue;
+ SendableGet getter = item.getter;
+ if(getter == null) {
+ // FIXME is this normal or isn't it?
Probably ... if we don't always delete the DCI's ...
+ if(logMINOR) Logger.minor(this,
"Ignoring DatastoreCheckerItem because the SendableGet has already been deleted
from the database");
+ container.delete(item);
+ continue;
+ }
+ BlockSet blocks = item.blocks;
+ if(!container.ext().isStored(getter)) continue;
// Already deleted
+ container.activate(getter, 1);
+ boolean dontCache = getter.dontCache(container);
+ ClientRequestScheduler sched =
getter.getScheduler(context);
+ synchronized(this) {
+
if(persistentGetters[prio].contains(getter)) continue;
+ }
+ Key[] keys = getter.listKeys(container);
+ // FIXME check the store bloom filter using
store.probablyInStore().
+ item.chosenBy = context.bootID;
+ container.store(item);
+ synchronized(this) {
+
if(persistentGetters[prio].contains(getter)) continue;
+ ArrayList<Key> finalKeysToCheck = new
ArrayList<Key>();
+ for(Key key : keys) {
+ key = key.cloneKey();
+ finalKeysToCheck.add(key);
+ }
+ Key[] finalKeys =
+ finalKeysToCheck.toArray(new
Key[finalKeysToCheck.size()]);
+ persistentKeys[prio].add(finalKeys);
+ persistentGetters[prio].add(getter);
+
persistentDontCache[prio].add(dontCache);
+ persistentSchedulers[prio].add(sched);
+ persistentCheckerItems[prio].add(item);
+ persistentBlockSets[prio].add(blocks);
+ if(totalSize == 0)
+ notifyAll();
+ totalSize += finalKeys.length;
+ if(totalSize > MAX_PERSISTENT_KEYS) {
+ if(trimPersistentQueue(prio,
container)) return;
+ notifyAll();
+ }
+ }
+ container.deactivate(getter, 1);
+ }
+ }
+ }
+
+ /**
+ * Trim the queue of persistent requests until it is just over the
limit.
+	 * @param prio Only drop from priorities lower than this one.
+ * @return True unless the queue is under the limit.
+ */
+ private boolean trimPersistentQueue(short prio, ObjectContainer
container) {
+ synchronized(this) {
+ int preQueueSize = 0;
+ for(int i=0;i<prio;i++) {
+ for(int x=0;x<persistentKeys[i].size();x++)
+ preQueueSize +=
persistentKeys[i].get(x).length;
+ }
+ if(preQueueSize > MAX_PERSISTENT_KEYS) {
+ // Dump everything
+ for(int i=prio+1;i<persistentKeys.length;i++) {
+ while(!persistentKeys[i].isEmpty()) {
+ int idx =
persistentKeys[i].size() - 1;
+ DatastoreCheckerItem item =
persistentCheckerItems[i].remove(idx);
+
persistentSchedulers[i].remove(idx);
+
persistentDontCache[i].remove(idx);
+
persistentGetters[i].remove(idx);
+ persistentKeys[i].remove(idx);
+
persistentBlockSets[i].remove(idx);
+ item.chosenBy = 0;
+ container.store(item);
+ }
+ }
+ return true;
+ } else {
+ int postQueueSize = 0;
+ for(int i=prio+1;i<persistentKeys.length;i++) {
+ for(int
x=0;x<persistentKeys[i].size();x++)
+ postQueueSize +=
persistentKeys[i].get(x).length;
+ }
+ if(postQueueSize + preQueueSize <
MAX_PERSISTENT_KEYS)
+ return false;
+ // Need to dump some stuff.
+ for(int i=persistentKeys.length-1;i>prio;i--) {
+ while(!persistentKeys[i].isEmpty()) {
+ int idx =
persistentKeys[i].size() - 1;
+ DatastoreCheckerItem item =
persistentCheckerItems[i].remove(idx);
+
persistentSchedulers[i].remove(idx);
+
persistentDontCache[i].remove(idx);
+
persistentGetters[i].remove(idx);
+ Key[] keys =
persistentKeys[i].remove(idx);
+
persistentBlockSets[i].remove(idx);
+ item.chosenBy = 0;
+ container.store(item);
+ if(postQueueSize + preQueueSize
- keys.length < MAX_PERSISTENT_KEYS) {
+ return false;
+ }
+ }
+ }
+ // Still over the limit.
+ return true;
+ }
+ }
+ }
+
+ public void queueTransientRequest(SendableGet getter, BlockSet blocks) {
+ Key[] checkKeys = getter.listKeys(null);
+ short prio = getter.getPriorityClass(null);
+ // FIXME check using store.probablyInStore
+ ArrayList<Key> finalKeysToCheck = new ArrayList<Key>();
+ synchronized(this) {
+ for(Key key : checkKeys) {
+ finalKeysToCheck.add(key);
+ }
+ transientGetters[prio].add(getter);
+ transientKeys[prio].add(finalKeysToCheck.toArray(new
Key[finalKeysToCheck.size()]));
+ transientBlockSets[prio].add(blocks);
+ notifyAll();
+ }
+ }
+
+ /**
+ * Queue a persistent request. We will store a DatastoreCheckerItem,
then
+ * check the datastore (on the datastore checker thread), and then call
+ * finishRegister() (on the database thread). Caller must have already
+ * stored and registered the HasKeyListener if any.
+ * @param getter
+ */
+ public void queuePersistentRequest(SendableGet getter, BlockSet blocks,
ObjectContainer container) {
+ Key[] checkKeys = getter.listKeys(container);
+ short prio = getter.getPriorityClass(container);
+ boolean dontCache = getter.dontCache(container);
+ ClientRequestScheduler sched = getter.getScheduler(context);
+ DatastoreCheckerItem item = new DatastoreCheckerItem(getter,
context.nodeDBHandle, prio, blocks);
+ container.store(item);
+ container.activate(blocks, 5);
+ synchronized(this) {
+ // FIXME only add if queue not full.
+ int queueSize = 0;
+ // Only count queued keys at no higher priority than
this request.
+ for(short p = 0;p<=prio;p++) {
+ for(int x = 0;x<persistentKeys[p].size();x++) {
+ queueSize +=
persistentKeys[p].get(x).length;
+ }
+ }
+ if(queueSize > MAX_PERSISTENT_KEYS) return;
+ item.chosenBy = context.bootID;
+ container.store(item);
+ // FIXME check using store.probablyInStore
+ ArrayList<Key> finalKeysToCheck = new ArrayList<Key>();
+ for(Key key : checkKeys) {
+ finalKeysToCheck.add(key);
+ }
+ persistentGetters[prio].add(getter);
+ persistentKeys[prio].add(finalKeysToCheck.toArray(new
Key[finalKeysToCheck.size()]));
+ persistentDontCache[prio].add(dontCache);
+ persistentSchedulers[prio].add(sched);
+ persistentCheckerItems[prio].add(item);
+ persistentBlockSets[prio].add(blocks);
+ trimPersistentQueue(prio, container);
+ notifyAll();
+ }
+ }
+
+ public void run() {
+ while(true) {
+ try {
+ realRun();
+ } catch (Throwable t) {
+ Logger.error(this, "Caught "+t+" in datastore
checker thread", t);
+ }
+ }
+ }
+
+ private void realRun() {
+ boolean logMINOR = Logger.shouldLog(Logger.MINOR, this);
+ Key[] keys = null;
+ SendableGet getter = null;
+ boolean persistent = false;
+ boolean dontCache = false;
+ ClientRequestScheduler sched = null;
+ DatastoreCheckerItem item = null;
+ BlockSet blocks = null;
+ short priority = -1;
+ // If the queue is too large, don't check any more blocks. It
is possible
+ // that we can check the datastore faster than we can handle
the resulting
+		// blocks, which would cause OOM.
+ int queueSize =
context.jobRunner.getQueueSize(ClientRequestScheduler.TRIP_PENDING_PRIORITY);
+ if(queueSize > 500) {
+ // If the queue is over 500, don't run the datastore
checker at all.
+ // It's entirely possible that looking up blocks in the
store will
+ // make the situation first, because a key which is
queued for a
+ // non-persistent request may also be used by a
persistent one.
+
+			// FIXME consider setting a flag to not only check
transient
+			// requests, but also check whether the keys are in the
persistent
+			// bloom filters first, and if they are not, check them.
+ try {
+ Thread.sleep(10*1000);
+ } catch (InterruptedException e) {
+ // Ignore
+ }
+ return;
+ }
+ // If it's over 100, don't check blocks from persistent
requests.
+ boolean notPersistent = queueSize > 100;
+ synchronized(this) {
+ while(true) {
+ for(short prio =
0;prio<transientKeys.length;prio++) {
+ if(!transientKeys[prio].isEmpty()) {
+ keys =
transientKeys[prio].remove(0);
+ getter =
transientGetters[prio].remove(0);
+ persistent = false;
+ item = null;
+ blocks =
transientBlockSets[prio].remove(0);
+ priority = prio;
+ break;
+ } else if((!notPersistent) &&
(!persistentGetters[prio].isEmpty())) {
+ keys =
persistentKeys[prio].remove(0);
+ getter =
persistentGetters[prio].remove(0);
+ persistent = true;
+ dontCache =
persistentDontCache[prio].remove(0);
+ sched =
persistentSchedulers[prio].remove(0);
+ item =
persistentCheckerItems[prio].remove(0);
+ blocks =
persistentBlockSets[prio].remove(0);
+ priority = prio;
+ break;
+ }
+ }
+ if(keys == null) {
+ try {
+ wait(100*1000);
+ } catch (InterruptedException e) {
+ // Ok
+ }
+ context.jobRunner.queue(loader,
NativeThread.HIGH_PRIORITY, true);
+ continue;
+ }
+ break;
+ }
+ }
+ if(!persistent) {
+ dontCache = getter.dontCache(null);
+ sched = getter.getScheduler(context);
+ }
+ boolean anyValid = false;
+ for(Key key : keys) {
+ KeyBlock block = null;
+ if(blocks != null)
+ block = blocks.get(key);
+ if(blocks == null)
+ block = node.fetch(key, dontCache);
+ if(block != null) {
+ if(logMINOR) Logger.minor(this, "Found key");
+ if(key instanceof NodeSSK)
+ sched.tripPendingKey(block);
+ else // CHK
+ sched.tripPendingKey(block);
+ } else {
+ anyValid = true;
+ }
+// synchronized(this) {
+// keysToCheck[priority].remove(key);
+// }
+ }
+ if(persistent)
+ context.jobRunner.queue(loader,
NativeThread.HIGH_PRIORITY, true);
+ if(persistent) {
+ final SendableGet get = getter;
+ final ClientRequestScheduler scheduler = sched;
+ final boolean valid = anyValid;
+ final DatastoreCheckerItem it = item;
+ context.jobRunner.queue(new DBJob() {
+
+ public void run(ObjectContainer container,
ClientContext context) {
+ if(container.ext().isActive(get)) {
+ Logger.error(this, "ALREADY
ACTIVATED: "+get);
+ }
+ if(!container.ext().isStored(get)) {
+ // Completed and deleted
already.
+
if(Logger.shouldLog(Logger.MINOR, this))
+ Logger.minor(this,
"Already deleted from database");
+ container.delete(it);
+ return;
+ }
+ container.activate(get, 1);
+ scheduler.finishRegister(new
SendableGet[] { get }, true, true, container, valid, it);
+ container.deactivate(get, 1);
+ loader.run(container, context);
+ }
+
+ }, NativeThread.NORM_PRIORITY, false);
+ } else {
+ sched.finishRegister(new SendableGet[] { getter },
false, false, null, anyValid, item);
+ }
+ }
+
+ synchronized void wakeUp() {
+ notifyAll();
+ }
+
+ public void start(Executor executor, String name) {
+ context.jobRunner.queue(loader, NativeThread.HIGH_PRIORITY-1,
true);
+ executor.execute(this, name);
+ }
+
+ public int getPriority() {
+ return NativeThread.NORM_PRIORITY;
+ }
+
+ public boolean objectCanNew(ObjectContainer container) {
+ Logger.error(this, "Not storing DatastoreChecker in database",
new Exception("error"));
+ return false;
+ }
+
+}
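The checker is a single consumer thread draining per-priority queues, woken by notifyAll() from the queue*Request() producers and from wakeUp(). A condensed sketch of the same pattern with the Freenet specifics stripped out (all names here are illustrative):

    import java.util.ArrayList;

    class PriorityConsumer implements Runnable {

        private final ArrayList<String>[] queues;

        @SuppressWarnings("unchecked")
        PriorityConsumer(int priorities) {
            queues = new ArrayList[priorities];
            for(int i = 0; i < priorities; i++)
                queues[i] = new ArrayList<String>();
        }

        synchronized void enqueue(int prio, String item) {
            queues[prio].add(item);
            notifyAll(); // wake the consumer, as queueTransientRequest() does
        }

        public void run() {
            while(true) {
                String item = null;
                synchronized(this) {
                    while(item == null) {
                        // Index 0 is the highest priority, as in the checker.
                        for(int prio = 0; prio < queues.length; prio++) {
                            if(!queues[prio].isEmpty()) {
                                item = queues[prio].remove(0);
                                break;
                            }
                        }
                        if(item == null) {
                            try {
                                wait(100 * 1000);
                            } catch (InterruptedException e) {
                                // Ignore, as realRun() does.
                            }
                        }
                    }
                }
                // Do the slow work (the datastore lookups) outside the lock.
                System.out.println("Checked " + item);
            }
        }
    }
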
Copied: trunk/freenet/src/freenet/client/async/DatastoreCheckerItem.java (from
rev 26320,
branches/db4o/freenet/src/freenet/client/async/DatastoreCheckerItem.java)
===================================================================
--- trunk/freenet/src/freenet/client/async/DatastoreCheckerItem.java
(rev 0)
+++ trunk/freenet/src/freenet/client/async/DatastoreCheckerItem.java
2009-04-01 20:34:09 UTC (rev 26322)
@@ -0,0 +1,26 @@
+package freenet.client.async;
+
+import freenet.node.SendableGet;
+
+/**
+ * Persistent tag for a persistent request which needs to check the datastore
+ * and then be registered.
+ * @author Matthew Toseland <toad at amphibian.dyndns.org> (0xE43DA450)
+ *
+ */
+public class DatastoreCheckerItem {
+
+ final long nodeDBHandle;
+ final SendableGet getter;
+ final short prio;
+ long chosenBy;
+ final BlockSet blocks;
+
+ DatastoreCheckerItem(SendableGet getter, long nodeDBHandle, short prio,
BlockSet blocks) {
+ this.getter = getter;
+ this.nodeDBHandle = nodeDBHandle;
+ this.prio = prio;
+ this.blocks = blocks;
+ }
+
+}
Copied: trunk/freenet/src/freenet/client/async/Db4oBugs.java (from rev 26320,
branches/db4o/freenet/src/freenet/client/async/Db4oBugs.java)
===================================================================
--- trunk/freenet/src/freenet/client/async/Db4oBugs.java
(rev 0)
+++ trunk/freenet/src/freenet/client/async/Db4oBugs.java 2009-04-01
20:34:09 UTC (rev 26322)
@@ -0,0 +1,44 @@
+package freenet.client.async;
+
+import com.db4o.ObjectContainer;
+import com.db4o.ObjectSet;
+import com.db4o.query.Query;
+
+/** A collection of bug workarounds for everyone's favourite object database!
*/
+public class Db4oBugs {
+
+ public static<T extends Object> ObjectSet<T> query(ObjectContainer
container, Class<T> clazz) {
+ // db4o 7.4.84.12673 throws a RuntimeException: Not supported
+ // when we use this documented API to query all elements of a
class. Isn't that great? :(
+ // FIXME file a bug with db4o
+//		ObjectSet<T> results =
+//			container.query(clazz);
+		Query query = container.query();
+		query.constrain(clazz);
+ return query.execute();
+ }
+
+ /* http://tracker.db4o.com/browse/COR-1436
+ * ArrayList's etc must be stored with:
+ *
+ * container.ext().store(list, 2)
+ *
+ * Otherwise everything contained in the arraylist is updated to depth
3, which
+ * is not usually what we want, and can be catastrophic due to storing
deactivated
+ * objects and/or using up lots of memory. */
+
+ /* http://tracker.db4o.com/browse/COR-1582
+ * Never activate a HashMap to depth 1. This will not only result in
its being
+ * empty, but activating it to depth 2 and thus loading its elements
will not be
+ * possible unless you deactivate it first. Combined with the previous
bug this
+ * can cause *really* annoying bugs: maps apparently spontaneously
clearing
+ * themselves, actual cause was it was accidentally activated to depth
1, and then
+ * was accidentally stored by e.g. being moved from one ArrayList to
another with
+ * the previous bug. */
+
+ /* We are using an oldish version of db4o 7.4 in ext #26 because the
newer versions,
+ * e.g. in ext-27pre2, have *really* horrible bugs - all sorts of weird
things
+ * happen with them, the FEC object duplication bug happens with File's
as well,
+ * there seem to be random object disappearances, there are many many
horrible
+ * things... */
+}
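Applying the first two workarounds together (the list and its holder are hypothetical, and extending the COR-1582 advice from HashMap to other collections is an assumption on the safe side):

    void addKey(ObjectContainer container, ArrayList<Key> list, Key newKey) {
        // COR-1582: activate to depth 2, never depth 1, or the collection
        // may appear empty and then refuse to load its elements later.
        container.activate(list, 2);
        list.add(newKey);
        // COR-1436: store the list itself at depth 2 explicitly; a plain
        // container.store(list) cascades updates to depth 3, which can
        // write out deactivated member objects.
        container.ext().store(list, 2);
    }
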
Copied: trunk/freenet/src/freenet/client/async/Encodeable.java (from rev 26320,
branches/db4o/freenet/src/freenet/client/async/Encodeable.java)
===================================================================
--- trunk/freenet/src/freenet/client/async/Encodeable.java
(rev 0)
+++ trunk/freenet/src/freenet/client/async/Encodeable.java 2009-04-01
20:34:09 UTC (rev 26322)
@@ -0,0 +1,14 @@
+package freenet.client.async;
+
+import com.db4o.ObjectContainer;
+
+public interface Encodeable {
+
+ /** Attempt to encode the block, if necessary */
+ public void tryEncode(ObjectContainer container, ClientContext context);
+
+ public boolean persistent();
+
+ public short getPriorityClass(ObjectContainer container);
+
+}
Modified: trunk/freenet/src/freenet/client/async/GetCompletionCallback.java
===================================================================
--- trunk/freenet/src/freenet/client/async/GetCompletionCallback.java
2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/client/async/GetCompletionCallback.java
2009-04-01 20:34:09 UTC (rev 26322)
@@ -3,6 +3,8 @@
* http://www.gnu.org/ for further details of the GPL. */
package freenet.client.async;
+import com.db4o.ObjectContainer;
+
import freenet.client.FetchException;
import freenet.client.FetchResult;
@@ -12,21 +14,21 @@
*/
public interface GetCompletionCallback {
- public void onSuccess(FetchResult result, ClientGetState state);
+ public void onSuccess(FetchResult result, ClientGetState state,
ObjectContainer container, ClientContext context);
- public void onFailure(FetchException e, ClientGetState state);
+ public void onFailure(FetchException e, ClientGetState state,
ObjectContainer container, ClientContext context);
/** Called when the ClientGetState knows that it knows about
* all the blocks it will need to fetch.
*/
- public void onBlockSetFinished(ClientGetState state);
+ public void onBlockSetFinished(ClientGetState state, ObjectContainer
container, ClientContext context);
- public void onTransition(ClientGetState oldState, ClientGetState
newState);
+ public void onTransition(ClientGetState oldState, ClientGetState
newState, ObjectContainer container);
- public void onExpectedSize(long size);
+ public void onExpectedSize(long size, ObjectContainer container);
- public void onExpectedMIME(String mime);
+ public void onExpectedMIME(String mime, ObjectContainer container);
- public void onFinalizedMetadata();
+ public void onFinalizedMetadata(ObjectContainer container);
}
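Every implementer now has to accept the ObjectContainer/ClientContext pair. A minimal transient implementation against the new signatures might look like this (illustrative only, not a class from this commit):

    import com.db4o.ObjectContainer;

    import freenet.client.FetchException;
    import freenet.client.FetchResult;

    public class LoggingGetCallback implements GetCompletionCallback {

        public void onSuccess(FetchResult result, ClientGetState state,
                ObjectContainer container, ClientContext context) {
            System.out.println("Fetch succeeded: " + state);
        }

        public void onFailure(FetchException e, ClientGetState state,
                ObjectContainer container, ClientContext context) {
            System.out.println("Fetch failed: " + e);
        }

        // A transient callback can ignore the container in the remaining
        // notifications; a persistent one would store its state with it.
        public void onBlockSetFinished(ClientGetState state,
                ObjectContainer container, ClientContext context) {}
        public void onTransition(ClientGetState oldState,
                ClientGetState newState, ObjectContainer container) {}
        public void onExpectedSize(long size, ObjectContainer container) {}
        public void onExpectedMIME(String mime, ObjectContainer container) {}
        public void onFinalizedMetadata(ObjectContainer container) {}
    }
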
Copied: trunk/freenet/src/freenet/client/async/HasKeyListener.java (from rev
26320, branches/db4o/freenet/src/freenet/client/async/HasKeyListener.java)
===================================================================
--- trunk/freenet/src/freenet/client/async/HasKeyListener.java
(rev 0)
+++ trunk/freenet/src/freenet/client/async/HasKeyListener.java 2009-04-01
20:34:09 UTC (rev 26322)
@@ -0,0 +1,30 @@
+package freenet.client.async;
+
+import java.io.IOException;
+
+import com.db4o.ObjectContainer;
+
+/**
+ * Interface to show that we can create a KeyListener callback.
+ * @author Matthew Toseland <toad at amphibian.dyndns.org> (0xE43DA450)
+ */
+public interface HasKeyListener {
+
+ /**
+ * Create a KeyListener, a transient object used to determine which
keys we
+ * want, and to handle any blocks found.
+ * @return Null if the HasKeyListener is finished/cancelled/etc.
+	 * @throws KeyListenerConstructionException if creating the listener fails.
+ */
+ KeyListener makeKeyListener(ObjectContainer container, ClientContext
context) throws KeyListenerConstructionException;
+
+ /**
+ * Is it cancelled?
+ */
+ boolean isCancelled(ObjectContainer container);
+
+ /**
+ * Notify that makeKeyListener() failed.
+ */
+ void onFailed(KeyListenerConstructionException e, ObjectContainer
container, ClientContext context);
+}
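The intended flow on the calling side, as the javadoc above describes it: ask for a transient KeyListener, report construction failures back, and treat null as finished or cancelled. A sketch (the hasListener reference is assumed):

    KeyListener listener;
    try {
        listener = hasListener.makeKeyListener(container, context);
    } catch (KeyListenerConstructionException e) {
        hasListener.onFailed(e, container, context);
        return;
    }
    if(listener == null) return; // finished/cancelled, nothing to register
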
Modified: trunk/freenet/src/freenet/client/async/HealingQueue.java
===================================================================
--- trunk/freenet/src/freenet/client/async/HealingQueue.java 2009-04-01
20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/client/async/HealingQueue.java 2009-04-01
20:34:09 UTC (rev 26322)
@@ -8,6 +8,6 @@
public interface HealingQueue {
/** Queue a Bucket of data to insert as a CHK. */
- void queue(Bucket data);
+ void queue(Bucket data, ClientContext context);
}
Copied: trunk/freenet/src/freenet/client/async/InsertCompressor.java (from rev
26320, branches/db4o/freenet/src/freenet/client/async/InsertCompressor.java)
===================================================================
--- trunk/freenet/src/freenet/client/async/InsertCompressor.java
(rev 0)
+++ trunk/freenet/src/freenet/client/async/InsertCompressor.java
2009-04-01 20:34:09 UTC (rev 26322)
@@ -0,0 +1,310 @@
+package freenet.client.async;
+
+import java.io.IOException;
+
+import com.db4o.ObjectContainer;
+import com.db4o.ObjectSet;
+import com.db4o.query.Query;
+
+import freenet.client.InsertException;
+import freenet.keys.CHKBlock;
+import freenet.keys.NodeCHK;
+import freenet.node.PrioRunnable;
+import freenet.support.Logger;
+import freenet.support.api.Bucket;
+import freenet.support.api.BucketFactory;
+import freenet.support.compress.CompressJob;
+import freenet.support.compress.CompressionOutputSizeException;
+import freenet.support.compress.Compressor.COMPRESSOR_TYPE;
+import freenet.support.io.BucketChainBucketFactory;
+import freenet.support.io.NativeThread;
+
+/**
+ * Compress a file in order to insert it. This class acts as a tag in the
database to ensure that inserts
+ * are not forgotten about, and also can be run on a non-database thread from
an executor.
+ *
+ * FIXME how many compressors do we want to have running simultaneously?
Probably we should have a compression
+ * queue, or at least a SerialExecutor?
+ *
+ * @author toad
+ */
+public class InsertCompressor implements CompressJob {
+
+ /** Database handle to identify which node it belongs to in the
database */
+ public final long nodeDBHandle;
+ /** The SingleFileInserter we report to. We were created by it and when
we have compressed our data we will
+ * call a method to process it and schedule the data. */
+ public final SingleFileInserter inserter;
+ /** The original data */
+ final Bucket origData;
+ /** If we can get it into one block, don't compress any further */
+ public final int minSize;
+ /** BucketFactory */
+ public final BucketFactory bucketFactory;
+ public final boolean persistent;
+ private transient boolean scheduled;
+ private static boolean logMINOR;
+
+ public InsertCompressor(long nodeDBHandle2, SingleFileInserter
inserter2, Bucket origData2, int minSize2, BucketFactory bf, boolean
persistent) {
+ this.nodeDBHandle = nodeDBHandle2;
+ this.inserter = inserter2;
+ this.origData = origData2;
+ this.minSize = minSize2;
+ this.bucketFactory = bf;
+ this.persistent = persistent;
+ }
+
+ public void init(ObjectContainer container, final ClientContext ctx) {
+ logMINOR = Logger.shouldLog(Logger.MINOR, this);
+ if(persistent) {
+ container.activate(inserter, 1);
+ container.activate(origData, 1);
+ }
+ if(origData == null) {
+ if(inserter == null || inserter.cancelled()) {
+ container.delete(this);
+ return; // Inserter was cancelled, we weren't
told.
+ } else if(inserter.started()) {
+ Logger.error(this, "Inserter started already,
but we are about to attempt to compress the data!");
+ container.delete(this);
+ return; // Already started, no point ... but
this really shouldn't happen.
+ } else {
+ Logger.error(this, "Original data was deleted
but inserter neither deleted nor cancelled nor missing!");
+ container.delete(this);
+ return;
+ }
+ }
+ synchronized(this) {
+ // Can happen with the above activation and lazy query
evaluation.
+ if(scheduled) {
+ Logger.error(this, "Already scheduled
compression, not rescheduling");
+ return;
+ }
+ scheduled = true;
+ }
+ if(Logger.shouldLog(Logger.MINOR, this))
+ Logger.minor(this, "Compressing "+this+" :
origData.size="+origData.size()+" for "+inserter);
+ ctx.rc.enqueueNewJob(this);
+ }
+
+ public void tryCompress(final ClientContext context) throws
InsertException {
+ logMINOR = Logger.shouldLog(Logger.MINOR, this);
+ long origSize = origData.size();
+ COMPRESSOR_TYPE bestCodec = null;
+ Bucket bestCompressedData = origData;
+ long bestCompressedDataSize = origSize;
+
+ if(logMINOR) Logger.minor(this, "Attempt to compress the data");
+ // Try to compress the data.
+ // Try each algorithm, starting with the fastest and weakest.
+ // Stop when run out of algorithms, or the compressed data fits
in a single block.
+ try {
+ for(final COMPRESSOR_TYPE comp :
COMPRESSOR_TYPE.values()) {
+ boolean shouldFreeOnFinally = true;
+ Bucket result = null;
+ try {
+ if(logMINOR)
+ Logger.minor(this, "Attempt to compress
using " + comp);
+ // Only produce if we are compressing *the
original data*
+ final int phase = comp.metadataID;
+ if(persistent) {
+ context.jobRunner.queue(new DBJob() {
+
+ public void run(ObjectContainer
container, ClientContext context) {
+
if(!container.ext().isStored(inserter)) {
+ if(logMINOR)
Logger.minor(this, "Already deleted (start compression): "+inserter+" for
"+InsertCompressor.this);
+ return;
+ }
+
if(container.ext().isActive(inserter))
+
Logger.error(this, "ALREADY ACTIVE in start compression callback: "+inserter);
+
container.activate(inserter, 1);
+
inserter.onStartCompression(comp, container, context);
+
container.deactivate(inserter, 1);
+ }
+
+ }, NativeThread.NORM_PRIORITY+1, false);
+ } else {
+ try {
+
inserter.onStartCompression(comp, null, context);
+ } catch (Throwable t) {
+ Logger.error(this, "Transient
insert callback threw "+t, t);
+ }
+ }
+
+ result = comp.compress(origData, new
BucketChainBucketFactory(bucketFactory, NodeCHK.BLOCK_SIZE, persistent ?
context.jobRunner : null, 1024), origSize, bestCompressedDataSize);
+ long resultSize = result.size();
+ if(resultSize < minSize) {
+ bestCodec = comp;
+ if(bestCompressedData != null)
+ // Don't need to removeFrom() :
we haven't stored it.
+ bestCompressedData.free();
+ bestCompressedData = result;
+ bestCompressedDataSize = resultSize;
+ shouldFreeOnFinally = false;
+ break;
+ }
+ if(resultSize < bestCompressedDataSize &&
+ // If compressing to CHK,
origSize will always be greater
+ // If compressing to SSK, we
are not interested unless we can get it small enough to fit in the SSK itself
+ origSize >
CHKBlock.DATA_LENGTH) {
+ if(logMINOR)
+ Logger.minor(this, "New size
"+resultSize+" better than old best "+bestCompressedDataSize);
+ if(bestCompressedData != null &&
bestCompressedData != origData)
+ bestCompressedData.free();
+ bestCompressedData = result;
+ bestCompressedDataSize = resultSize;
+ bestCodec = comp;
+ shouldFreeOnFinally = false;
+ }
+ } catch(CompressionOutputSizeException e) {
+ continue; // try next compressor
type
+ } finally {
+ if(shouldFreeOnFinally && (result !=
null) && result != origData)
+ result.free();
+ }
+ }
+
+ final CompressionOutput output = new
CompressionOutput(bestCompressedData, bestCodec);
+
+ if(persistent) {
+
+ context.jobRunner.queue(new DBJob() {
+
+ public void run(ObjectContainer
container, ClientContext context) {
+
if(!container.ext().isStored(inserter)) {
+ if(logMINOR)
Logger.minor(this, "Already deleted: "+inserter+" for "+InsertCompressor.this);
+
container.delete(InsertCompressor.this);
+ return;
+ }
+
if(container.ext().isActive(inserter))
+ Logger.error(this,
"ALREADY ACTIVE in compressed callback: "+inserter);
+ container.activate(inserter, 1);
+ inserter.onCompressed(output,
container, context);
+ container.deactivate(inserter,
1);
+
container.delete(InsertCompressor.this);
+ }
+
+ }, NativeThread.NORM_PRIORITY+1, false);
+ } else {
+ // We do it off thread so that RealCompressor
can release the semaphore
+ context.mainExecutor.execute(new PrioRunnable()
{
+
+ public int getPriority() {
+ return
NativeThread.NORM_PRIORITY;
+ }
+
+ public void run() {
+ try {
+
inserter.onCompressed(output, null, context);
+ } catch (Throwable t) {
+ Logger.error(this,
"Caught "+t+" running compression job", t);
+
context.jobRunner.queue(new DBJob() {
+
+ public void
run(ObjectContainer container, ClientContext context) {
+
container.delete(InsertCompressor.this);
+ }
+
+ },
NativeThread.NORM_PRIORITY+1, false);
+ }
+ }
+
+ }, "Insert thread for "+this);
+ }
+
+ } catch (final IOException e) {
+ if(persistent) {
+ context.jobRunner.queue(new DBJob() {
+
+ public void run(ObjectContainer
container, ClientContext context) {
+
if(!container.ext().isStored(inserter)) {
+ if(logMINOR)
Logger.minor(this, "Already deleted (on failed): "+inserter+" for
"+InsertCompressor.this);
+
container.delete(InsertCompressor.this);
+ return;
+ }
+
if(container.ext().isActive(inserter))
+ Logger.error(this,
"ALREADY ACTIVE in compress failure callback: "+inserter);
+ container.activate(inserter, 1);
+ container.activate(inserter.cb,
1);
+ inserter.cb.onFailure(new
InsertException(InsertException.BUCKET_ERROR, e, null), inserter, container,
context);
+
container.deactivate(inserter.cb, 1);
+ container.deactivate(inserter,
1);
+
container.delete(InsertCompressor.this);
+ }
+
+ }, NativeThread.NORM_PRIORITY+1, false);
+ } else {
+ inserter.cb.onFailure(new
InsertException(InsertException.BUCKET_ERROR, e, null), inserter, null,
context);
+ }
+
+ }
+ }
+
+ /**
+	 * Create an InsertCompressor, add it to the database, and schedule it.
+	 * @param container The database, or null if the insert is transient.
+	 * @param context The client context.
+	 * @param inserter The SingleFileInserter we report back to.
+	 * @param origData The original data to compress.
+	 * @param minSize If the output fits in this many bytes, stop compressing.
+	 * @param bf Factory for the buckets holding the compressed output.
+	 * @return The InsertCompressor, already scheduled.
+ */
+ public static InsertCompressor start(ObjectContainer container,
ClientContext context, SingleFileInserter inserter,
+ Bucket origData, int minSize, BucketFactory bf, boolean
persistent) {
+ if(persistent != (container != null))
+ throw new IllegalStateException("Starting compression,
persistent="+persistent+" but container="+container);
+ InsertCompressor compressor = new
InsertCompressor(context.nodeDBHandle, inserter, origData, minSize, bf,
persistent);
+ if(persistent)
+ container.store(compressor);
+ compressor.init(container, context);
+ return compressor;
+ }
+
+ public static void load(ObjectContainer container, ClientContext
context) {
+ final long handle = context.nodeDBHandle;
+ Query query = container.query();
+ query.constrain(InsertCompressor.class);
+ query.descend("nodeDBHandle").constrain(handle);
+ ObjectSet<InsertCompressor> results = query.execute();
+ while(results.hasNext()) {
+ InsertCompressor comp = results.next();
+ if(!container.ext().isActive(comp)) {
+ Logger.error(InsertCompressor.class,
"InsertCompressor not activated by query?!?!");
+ container.activate(comp, 1);
+ }
+ comp.init(container, context);
+ }
+ }
+
+ public void onFailure(final InsertException e, ClientPutState c,
ClientContext context) {
+ if(persistent) {
+ context.jobRunner.queue(new DBJob() {
+
+ public void run(ObjectContainer container,
ClientContext context) {
+ if(container.ext().isActive(inserter))
+ Logger.error(this, "ALREADY
ACTIVE in compress failure callback: "+inserter);
+ container.activate(inserter, 1);
+ container.activate(inserter.cb, 1);
+ inserter.cb.onFailure(e, inserter,
container, context);
+ container.deactivate(inserter.cb, 1);
+ container.deactivate(inserter, 1);
+ container.delete(InsertCompressor.this);
+ }
+
+ }, NativeThread.NORM_PRIORITY+1, false);
+ } else {
+ inserter.cb.onFailure(e, inserter, null, context);
+ }
+
+ }
+
+}
+
+class CompressionOutput {
+ public CompressionOutput(Bucket bestCompressedData, COMPRESSOR_TYPE
bestCodec2) {
+ this.data = bestCompressedData;
+ this.bestCodec = bestCodec2;
+ }
+ final Bucket data;
+ final COMPRESSOR_TYPE bestCodec;
+}
\ No newline at end of file
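Callers do not construct InsertCompressor directly; the static start() stores the tag (when persistent) and schedules the compression job in one step. A sketch of the call site (the minSize choice and the bucketFactory reference are assumptions, not taken from this diff):

    // From inside SingleFileInserter, once the data to insert is known:
    InsertCompressor.start(container, context, this, origData,
            CHKBlock.DATA_LENGTH, // stop once the output fits one CHK block
            bucketFactory, persistent());
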
Copied: trunk/freenet/src/freenet/client/async/KeyListener.java (from rev
26320, branches/db4o/freenet/src/freenet/client/async/KeyListener.java)
===================================================================
--- trunk/freenet/src/freenet/client/async/KeyListener.java
(rev 0)
+++ trunk/freenet/src/freenet/client/async/KeyListener.java 2009-04-01
20:34:09 UTC (rev 26322)
@@ -0,0 +1,95 @@
+package freenet.client.async;
+
+import com.db4o.ObjectContainer;
+
+import freenet.keys.Key;
+import freenet.keys.KeyBlock;
+import freenet.node.SendableGet;
+
+/**
+ * Transient object created on startup for persistent requests (or at creation
+ * time for non-persistent requests), to monitor the stream of successfully
+ * fetched keys. If a key appears interesting, we schedule a job on the
database
+ * thread to double-check and process the data if we still want it.
+ * @author Matthew Toseland <toad at amphibian.dyndns.org> (0xE43DA450)
+ *
+ * saltedKey is the routing key from the key, salted globally (concat a global
+ * salt value and then SHA) in order to save some cycles. Implementations that
+ * use two internal bloom filters may need to have an additional local salt, as
+ * in SplitFileFetcherKeyListener.
+ */
+public interface KeyListener {
+
+ /**
+ * Fast guess at whether we want a key or not. Usually implemented by a
+ * bloom filter.
+ * LOCKING: Should avoid external locking if possible. Will be called
+ * within the CRSBase lock.
+ * @return True if we probably want the key. False if we definitely
don't
+ * want it.
+ */
+ public boolean probablyWantKey(Key key, byte[] saltedKey);
+
+ /**
+ * Do we want the key? This is called by the ULPR code, because
fetching the
+ * key will involve significant work. tripPendingKey() on the other hand
+ * will go straight to handleBlock().
+ * @return -1 if we don't want the key, otherwise the priority of the
request
+ * interested in the key.
+ */
+ public short definitelyWantKey(Key key, byte[] saltedKey,
ObjectContainer container, ClientContext context);
+
+ /**
+ * Find the requests related to a specific key, used in retrying after
cooldown.
+ * Caller should call probablyWantKey() first.
+ */
+ public SendableGet[] getRequestsForKey(Key key, byte[] saltedKey,
ObjectContainer container, ClientContext context);
+
+ /**
+ * Handle the found data, if we really want it.
+ */
+ public boolean handleBlock(Key key, byte[] saltedKey, KeyBlock found,
ObjectContainer container, ClientContext context);
+
+ /**
+ * Is this related to a persistent request?
+ */
+ boolean persistent();
+
+ /**
+ * Priority of the associated request.
+ * LOCKING: Should avoid external locking if possible. Will be called
+ * within the CRSBase lock.
+ * @param container Database handle.
+ */
+ short getPriorityClass(ObjectContainer container);
+
+ /**
+ * @return True if when checking the datastore on initial registration,
we
+ * should not promote any blocks found.
+ */
+ public abstract boolean dontCache();
+
+ public long countKeys();
+
+ /**
+ * @return The parent HasKeyListener. This does mean it will be pinned
in
+ * RAM, but it can be deactivated so it's not a big deal.
+ * LOCKING: Should avoid external locking if possible. Will be called
+ * within the CRSBase lock.
+ */
+ public HasKeyListener getHasKeyListener();
+
+ /**
+ * Deactivate the request once it has been removed.
+ */
+ public void onRemove();
+
+ /**
+ * Has the request finished? If every key has been found, or enough
keys have
+ * been found, return true so that the caller can remove it from the
list.
+ */
+ public boolean isEmpty();
+
+ public boolean isSSK();
+
+}
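A cut-down illustration of the contract for a listener interested in exactly one key (a sketch, not any actual Freenet class; a real listener would be produced by HasKeyListener.makeKeyListener() and would decode the found block):

    import java.util.Arrays;

    import com.db4o.ObjectContainer;

    import freenet.keys.Key;
    import freenet.keys.KeyBlock;
    import freenet.keys.NodeSSK;
    import freenet.node.SendableGet;

    class OneKeyListener implements KeyListener {

        private final Key wanted;
        private final byte[] wantedSalted;
        private final HasKeyListener parent;
        private final short prio;
        private volatile boolean found;

        OneKeyListener(Key wanted, byte[] wantedSalted, HasKeyListener parent, short prio) {
            this.wanted = wanted;
            this.wantedSalted = wantedSalted;
            this.parent = parent;
            this.prio = prio;
        }

        public boolean probablyWantKey(Key key, byte[] saltedKey) {
            // With a single key there is no need for a bloom filter.
            return !found && Arrays.equals(saltedKey, wantedSalted);
        }

        public short definitelyWantKey(Key key, byte[] saltedKey, ObjectContainer container, ClientContext context) {
            return (!found && key.equals(wanted)) ? prio : (short) -1;
        }

        public SendableGet[] getRequestsForKey(Key key, byte[] saltedKey, ObjectContainer container, ClientContext context) {
            return new SendableGet[0]; // omitted in this sketch
        }

        public boolean handleBlock(Key key, byte[] saltedKey, KeyBlock block, ObjectContainer container, ClientContext context) {
            if(found || !key.equals(wanted)) return false;
            found = true;
            // A real listener would decode and pass on the block here.
            return true;
        }

        public boolean persistent() { return false; }
        public short getPriorityClass(ObjectContainer container) { return prio; }
        public boolean dontCache() { return false; }
        public long countKeys() { return found ? 0 : 1; }
        public HasKeyListener getHasKeyListener() { return parent; }
        public void onRemove() { /* nothing to deactivate */ }
        public boolean isEmpty() { return found; }
        public boolean isSSK() { return wanted instanceof NodeSSK; }
    }
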
Copied:
trunk/freenet/src/freenet/client/async/KeyListenerConstructionException.java
(from rev 26320,
branches/db4o/freenet/src/freenet/client/async/KeyListenerConstructionException.java)
===================================================================
---
trunk/freenet/src/freenet/client/async/KeyListenerConstructionException.java
(rev 0)
+++
trunk/freenet/src/freenet/client/async/KeyListenerConstructionException.java
2009-04-01 20:34:09 UTC (rev 26322)
@@ -0,0 +1,20 @@
+package freenet.client.async;
+
+import freenet.client.FetchException;
+
+/**
+ * Thrown when creating a KeyListener fails.
+ * @author Matthew Toseland <toad at amphibian.dyndns.org> (0xE43DA450)
+ *
+ */
+public class KeyListenerConstructionException extends Exception {
+
+ KeyListenerConstructionException(FetchException e) {
+ super(e);
+ }
+
+ public FetchException getFetchException() {
+ return (FetchException) getCause();
+ }
+
+}
Modified: trunk/freenet/src/freenet/client/async/ManifestElement.java
===================================================================
--- trunk/freenet/src/freenet/client/async/ManifestElement.java 2009-04-01
20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/client/async/ManifestElement.java 2009-04-01
20:34:09 UTC (rev 26322)
@@ -3,6 +3,8 @@
* http://www.gnu.org/ for further details of the GPL. */
package freenet.client.async;
+import com.db4o.ObjectContainer;
+
import freenet.keys.FreenetURI;
import freenet.support.api.Bucket;
@@ -17,7 +19,7 @@
final String fullName;
/** Data to be inserted. Can be null, if the insert has completed. */
- final Bucket data;
+ Bucket data;
/** MIME type override. null => use default for filename */
final String mimeOverride;
@@ -78,9 +80,17 @@
return false;
}
- public void freeData() {
- if(data != null)
+ public void freeData(ObjectContainer container, boolean persistForever)
{
+ if(data != null) {
+ if(persistForever)
+ container.activate(data, 1);
data.free();
+ if(persistForever)
+ data.removeFrom(container);
+ data = null;
+ }
+ if(persistForever)
+ container.delete(this);
}
public String getName() {
@@ -102,4 +112,10 @@
public FreenetURI getTargetURI() {
return targetURI;
}
+
+ public void removeFrom(ObjectContainer container) {
+ data.removeFrom(container);
+ targetURI.removeFrom(container);
+ container.delete(this);
+ }
}
Modified: trunk/freenet/src/freenet/client/async/MinimalSplitfileBlock.java
===================================================================
--- trunk/freenet/src/freenet/client/async/MinimalSplitfileBlock.java
2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/client/async/MinimalSplitfileBlock.java
2009-04-01 20:34:09 UTC (rev 26322)
@@ -1,6 +1,9 @@
package freenet.client.async;
+import com.db4o.ObjectContainer;
+
import freenet.client.SplitfileBlock;
+import freenet.support.Logger;
import freenet.support.api.Bucket;
public class MinimalSplitfileBlock implements SplitfileBlock {
@@ -27,5 +30,23 @@
public void setData(Bucket data) {
this.data = data;
}
+
+ public void objectOnDeactivate(ObjectContainer container) {
+ if(Logger.shouldLog(Logger.MINOR, this))
+ Logger.minor(this, "Deactivating "+this, new
Exception("debug"));
+ }
+ public void storeTo(ObjectContainer container) {
+ if(Logger.shouldLog(Logger.MINOR, this))
+ Logger.minor(this, "Storing "+this+" with data: "+data);
+ if(data != null)
+ data.storeTo(container);
+ container.store(this);
+ }
+
+ public void removeFrom(ObjectContainer container) {
+ if(data != null) data.removeFrom(container);
+ container.delete(this);
+ }
+
}
Modified: trunk/freenet/src/freenet/client/async/MultiPutCompletionCallback.java
===================================================================
--- trunk/freenet/src/freenet/client/async/MultiPutCompletionCallback.java
2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/client/async/MultiPutCompletionCallback.java
2009-04-01 20:34:09 UTC (rev 26322)
@@ -1,7 +1,10 @@
package freenet.client.async;
+import java.util.Arrays;
import java.util.Vector;
+import com.db4o.ObjectContainer;
+
import freenet.client.InsertException;
import freenet.client.Metadata;
import freenet.keys.BaseClientKey;
@@ -10,7 +13,7 @@
public class MultiPutCompletionCallback implements PutCompletionCallback,
ClientPutState {
- // LinkedList's rather than HashSet's for memory reasons.
+ // Vector's rather than HashSet's for memory reasons.
// This class will not be used with large sets, so O(n) is cheaper than
O(1) -
// at least it is on memory!
private final Vector waitingFor;
@@ -23,7 +26,15 @@
private boolean finished;
private boolean started;
public final Object token;
+ private final boolean persistent;
+ public void objectOnActivate(ObjectContainer container) {
+ // Only activate the arrays
+ container.activate(waitingFor, 1);
+ container.activate(waitingForBlockSet, 1);
+ container.activate(waitingForFetchable, 1);
+ }
+
public MultiPutCompletionCallback(PutCompletionCallback cb,
BaseClientPutter parent, Object token) {
this.cb = cb;
waitingFor = new Vector();
@@ -32,63 +43,129 @@
this.parent = parent;
this.token = token;
finished = false;
+ this.persistent = parent.persistent();
}
- public void onSuccess(ClientPutState state) {
- onBlockSetFinished(state);
- onFetchable(state);
+ public void onSuccess(ClientPutState state, ObjectContainer container,
ClientContext context) {
+ onBlockSetFinished(state, container, context);
+ onFetchable(state, container);
+ if(persistent)
+ container.activate(waitingFor, 2);
+ boolean complete = true;
synchronized(this) {
- if(finished) return;
+ if(finished) {
+ Logger.error(this, "Already finished but got
onSuccess() for "+state+" on "+this);
+ return;
+ }
waitingFor.remove(state);
- if(!(waitingFor.isEmpty() && started))
- return;
+ if(waitingForBlockSet.contains(state)) {
+ waitingForBlockSet.remove(state);
+ if(persistent && !waitingFor.isEmpty())
+
container.ext().store(waitingForBlockSet, 1);
+ }
+ if(waitingForFetchable.contains(state)) {
+ waitingForFetchable.remove(state);
+ if(persistent && !waitingFor.isEmpty())
+
container.ext().store(waitingForFetchable, 1);
+ }
+ if(!(waitingFor.isEmpty() && started)) {
+ if(persistent) {
+ container.ext().store(waitingFor, 1);
+ }
+ complete = false;
+ }
+ if(state == generator)
+ generator = null;
}
- complete(null);
+ if(persistent) state.removeFrom(container, context);
+ if(complete) {
+ Logger.minor(this, "Completing...");
+ complete(null, container, context);
+ }
}
- public void onFailure(InsertException e, ClientPutState state) {
+ public void onFailure(InsertException e, ClientPutState state,
ObjectContainer container, ClientContext context) {
+ boolean complete = true;
synchronized(this) {
- if(finished) return;
+ if(finished) {
+ Logger.error(this, "Already finished but got
onFailure() for "+state+" on "+this);
+ return;
+ }
waitingFor.remove(state);
waitingForBlockSet.remove(state);
waitingForFetchable.remove(state);
if(!(waitingFor.isEmpty() && started)) {
+ if(this.e != null) {
+ if(persistent) {
+ container.activate(this.e, 10);
+ this.e.removeFrom(container);
+ }
+ }
this.e = e;
- return;
+ if(persistent)
+ container.store(this);
+ complete = false;
}
+ if(state == generator)
+ generator = null;
}
- complete(e);
+ if(persistent) {
+ container.ext().store(waitingFor, 2);
+ container.ext().store(waitingForBlockSet, 2);
+ container.ext().store(waitingForFetchable, 2);
+ }
+ if(persistent) state.removeFrom(container, context);
+ if(complete)
+ complete(e, container, context);
}
- private void complete(InsertException e) {
+ private void complete(InsertException e, ObjectContainer container,
ClientContext context) {
synchronized(this) {
if(finished) return;
finished = true;
- if(e != null && this.e != null && this.e != e) {
- if(!(e.getMode() == InsertException.CANCELLED))
// Cancelled is okay, ignore it, we cancel after failure sometimes.
- Logger.error(this, "Completing with
"+e+" but already set "+this.e);
+ if(e != null && this.e != null && this.e != e &&
persistent) {
+ container.activate(this.e, 10);
+ this.e.removeFrom(container);
}
- if(e == null) e = this.e;
+ if(e == null) {
+ e = this.e;
+ if(persistent && e != null) {
+ container.activate(e, 10);
+ e = e.clone(); // Since we will remove
it, we can't pass it on
+ }
+ }
}
+ if(persistent) {
+ container.store(this);
+ container.activate(cb, 1);
+ }
if(e != null)
- cb.onFailure(e, this);
+ cb.onFailure(e, this, container, context);
else
- cb.onSuccess(this);
+ cb.onSuccess(this, container, context);
}
- public synchronized void addURIGenerator(ClientPutState ps) {
- add(ps);
+ public synchronized void addURIGenerator(ClientPutState ps,
ObjectContainer container) {
+ add(ps, container);
generator = ps;
+ if(persistent)
+ container.store(this);
}
- public synchronized void add(ClientPutState ps) {
+ public synchronized void add(ClientPutState ps, ObjectContainer
container) {
if(finished) return;
waitingFor.add(ps);
waitingForBlockSet.add(ps);
waitingForFetchable.add(ps);
+ if(persistent) {
+ container.store(ps);
+ container.ext().store(waitingFor, 2);
+ container.ext().store(waitingForBlockSet, 2);
+ container.ext().store(waitingForFetchable, 2);
+ }
}
- public void arm() {
+ public void arm(ObjectContainer container, ClientContext context) {
boolean allDone;
boolean allGotBlocks;
synchronized(this) {
@@ -96,12 +173,15 @@
allDone = waitingFor.isEmpty();
allGotBlocks = waitingForBlockSet.isEmpty();
}
-
+ if(persistent) {
+ container.store(this);
+ container.activate(cb, 1);
+ }
if(allGotBlocks) {
- cb.onBlockSetFinished(this);
+ cb.onBlockSetFinished(this, container, context);
}
if(allDone) {
- complete(e);
+ complete(e, container, context);
}
}
@@ -109,55 +189,79 @@
return parent;
}
- public void onEncode(BaseClientKey key, ClientPutState state) {
+ public void onEncode(BaseClientKey key, ClientPutState state,
ObjectContainer container, ClientContext context) {
synchronized(this) {
if(state != generator) return;
}
- cb.onEncode(key, this);
+ if(persistent)
+ container.activate(cb, 1);
+ cb.onEncode(key, this, container, context);
}
- public void cancel() {
+ public void cancel(ObjectContainer container, ClientContext context) {
ClientPutState[] states = new ClientPutState[waitingFor.size()];
synchronized(this) {
states = (ClientPutState[]) waitingFor.toArray(states);
}
- for(int i=0;i<states.length;i++)
- states[i].cancel();
+ boolean logMINOR = Logger.shouldLog(Logger.MINOR, this);
+ for(int i=0;i<states.length;i++) {
+ if(persistent)
+ container.activate(states[i], 1);
+ if(logMINOR) Logger.minor(this, "Cancelling state "+i+"
of "+states.length+" : "+states[i]);
+ states[i].cancel(container, context);
+ }
}
- public synchronized void onTransition(ClientPutState oldState,
ClientPutState newState) {
+ public synchronized void onTransition(ClientPutState oldState,
ClientPutState newState, ObjectContainer container) {
if(generator == oldState)
generator = newState;
if(oldState == newState) return;
for(int i=0;i<waitingFor.size();i++) {
- if(waitingFor.get(i) == oldState) waitingFor.set(i,
newState);
+ if(waitingFor.get(i) == oldState) {
+ waitingFor.set(i, newState);
+ container.ext().store(waitingFor, 2);
+ }
}
for(int i=0;i<waitingFor.size();i++) {
- if(waitingForBlockSet.get(i) == oldState)
waitingForBlockSet.set(i, newState);
+ if(waitingForBlockSet.get(i) == oldState) {
+ waitingForBlockSet.set(i, newState);
+ container.ext().store(waitingForBlockSet, 2);
+ }
}
for(int i=0;i<waitingFor.size();i++) {
- if(waitingForFetchable.get(i) == oldState)
waitingForFetchable.set(i, newState);
+ if(waitingForFetchable.get(i) == oldState) {
+ waitingForFetchable.set(i, newState);
+ container.ext().store(waitingForFetchable, 2);
+ }
}
}
- public synchronized void onMetadata(Metadata m, ClientPutState state) {
+ public synchronized void onMetadata(Metadata m, ClientPutState state,
ObjectContainer container, ClientContext context) {
+ if(persistent)
+ container.activate(cb, 1);
if(generator == state) {
- cb.onMetadata(m, this);
+ cb.onMetadata(m, this, container, context);
} else {
Logger.error(this, "Got metadata for "+state);
}
}
- public void onBlockSetFinished(ClientPutState state) {
+ public void onBlockSetFinished(ClientPutState state, ObjectContainer
container, ClientContext context) {
+ if(persistent)
+ container.activate(waitingForBlockSet, 2);
synchronized(this) {
this.waitingForBlockSet.remove(state);
+ if(persistent)
+ container.ext().store(waitingForBlockSet, 2);
if(!started) return;
if(!waitingForBlockSet.isEmpty()) return;
}
- cb.onBlockSetFinished(this);
+ if(persistent)
+ container.activate(cb, 1);
+ cb.onBlockSetFinished(this, container, context);
}
- public void schedule() throws InsertException {
+ public void schedule(ObjectContainer container, ClientContext context)
throws InsertException {
// Do nothing
}
@@ -169,13 +273,44 @@
return null;
}
- public void onFetchable(ClientPutState state) {
+ public void onFetchable(ClientPutState state, ObjectContainer
container) {
+ if(persistent)
+ container.activate(waitingForFetchable, 2);
synchronized(this) {
this.waitingForFetchable.remove(state);
+ if(persistent)
+ container.ext().store(waitingForFetchable, 2);
if(!started) return;
if(!waitingForFetchable.isEmpty()) return;
}
- cb.onFetchable(this);
+ if(persistent)
+ container.activate(cb, 1);
+ cb.onFetchable(this, container);
}
+ public void removeFrom(ObjectContainer container, ClientContext
context) {
+ container.activate(waitingFor, 2);
+ container.activate(waitingForBlockSet, 2);
+ container.activate(waitingForFetchable, 2);
+ // Should have been cleared by now
+ if(!waitingFor.isEmpty())
+ Logger.error(this, "waitingFor not empty in
removeFrom() on "+this+" : "+waitingFor);
+ if(!waitingForBlockSet.isEmpty())
+ Logger.error(this, "waitingForBlockSet not empty in
removeFrom() on "+this+" : "+waitingForBlockSet);
+ if(!waitingForFetchable.isEmpty())
+ Logger.error(this, "waitingForFetchable not empty in
removeFrom() on "+this+" : "+waitingForFetchable);
+ container.delete(waitingFor);
+ container.delete(waitingForBlockSet);
+ container.delete(waitingForFetchable);
+ // cb is at a higher level, we don't remove that, it removes
itself
+ // generator is just a reference to one of the waitingFor's
+ // parent removes itself
+ if(e != null) {
+ container.activate(e, 5);
+ e.removeFrom(container);
+ }
+ // whoever set the token is responsible for removing it
+ container.delete(this);
+ }
+
}
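
Nearly every method in MultiPutCompletionCallback now follows the same guard: if the request is persistent, activate a field to a known depth before dereferencing it, and explicitly store any collection after mutating it; transient requests skip the database entirely. A minimal sketch of the activation half of that idiom, assuming a hypothetical Runnable callback field in place of the real PutCompletionCallback:

import com.db4o.ObjectContainer;

// A persistent object's fields may be inactive stubs until activated to
// the required depth; transient objects never touch the container.
class CallbackHolder {
	private final boolean persistent;
	private final Runnable cb; // hypothetical callback field

	CallbackHolder(boolean persistent, Runnable cb) {
		this.persistent = persistent;
		this.cb = cb;
	}

	void fire(ObjectContainer container) {
		if(persistent)
			container.activate(cb, 1); // depth 1: the object itself only
		cb.run();
	}
}
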
Copied: trunk/freenet/src/freenet/client/async/NoValidBlocksException.java
(from rev 26320,
branches/db4o/freenet/src/freenet/client/async/NoValidBlocksException.java)
===================================================================
--- trunk/freenet/src/freenet/client/async/NoValidBlocksException.java
(rev 0)
+++ trunk/freenet/src/freenet/client/async/NoValidBlocksException.java
2009-04-01 20:34:09 UTC (rev 26322)
@@ -0,0 +1,5 @@
+package freenet.client.async;
+
+public class NoValidBlocksException extends Exception {
+
+}
Modified: trunk/freenet/src/freenet/client/async/OfferedKeysList.java
===================================================================
--- trunk/freenet/src/freenet/client/async/OfferedKeysList.java 2009-04-01
20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/client/async/OfferedKeysList.java 2009-04-01
20:34:09 UTC (rev 26322)
@@ -4,14 +4,20 @@
package freenet.client.async;
import java.util.HashSet;
+import java.util.List;
import java.util.Vector;
+import com.db4o.ObjectContainer;
+
import freenet.crypt.RandomSource;
import freenet.keys.Key;
import freenet.node.BaseSendableGet;
import freenet.node.KeysFetchingLocally;
import freenet.node.NodeClientCore;
+import freenet.node.RequestClient;
import freenet.node.RequestScheduler;
+import freenet.node.SendableRequestItem;
+import freenet.node.SendableRequestSender;
import freenet.node.NodeClientCore.SimpleRequestSenderCompletionListener;
import freenet.support.Logger;
import freenet.support.LogThresholdCallback;
@@ -30,7 +36,7 @@
* @author toad
*
*/
-public class OfferedKeysList extends BaseSendableGet {
+public class OfferedKeysList extends BaseSendableGet implements RequestClient {
private final HashSet<Key> keys;
private final Vector<Key> keysList; // O(1) remove random element the
way we use it, see chooseKey().
@@ -48,13 +54,16 @@
private final RandomSource random;
private final short priorityClass;
private final NodeClientCore core;
+ private final boolean isSSK;
- OfferedKeysList(NodeClientCore core, RandomSource random, short
priorityClass) {
+ OfferedKeysList(NodeClientCore core, RandomSource random, short
priorityClass, boolean isSSK) {
+ super(false);
this.keys = new HashSet<Key>();
this.keysList = new Vector<Key>();
this.random = random;
this.priorityClass = priorityClass;
this.core = core;
+ this.isSSK = isSSK;
}
/** Called when a key is found, when it no longer belongs to this list
etc. */
@@ -67,24 +76,34 @@
assert(keysList.size() == keys.size());
}
- public synchronized boolean isEmpty() {
+ public synchronized boolean isEmpty(ObjectContainer container) {
return keys.isEmpty();
}
@Override
- public Object[] allKeys() {
+ public SendableRequestItem[] allKeys(ObjectContainer container,
ClientContext context) {
// Not supported.
throw new UnsupportedOperationException();
}
@Override
- public Object[] sendableKeys() {
+ public SendableRequestItem[] sendableKeys(ObjectContainer container,
ClientContext context) {
// Not supported.
throw new UnsupportedOperationException();
}
+ private class MySendableRequestItem implements SendableRequestItem {
+ final Key key;
+ MySendableRequestItem(Key key) {
+ this.key = key;
+ }
+ public void dump() {
+ // Ignore, we will be GC'ed
+ }
+ }
+
@Override
- public synchronized Object chooseKey(KeysFetchingLocally fetching) {
+ public synchronized SendableRequestItem chooseKey(KeysFetchingLocally
fetching, ObjectContainer container, ClientContext context) {
assert(keysList.size() == keys.size());
if(keys.size() == 1) {
// Shortcut the common case
@@ -92,7 +111,7 @@
if(fetching.hasKey(k)) return null;
keys.remove(k);
keysList.setSize(0);
- return k;
+ return new MySendableRequestItem(k);
}
for(int i=0;i<10;i++) {
// Pick a random key
@@ -105,13 +124,13 @@
keysList.setSize(keysList.size()-1);
keys.remove(k);
assert(keysList.size() == keys.size());
- return k;
+ return new MySendableRequestItem(k);
}
return null;
}
@Override
- public synchronized boolean hasValidKeys(KeysFetchingLocally fetching) {
+ public synchronized boolean hasValidKeys(KeysFetchingLocally fetching,
ObjectContainer container, ClientContext context) {
assert(keysList.size() == keys.size());
if(keys.size() == 1) {
// Shortcut the common case
@@ -131,7 +150,7 @@
}
@Override
- public Object getClient() {
+ public RequestClient getClient(ObjectContainer container) {
return this;
}
@@ -142,7 +161,7 @@
}
@Override
- public short getPriorityClass() {
+ public short getPriorityClass(ObjectContainer container) {
return priorityClass;
}
@@ -152,33 +171,35 @@
}
@Override
- public void internalError(Object keyNum, Throwable t, RequestScheduler
sched) {
+ public void internalError(Throwable t, RequestScheduler sched,
ObjectContainer container, ClientContext context, boolean persistent) {
Logger.error(this, "Internal error: "+t, t);
}
@Override
- public boolean send(NodeClientCore node, RequestScheduler sched, Object
keyNum) {
- Key key = (Key) keyNum;
- // Have to cache it in order to propagate it; FIXME
- // Don't let a node force us to start a real request for a
specific key.
- // We check the datastore, take up offers if any (on a short
timeout), and then quit if we still haven't fetched the data.
- // Obviously this may have a marginal impact on load but it
should only be marginal.
- core.asyncGet(key, true, true, new
SimpleRequestSenderCompletionListener() {
+ public SendableRequestSender getSender(ObjectContainer container,
ClientContext context) {
+ return new SendableRequestSender() {
- public void completed(boolean success) {
- // Ignore
+ public boolean send(NodeClientCore core,
RequestScheduler sched, ClientContext context, ChosenBlock req) {
+ Key key = ((MySendableRequestItem)
req.token).key;
+ // Have to cache it in order to propagate it;
FIXME
+ // Don't let a node force us to start a real
request for a specific key.
+ // We check the datastore, take up offers if
any (on a short timeout), and then quit if we still haven't fetched the data.
+ // Obviously this may have a marginal impact on
load but it should only be marginal.
+ core.asyncGet(key, true, true, new
SimpleRequestSenderCompletionListener() {
+
+ public void completed(boolean success) {
+ // Ignore
+ }
+
+ });
+ return true;
}
- });
- return true;
+ };
}
- public boolean canRemove() {
- return false;
- }
-
@Override
- public boolean isCancelled() {
+ public boolean isCancelled(ObjectContainer container) {
return false;
}
@@ -192,8 +213,34 @@
}
@Override
- public Key getNodeKey(Object token) {
- return (Key) token;
+ public Key getNodeKey(SendableRequestItem token, ObjectContainer
container) {
+ return ((MySendableRequestItem) token).key;
}
+ public boolean isSSK() {
+ return isSSK;
+ }
+
+ @Override
+ public List<PersistentChosenBlock> makeBlocks(PersistentChosenRequest
request, RequestScheduler sched, ObjectContainer container, ClientContext
context) {
+ throw new UnsupportedOperationException("Transient only");
+ }
+
+ @Override
+ public boolean isInsert() {
+ return false;
+ }
+
+ @Override
+ public ClientRequestScheduler getScheduler(ClientContext context) {
+ if(isSSK)
+ return context.getSskFetchScheduler();
+ else
+ return context.getChkFetchScheduler();
+ }
+
+ public void removeFrom(ObjectContainer container) {
+ throw new UnsupportedOperationException();
+ }
+
}
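
keysList duplicates the HashSet purely so that chooseKey() can remove a random element in O(1): the standard trick is to overwrite the chosen slot with the last element and shrink the Vector, which loses ordering but costs no shuffling. A standalone sketch of that trick (RandomBag is a hypothetical name, not part of this commit):

import java.util.ArrayList;
import java.util.List;
import java.util.Random;

// O(1) random removal: move the last element into the chosen slot, then
// drop the tail. Order is not preserved, which is fine for random picks.
final class RandomBag<T> {
	private final List<T> items = new ArrayList<T>();
	private final Random random;

	RandomBag(Random random) { this.random = random; }

	void add(T item) { items.add(item); }

	T removeRandom() {
		int size = items.size();
		if(size == 0) return null;
		int idx = random.nextInt(size);
		T chosen = items.get(idx);
		items.set(idx, items.get(size - 1)); // fill the hole with the tail
		items.remove(size - 1); // O(1) removal from the end
		return chosen;
	}
}
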
Copied: trunk/freenet/src/freenet/client/async/PersistentChosenBlock.java (from
rev 26320,
branches/db4o/freenet/src/freenet/client/async/PersistentChosenBlock.java)
===================================================================
--- trunk/freenet/src/freenet/client/async/PersistentChosenBlock.java
(rev 0)
+++ trunk/freenet/src/freenet/client/async/PersistentChosenBlock.java
2009-04-01 20:34:09 UTC (rev 26322)
@@ -0,0 +1,162 @@
+package freenet.client.async;
+
+import freenet.keys.ClientKey;
+import freenet.keys.Key;
+import freenet.node.LowLevelGetException;
+import freenet.node.LowLevelPutException;
+import freenet.node.NodeClientCore;
+import freenet.node.RequestScheduler;
+import freenet.node.SendableGet;
+import freenet.node.SendableRequestItem;
+import freenet.node.SendableRequestSender;
+import freenet.support.Logger;
+
+/**
+ * A block within a ChosenRequest.
+ * @author Matthew Toseland <toad at amphibian.dyndns.org> (0xE43DA450)
+ */
+public class PersistentChosenBlock extends ChosenBlock {
+
+ public final PersistentChosenRequest parent;
+ public final boolean isInsert;
+
+ /* Completion */
+ private boolean finished;
+
+ /** If a SendableGet failed, failedGet will be set to the exception
generated. Cannot be null if it failed. */
+ private LowLevelGetException failedGet;
+
+ private boolean fetchSucceeded; // The actual block is not our problem.
+
+ /* Inserts */
+ private boolean insertSucceeded;
+ /** If a SendableInsert failed, failedPut will be set to the exception
generated. Cannot be null if it failed. */
+ private LowLevelPutException failedPut;
+
+ public PersistentChosenBlock(boolean isInsert, PersistentChosenRequest
parent, SendableRequestItem token, Key key, ClientKey ckey, RequestScheduler
sched) {
+ super(token, key, ckey, parent.localRequestOnly,
parent.cacheLocalRequests, parent.ignoreStore, sched);
+ this.isInsert = isInsert;
+ this.parent = parent;
+ if(Logger.shouldLog(Logger.MINOR, this)) Logger.minor(this,
"Created "+this+" for "+parent+" ckey="+ckey);
+ }
+
+ @Override
+ public void onFetchSuccess(ClientContext context) {
+ assert(!isInsert);
+ synchronized(this) {
+ if(finished) {
+ Logger.error(this, "Already finished in
onSuccess() on "+this, new Exception("debug"));
+ return;
+ }
+ finished = true;
+ fetchSucceeded = true;
+ }
+ parent.onFinished(this, context);
+ parent.scheduler.succeeded((SendableGet)parent.request, this);
+ }
+
+ @Override
+ public void onFailure(LowLevelGetException e, ClientContext context) {
+ assert(!isInsert);
+ synchronized(this) {
+ if(finished) {
+ Logger.error(this, "Already finished in
onFailure() on "+this, new Exception("debug"));
+ return;
+ }
+ if(e == null)
+ throw new NullPointerException();
+ failedGet = e;
+ finished = true;
+ }
+ parent.onFinished(this, context);
+ }
+
+ @Override
+ public void onInsertSuccess(ClientContext context) {
+ assert(isInsert);
+ synchronized(this) {
+ if(finished) {
+ Logger.error(this, "Already finished in
onSuccess() on "+this, new Exception("debug"));
+ return;
+ }
+ insertSucceeded = true;
+ finished = true;
+ }
+ parent.onFinished(this, context);
+ }
+
+ @Override
+ public void onFailure(LowLevelPutException e, ClientContext context) {
+ assert(isInsert);
+ synchronized(this) {
+ if(finished) {
+ Logger.error(this, "Already finished in
onFailure() on "+this, new Exception("debug"));
+ return;
+ }
+ if(e == null)
+ throw new NullPointerException();
+ failedPut = e;
+ finished = true;
+ }
+ parent.onFinished(this, context);
+ }
+
+ LowLevelGetException failedGet() {
+ return failedGet;
+ }
+
+ boolean insertSucceeded() {
+ return insertSucceeded;
+ }
+
+ boolean fetchSucceeded() {
+ return fetchSucceeded;
+ }
+
+ LowLevelPutException failedPut() {
+ return failedPut;
+ }
+
+ @Override
+ public boolean isPersistent() {
+ return true;
+ }
+
+ @Override
+ public boolean isCancelled() {
+ // We can't tell without accessing the database, and we can't
access the database on the request starter thread.
+ return false;
+ }
+
+ @Override
+ public boolean send(NodeClientCore core, RequestScheduler sched) {
+ try {
+ return super.send(core, sched);
+ } finally {
+ boolean wasFinished;
+ synchronized(this) {
+ wasFinished = finished;
+ if(!finished) {
+ finished = true;
+ if(parent.request instanceof
SendableGet) {
+ Logger.error(this, "SendableGet
"+parent.request+" didn't call a callback on "+this);
+ }
+ }
+ }
+ if(!wasFinished) {
+ parent.onFinished(this, sched.getContext());
+ }
+ }
+ }
+
+ @Override
+ public short getPriority() {
+ return parent.prio;
+ }
+
+ @Override
+ public SendableRequestSender getSender(ClientContext context) {
+ return parent.sender;
+ }
+
+}
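
All four completion callbacks above share one discipline: take the lock, log and bail out if finished is already set, record the outcome, then call parent.onFinished() outside the lock. A distilled sketch of that once-only guard (OnceOnly is a hypothetical class, not from this commit):

// The first completion wins; later completions are rejected rather than
// overwriting the recorded outcome, exactly as in the callbacks above.
class OnceOnly {
	private boolean finished;
	private Throwable failure; // null means success

	/** @return true if this call performed the completion. */
	boolean tryFinish(Throwable maybeFailure) {
		synchronized(this) {
			if(finished) return false; // already completed; ignore
			finished = true;
			failure = maybeFailure;
		}
		return true; // caller now notifies its parent exactly once
	}

	synchronized Throwable failure() { return failure; }
}
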
Copied: trunk/freenet/src/freenet/client/async/PersistentChosenRequest.java
(from rev 26320,
branches/db4o/freenet/src/freenet/client/async/PersistentChosenRequest.java)
===================================================================
--- trunk/freenet/src/freenet/client/async/PersistentChosenRequest.java
(rev 0)
+++ trunk/freenet/src/freenet/client/async/PersistentChosenRequest.java
2009-04-01 20:34:09 UTC (rev 26322)
@@ -0,0 +1,282 @@
+/* This code is part of Freenet. It is distributed under the GNU General
+ * Public License, version 2 (or at your option any later version). See
+ * http://www.gnu.org/ for further details of the GPL. */
+package freenet.client.async;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Random;
+import java.util.Vector;
+
+import com.db4o.ObjectContainer;
+
+import freenet.client.FetchContext;
+import freenet.keys.Key;
+import freenet.node.BulkCallFailureItem;
+import freenet.node.LowLevelGetException;
+import freenet.node.RequestScheduler;
+import freenet.node.SendableGet;
+import freenet.node.SendableInsert;
+import freenet.node.SendableRequest;
+import freenet.node.SendableRequestSender;
+import freenet.node.SupportsBulkCallFailure;
+import freenet.support.Logger;
+import freenet.support.io.NativeThread;
+
+/**
+ * A persistent SendableRequest chosen by ClientRequestScheduler. In order to
minimize database I/O
+ * (and hence disk I/O and object churn), we select the entire
SendableRequest, including all blocks
+ * on it. We keep it in RAM, until all blocks have succeeded/failed. Then we
call all relevant
+ * callbacks in a single transaction.
+ * @author toad
+ */
+public class PersistentChosenRequest {
+
+ /** The request object */
+ public transient final SendableRequest request;
+ /** Priority when we selected it */
+ public transient final short prio;
+ /** Retry count when we selected it */
+ public transient final int retryCount;
+ public transient final boolean localRequestOnly;
+ public transient final boolean cacheLocalRequests;
+ public transient final boolean ignoreStore;
+ public transient final ArrayList<PersistentChosenBlock>
blocksNotStarted;
+ public transient final ArrayList<PersistentChosenBlock> blocksStarted;
+ public transient final ArrayList<PersistentChosenBlock> blocksFinished;
+ public final RequestScheduler scheduler;
+ public final SendableRequestSender sender;
+ private boolean logMINOR;
+ private boolean finished;
+
+ PersistentChosenRequest(SendableRequest req, short prio, int
retryCount, ObjectContainer container, RequestScheduler sched, ClientContext
context) throws NoValidBlocksException {
+ request = req;
+ this.prio = prio;
+ this.retryCount = retryCount;
+ if(req instanceof SendableGet) {
+ SendableGet sg = (SendableGet) req;
+ FetchContext ctx = sg.getContext();
+ if(container != null)
+ container.activate(ctx, 1);
+ localRequestOnly = ctx.localRequestOnly;
+ cacheLocalRequests = ctx.cacheLocalRequests;
+ ignoreStore = ctx.ignoreStore;
+ } else {
+ SendableInsert sg = (SendableInsert) req;
+ localRequestOnly = false;
+ cacheLocalRequests = sg.cacheInserts(container);
+ ignoreStore = false;
+ }
+ blocksNotStarted = new ArrayList<PersistentChosenBlock>();
+ blocksStarted = new ArrayList<PersistentChosenBlock>();
+ blocksFinished = new ArrayList<PersistentChosenBlock>();
+ this.scheduler = sched;
+ // Fill up blocksNotStarted
+ boolean reqActive = container.ext().isActive(req);
+ if(!reqActive)
+ container.activate(req, 1);
+ List<PersistentChosenBlock> candidates = req.makeBlocks(this,
sched, container, context);
+ if(candidates == null) {
+ if(!reqActive) container.deactivate(req, 1);
+ throw new NoValidBlocksException();
+ }
+ for(PersistentChosenBlock block : candidates) {
+ Key key = block.key;
+ if(key != null && sched.hasFetchingKey(key))
+ continue;
+ blocksNotStarted.add(block);
+ }
+ sender = req.getSender(container, context);
+ if(!reqActive)
+ container.deactivate(req, 1);
+ logMINOR = Logger.shouldLog(Logger.MINOR, this);
+ }
+
+ void onFinished(PersistentChosenBlock block, ClientContext context) {
+ logMINOR = Logger.shouldLog(Logger.MINOR, this);
+ if(logMINOR)
+ Logger.minor(this, "onFinished() on "+this+" for
"+block, new Exception("debug"));
+ synchronized(this) {
+ // Remove by pointer
+ for(int i=0;i<blocksNotStarted.size();i++) {
+ if(blocksNotStarted.get(i) == block) {
+ blocksNotStarted.remove(i);
+ Logger.error(this, "Block finished but
was in blocksNotStarted: "+block+" for "+this, new Exception("error"));
+ i--;
+ }
+ }
+ for(int i=0;i<blocksStarted.size();i++) {
+ if(blocksStarted.get(i) == block) {
+ blocksStarted.remove(i);
+ i--;
+ }
+ }
+ for(PersistentChosenBlock cmp : blocksFinished)
+ if(cmp == block) {
+ Logger.error(this, "Block already in
blocksFinished: "+block+" for "+this);
+ return;
+ }
+ blocksFinished.add(block);
+ if(!(blocksNotStarted.isEmpty() &&
blocksStarted.isEmpty())) {
+ if(logMINOR)
+ Logger.minor(this, "Not finishing yet:
blocks not started: "+blocksNotStarted.size()+" started:
"+blocksStarted.size()+" finished: "+blocksFinished.size());
+ return;
+ }
+ }
+ // All finished.
+ context.jobRunner.queue(new DBJob() {
+
+ public void run(ObjectContainer container,
ClientContext context) {
+ finish(container, context, false, false);
+ }
+
+ }, NativeThread.NORM_PRIORITY + 1, false);
+ }
+
+ private void finish(ObjectContainer container, ClientContext context,
boolean dumping, boolean alreadyActive) {
+ if(!container.ext().isStored(request)) {
+ if(logMINOR) Logger.minor(this, "Request apparently
already deleted: "+request+" on "+this);
+ return;
+ }
+ if((!alreadyActive) && container.ext().isActive(request))
+ Logger.error(this, "ALREADY ACTIVATED: "+request, new
Exception("debug"));
+ if(!alreadyActive)
+ container.activate(request, 1);
+ Logger.normal(this, "Finishing "+this+" for "+request);
+ // Call all the callbacks.
+ PersistentChosenBlock[] finishedBlocks;
+ int startedSize;
+ synchronized(this) {
+ if(finished) {
+ if(blocksFinished.isEmpty()) {
+ // Okay...
+ if(!alreadyActive)
+ container.deactivate(request,
1);
+ return;
+ } else {
+ Logger.error(this, "Finished but
blocksFinished not empty on "+this, new Exception("debug"));
+ // Process the blocks...
+ }
+ }
+ startedSize = blocksStarted.size();
+ if(startedSize > 0) {
+ Logger.error(this, "Still waiting for callbacks
on "+this+" for "+startedSize+" blocks");
+ // Wait... if we set finished, we have to
process them now, and
+ // we can't process them now because we haven't
had the callbacks,
+ // we don't know what the outcome will be.
+ return;
+ }
+ finished = true;
+ finishedBlocks = blocksFinished.toArray(new
PersistentChosenBlock[blocksFinished.size()]);
+ }
+ if(finishedBlocks.length == 0) {
+ if(!dumping)
+ Logger.error(this, "No finished blocks in
finish() on "+this);
+ else if(logMINOR)
+ Logger.minor(this, "No finished blocks in
finish() on "+this);
+ // Remove from running requests, we won't be called.
+ scheduler.removeRunningRequest(request);
+ if(!alreadyActive)
+ container.deactivate(request, 1);
+ return;
+ }
+ if(request instanceof SendableGet) {
+ boolean supportsBulk = request instanceof
SupportsBulkCallFailure;
+ Vector<BulkCallFailureItem> bulkFailItems = null;
+ for(PersistentChosenBlock block : finishedBlocks) {
+ if(!block.fetchSucceeded()) {
+ LowLevelGetException e =
block.failedGet();
+ if(supportsBulk) {
+ if(bulkFailItems == null)
+ bulkFailItems = new
Vector<BulkCallFailureItem>();
+ bulkFailItems.add(new
BulkCallFailureItem(e, block.token));
+ } else {
+
((SendableGet)request).onFailure(e, block.token, container, context);
+ container.commit(); // db4o is
read-committed, so we need to commit here.
+ }
+ }
+ }
+ if(bulkFailItems != null) {
+
((SupportsBulkCallFailure)request).onFailure(bulkFailItems.toArray(new
BulkCallFailureItem[bulkFailItems.size()]), container, context);
+ container.commit(); // db4o is read-committed,
so we need to commit here.
+ }
+ } else /*if(request instanceof SendableInsert)*/ {
+ container.activate(request, 1);
+ for(PersistentChosenBlock block : finishedBlocks) {
+ container.activate(block, 1);
+ if(block.insertSucceeded()) {
+
((SendableInsert)request).onSuccess(block.token, container, context);
+ container.commit(); // db4o is
read-committed, so we need to commit here.
+ } else {
+
((SendableInsert)request).onFailure(block.failedPut(), block.token, container,
context);
+ container.commit(); // db4o is
read-committed, so we need to commit here.
+ }
+ }
+ }
+ scheduler.removeRunningRequest(request);
+ if(request instanceof SendableInsert) {
+ // More blocks may have been added, because splitfile
inserts
+ // do not separate retries into separate
SendableInserts.
+ if(!container.ext().isActive(request))
+ container.activate(request, 1);
+ if((!request.isEmpty(container)) &&
(!request.isCancelled(container))) {
+
request.getScheduler(context).maybeAddToStarterQueue(request, container, null);
+ request.getScheduler(context).wakeStarter();
+ }
+ }
+ if(!alreadyActive)
+ container.deactivate(request, 1);
+ }
+
+ public synchronized ChosenBlock grabNotStarted(Random random,
RequestScheduler sched) {
+ while(true) {
+ int size = blocksNotStarted.size();
+ if(size == 0) return null;
+ PersistentChosenBlock ret;
+ if(size == 1) ret = blocksNotStarted.remove(0);
+ else ret =
blocksNotStarted.remove(random.nextInt(size));
+ Key key = ret.key;
+ if(key != null && sched.hasFetchingKey(key))
+ // Already fetching; remove from list.
+ continue;
+ blocksStarted.add(ret);
+ return ret;
+ }
+ }
+
+ public synchronized int sizeNotStarted() {
+ return blocksNotStarted.size();
+ }
+
+ public void onDumped(ClientRequestSchedulerCore core, ObjectContainer
container, boolean reqAlreadyActive) {
+ if(logMINOR)
+ Logger.minor(this, "Dumping "+this);
+ scheduler.removeRunningRequest(request);
+ boolean wasStarted;
+ PersistentChosenBlock[] blocks;
+ synchronized(this) {
+ blocks = blocksNotStarted.toArray(new
PersistentChosenBlock[blocksNotStarted.size()]);
+ blocksNotStarted.clear();
+ wasStarted = !blocksStarted.isEmpty();
+ }
+ for(PersistentChosenBlock block : blocks)
+ block.onDumped();
+ if(!wasStarted) {
+ if(logMINOR) Logger.minor(this, "Finishing immediately
in onDumped() as nothing pending: "+this);
+ finish(container, core.sched.clientContext, true,
reqAlreadyActive);
+ }
+ }
+
+ public synchronized void pruneDuplicates(ClientRequestScheduler sched) {
+ for(int i=0;i<blocksNotStarted.size();i++) {
+ PersistentChosenBlock block = blocksNotStarted.get(i);
+ Key key = block.key;
+ if(key == null) continue;
+ if(sched.hasFetchingKey(key)) {
+ blocksNotStarted.remove(i);
+ if(logMINOR) Logger.minor(this, "Pruned
duplicate "+block+" from "+this);
+ i--;
+ }
+ }
+ }
+}
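
Note that onFinished() does no database work itself: it queues a DBJob so that every db4o operation runs on the single database thread, which is what makes the "single transaction" promise in the class comment possible. A rough sketch of that hand-off, using a plain single-thread executor and an opaque handle in place of the real DBJobRunner and ObjectContainer (all names here are hypothetical):

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

// Callbacks on network threads never touch the database; they queue a
// job for the one thread that owns it.
interface Job {
	void run(Object databaseHandle); // stand-in for run(ObjectContainer, ClientContext)
}

class SingleThreadJobRunner {
	private final ExecutorService dbThread = Executors.newSingleThreadExecutor();
	private final Object databaseHandle = new Object(); // stand-in for the ObjectContainer

	void queue(final Job job) {
		dbThread.execute(new Runnable() {
			public void run() {
				job.run(databaseHandle); // all database work happens here
			}
		});
	}
}
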
Copied: trunk/freenet/src/freenet/client/async/PersistentCooldownQueue.java
(from rev 26320,
branches/db4o/freenet/src/freenet/client/async/PersistentCooldownQueue.java)
===================================================================
--- trunk/freenet/src/freenet/client/async/PersistentCooldownQueue.java
(rev 0)
+++ trunk/freenet/src/freenet/client/async/PersistentCooldownQueue.java
2009-04-01 20:34:09 UTC (rev 26322)
@@ -0,0 +1,159 @@
+/* This code is part of Freenet. It is distributed under the GNU General
+ * Public License, version 2 (or at your option any later version). See
+ * http://www.gnu.org/ for further details of the GPL. */
+package freenet.client.async;
+
+import java.util.ArrayList;
+
+import com.db4o.ObjectContainer;
+import com.db4o.ObjectSet;
+import com.db4o.query.Candidate;
+import com.db4o.query.Evaluation;
+import com.db4o.query.Query;
+
+import freenet.keys.Key;
+import freenet.node.SendableGet;
+import freenet.support.HexUtil;
+import freenet.support.Logger;
+
+/**
+ * Persistable implementation of CooldownQueue. Much simpler than
RequestCooldownQueue,
+ * and would use much more memory if it wasn't for the database!
+ *
+ * Creator must call setCooldownTime() before use, after pulling it
+ * out of the database.
+ * @author toad
+ */
+public class PersistentCooldownQueue implements CooldownQueue {
+
+ private long cooldownTime;
+
+ void setCooldownTime(long time) {
+ cooldownTime = time;
+ }
+
+ public long add(Key key, SendableGet client, ObjectContainer container)
{
+ assert(cooldownTime != 0);
+ long removeTime = System.currentTimeMillis() + cooldownTime;
+ PersistentCooldownQueueItem persistentCooldownQueueItem = new
PersistentCooldownQueueItem(client, key, removeTime, this);
+ container.store(persistentCooldownQueueItem);
+ return removeTime;
+ }
+
+ public boolean removeKey(final Key key, final SendableGet client, final
long time, ObjectContainer container) {
+ boolean found = false;
+ final String keyAsBytes = HexUtil.bytesToHex(key.getFullKey());
+ Query query = container.query();
+ query.constrain(PersistentCooldownQueueItem.class);
+ query.descend("keyAsBytes").constrain(keyAsBytes);
+ // The result from parent will be huge, and client may be huge
too.
+ // Don't bother with a join, just check in the evaluation.
+// query.descend("client").constrain(client);
+// query.descend("parent").constrain(this);
+ Evaluation eval = new Evaluation() {
+
+ public void evaluate(Candidate candidate) {
+ PersistentCooldownQueueItem item =
(PersistentCooldownQueueItem) candidate.getObject();
+ if(item.client != client) {
+ candidate.include(false);
+ return;
+ }
+ if(item.parent != PersistentCooldownQueue.this)
{
+ candidate.include(false);
+ return;
+ }
+ Key k = item.key;
+ candidate.objectContainer().activate(k, 5);
+ if(k.equals(key))
+ candidate.include(true);
+ else {
+ candidate.include(false);
+
candidate.objectContainer().deactivate(k, 5);
+ }
+ }
+
+ };
+ query.constrain(eval);
+ ObjectSet results = query.execute();
+
+ while(results.hasNext()) {
+ found = true;
+ PersistentCooldownQueueItem i =
(PersistentCooldownQueueItem) results.next();
+ container.delete(i);
+ }
+ return found;
+ }
+
+ public Object removeKeyBefore(final long now, long dontCareAfterMillis,
ObjectContainer container, int maxCount) {
+ // Will be called repeatedly until no more keys are returned,
so it doesn't
+ // matter very much if they're not in order.
+
+ // This query returns bogus results (cooldown items with times
in the future).
+// ObjectSet results = container.query(new Predicate() {
+// public boolean match(PersistentCooldownQueueItem
persistentCooldownQueueItem) {
+// if(persistentCooldownQueueItem.time >= now)
return false;
+// if(persistentCooldownQueueItem.parent !=
PersistentCooldownQueue.this) return false;
+// return true;
+// }
+// });
+ // Let's re-code it in SODA.
+ long tStart = System.currentTimeMillis();
+ Query query = container.query();
+ query.constrain(PersistentCooldownQueueItem.class);
+ query.descend("time").constrain(new Long(now)).smaller()
+
.and(query.descend("parent").constrain(this).identity());
+ ObjectSet results = query.execute();
+ if(results.hasNext()) {
+ long tEnd = System.currentTimeMillis();
+ if(tEnd - tStart > 1000)
+ Logger.error(this, "Query took "+(tEnd-tStart));
+ else
+ if(Logger.shouldLog(Logger.MINOR, this))
+ Logger.minor(this, "Query took
"+(tEnd-tStart));
+ ArrayList v = new ArrayList(Math.min(maxCount,
results.size()));
+ while(results.hasNext() && v.size() < maxCount) {
+ PersistentCooldownQueueItem i =
(PersistentCooldownQueueItem) results.next();
+ if(i.time >= now) {
+ Logger.error(this, "removeKeyBefore():
time >= now: diff="+(now-i.time));
+ continue;
+ }
+ if(i.parent != this) {
+ Logger.error(this, "parent="+i.parent+"
but should be "+this);
+ continue;
+ }
+ container.delete(i);
+ v.add(i.key);
+ }
+ if(!v.isEmpty()) {
+ return (Key[]) v.toArray(new Key[v.size()]);
+ } else {
+ query = container.query();
+
query.descend("time").orderAscending().constrain(new Long(now +
dontCareAfterMillis)).smaller().
+
and(query.descend("parent").constrain(this).identity());
+ results = query.execute();
+ if(results.hasNext()) {
+ return ((PersistentCooldownQueueItem)
results.next()).time;
+ } else {
+ return null;
+ }
+ }
+ } else {
+ long tEnd = System.currentTimeMillis();
+ if(tEnd - tStart > 1000)
+ Logger.error(this, "Query took "+(tEnd-tStart));
+ else
+ if(Logger.shouldLog(Logger.MINOR, this))
+ Logger.minor(this, "Query took
"+(tEnd-tStart));
+ return null;
+ }
+ }
+
+ public long size(ObjectContainer container) {
+ Query query = container.query();
+ query.constrain(PersistentCooldownQueueItem.class);
+ query.descend("parent").constrain(this).identity();
+ ObjectSet results = query.execute();
+ return results.size();
+ }
+
+}
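
removeKeyBefore() keeps the broken native (Predicate) query only as a comment and re-codes it in SODA. For reference, a self-contained SODA query of the same shape, selecting every object whose time field is below a cutoff; the Item class is hypothetical, but the Query calls are the stock db4o API used above:

import com.db4o.ObjectContainer;
import com.db4o.ObjectSet;
import com.db4o.query.Query;

// SODA: constrain the candidate class, then descend into a field and
// constrain its value, exactly as removeKeyBefore() does.
class Item {
	long time;
	Item(long time) { this.time = time; }
}

class SodaExample {
	static ObjectSet itemsBefore(ObjectContainer container, long cutoff) {
		Query query = container.query();
		query.constrain(Item.class); // type constraint
		query.descend("time").constrain(Long.valueOf(cutoff)).smaller(); // time < cutoff
		return query.execute();
	}
}
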
Copied: trunk/freenet/src/freenet/client/async/PersistentCooldownQueueItem.java
(from rev 26320,
branches/db4o/freenet/src/freenet/client/async/PersistentCooldownQueueItem.java)
===================================================================
--- trunk/freenet/src/freenet/client/async/PersistentCooldownQueueItem.java
(rev 0)
+++ trunk/freenet/src/freenet/client/async/PersistentCooldownQueueItem.java
2009-04-01 20:34:09 UTC (rev 26322)
@@ -0,0 +1,26 @@
+/**
+ *
+ */
+package freenet.client.async;
+
+import freenet.keys.Key;
+import freenet.node.SendableGet;
+import freenet.support.HexUtil;
+
+public class PersistentCooldownQueueItem {
+ final SendableGet client;
+ final Key key;
+ /** Same trick as we use on PendingKeyItem. Necessary because db4o
doesn't
+ * index anything by value except for strings. */
+ final String keyAsBytes;
+ final long time;
+ final PersistentCooldownQueue parent;
+
+ PersistentCooldownQueueItem(SendableGet client, Key key, long time,
PersistentCooldownQueue parent) {
+ this.client = client;
+ this.key = key;
+ this.keyAsBytes = HexUtil.bytesToHex(key.getFullKey());
+ this.time = time;
+ this.parent = parent;
+ }
+}
\ No newline at end of file
Copied:
trunk/freenet/src/freenet/client/async/PersistentSendableRequestSet.java (from
rev 26320,
branches/db4o/freenet/src/freenet/client/async/PersistentSendableRequestSet.java)
===================================================================
--- trunk/freenet/src/freenet/client/async/PersistentSendableRequestSet.java
(rev 0)
+++ trunk/freenet/src/freenet/client/async/PersistentSendableRequestSet.java
2009-04-01 20:34:09 UTC (rev 26322)
@@ -0,0 +1,65 @@
+package freenet.client.async;
+
+import java.util.ArrayList;
+
+import com.db4o.ObjectContainer;
+
+import freenet.node.SendableRequest;
+
+/**
+ * Just as with SectoredRandomGrabArray, activation is a big deal, and we can
+ * safely assume that == <=> equals(). So we use a vector, and hope it doesn't
+ * get too big (it won't in the near future). Any structure that might
conceivably
+ * call equals() is doomed, because either it requires activation (extra disk
+ * seek), or it will cause NPEs or messy code to avoid them. One option if size
+ * becomes a problem is to have individual objects in the database for each
+ * SendableRequest; this might involve many disk seeks, so is bad.
+ * @author Matthew Toseland <toad at amphibian.dyndns.org> (0xE43DA450)
+ */
+public class PersistentSendableRequestSet implements SendableRequestSet {
+
+ private final ArrayList<SendableRequest> list;
+
+ PersistentSendableRequestSet() {
+ list = new ArrayList();
+ }
+
+ public synchronized boolean addRequest(SendableRequest req,
ObjectContainer container) {
+ container.activate(list, 1);
+ int idx = find(req);
+ if(idx == -1) {
+ list.add(req);
+ container.store(req);
+ // Store to depth 1, otherwise it will update to depth 3
+ container.ext().store(list, 1);
+ return true;
+ } else return false;
+ }
+
+ private synchronized int find(SendableRequest req) {
+ for(int i=0;i<list.size();i++)
+ if(list.get(i) == req) return i;
+ return -1;
+ }
+
+ public synchronized SendableRequest[] listRequests(ObjectContainer
container) {
+ container.activate(list, 1);
+ return (SendableRequest[]) list.toArray(new
SendableRequest[list.size()]);
+ }
+
+ public synchronized boolean removeRequest(SendableRequest req,
ObjectContainer container) {
+ container.activate(list, 1);
+ int idx = find(req);
+ if(idx == -1) return false;
+ list.remove(idx);
+ container.ext().store(list, 2);
+ return true;
+ }
+
+ public void removeFrom(ObjectContainer container) {
+ container.activate(list, 1);
+ container.delete(list);
+ container.delete(this);
+ }
+
+}
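
As the class comment explains, equals() on a db4o-managed object is a trap: the target may be inactive, so any comparison must be by reference. A tiny sketch of the identity-scan idiom that find() relies on (IdentityList is a hypothetical name):

import java.util.ArrayList;

// Membership by ==, never equals(): reference comparison is safe on
// inactive db4o objects, virtual method calls on them are not.
class IdentityList<T> {
	private final ArrayList<T> list = new ArrayList<T>();

	boolean addIfAbsent(T item) {
		for(int i = 0; i < list.size(); i++)
			if(list.get(i) == item) return false; // identity only
		list.add(item);
		return true;
	}
}
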
Modified: trunk/freenet/src/freenet/client/async/PutCompletionCallback.java
===================================================================
--- trunk/freenet/src/freenet/client/async/PutCompletionCallback.java
2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/client/async/PutCompletionCallback.java
2009-04-01 20:34:09 UTC (rev 26322)
@@ -1,5 +1,7 @@
package freenet.client.async;
+import com.db4o.ObjectContainer;
+
import freenet.client.InsertException;
import freenet.client.Metadata;
import freenet.keys.BaseClientKey;
@@ -9,29 +11,31 @@
*/
public interface PutCompletionCallback {
- public void onSuccess(ClientPutState state);
+ public void onSuccess(ClientPutState state, ObjectContainer container,
ClientContext context);
- public void onFailure(InsertException e, ClientPutState state);
+ public void onFailure(InsertException e, ClientPutState state,
ObjectContainer container, ClientContext context);
- public void onEncode(BaseClientKey usk, ClientPutState state);
+ /** Called when we know the final URI of the state in question. The
currentState eventually calls this
+ * on the ClientPutter, which relays to the fcp layer, which sends a
URIGenerated message. */
+ public void onEncode(BaseClientKey usk, ClientPutState state,
ObjectContainer container, ClientContext context);
- public void onTransition(ClientPutState oldState, ClientPutState
newState);
+ public void onTransition(ClientPutState oldState, ClientPutState
newState, ObjectContainer container);
/** Only called if explicitly asked for, in which case, generally
* the metadata won't be inserted. Won't be called if there isn't
* any!
*/
- public void onMetadata(Metadata m, ClientPutState state);
+ public void onMetadata(Metadata m, ClientPutState state,
ObjectContainer container, ClientContext context);
/** Called when enough data has been inserted that the file can be
* retrieved, even if not all data has been inserted yet. Note that this
* is only supported for splitfiles; if you get onSuccess() first,
assume
* that onFetchable() isn't coming. */
- public void onFetchable(ClientPutState state);
+ public void onFetchable(ClientPutState state, ObjectContainer
container);
/** Called when the ClientPutState knows that it knows about
* all the blocks it will need to put.
*/
- public void onBlockSetFinished(ClientPutState state);
+ public void onBlockSetFinished(ClientPutState state, ObjectContainer
container, ClientContext context);
}
Copied: trunk/freenet/src/freenet/client/async/RegisterMe.java (from rev 26320,
branches/db4o/freenet/src/freenet/client/async/RegisterMe.java)
===================================================================
--- trunk/freenet/src/freenet/client/async/RegisterMe.java
(rev 0)
+++ trunk/freenet/src/freenet/client/async/RegisterMe.java 2009-04-01
20:34:09 UTC (rev 26322)
@@ -0,0 +1,40 @@
+package freenet.client.async;
+
+import freenet.node.SendableRequest;
+
+/**
+ * These must be deleted once the request has been registered.
+ * See DatastoreCheckerItem: this class only handles inserts.
+ * @author toad
+ */
+public class RegisterMe {
+ final SendableRequest nonGetRequest;
+ final ClientRequestSchedulerCore core;
+ final long addedTime;
+ final short priority;
+ /**
+ * Only set if the key is on the queue.
+ */
+ final long bootID;
+ private final int hashCode;
+ public final BlockSet blocks;
+
+ RegisterMe(SendableRequest nonGetRequest, short prio,
ClientRequestSchedulerCore core, BlockSet blocks, long bootID) {
+ this.bootID = bootID;
+ this.core = core;
+ this.nonGetRequest = nonGetRequest;
+ priority = prio;
+ addedTime = System.currentTimeMillis();
+ this.blocks = blocks;
+ int hash = core.hashCode();
+ if(nonGetRequest != null)
+ hash ^= nonGetRequest.hashCode();
+ hash *= prio;
+ hashCode = hash;
+ }
+
+ public int hashCode() {
+ return hashCode;
+ }
+}
+
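RegisterMe computes its hash once, in the constructor, from stable fields. This is presumably because the default identity hash would differ every time db4o re-instantiates the object, breaking any hash-based structure that holds it across activations. A sketch of the cached-hash idiom (StableHash and its field are hypothetical):

// Hash derived once from stable state, so it survives db4o
// deactivation and re-instantiation, unlike identity hashes.
class StableHash {
	private final String name; // hypothetical stable field
	private final int hashCode;

	StableHash(String name) {
		this.name = name;
		this.hashCode = name.hashCode() ^ 0x5bd1e995; // arbitrary mixing constant
	}

	public String name() { return name; }

	@Override
	public int hashCode() { return hashCode; }
}
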
Modified: trunk/freenet/src/freenet/client/async/RequestCooldownQueue.java
===================================================================
--- trunk/freenet/src/freenet/client/async/RequestCooldownQueue.java
2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/client/async/RequestCooldownQueue.java
2009-04-01 20:34:09 UTC (rev 26322)
@@ -3,6 +3,10 @@
* http://www.gnu.org/ for further details of the GPL. */
package freenet.client.async;
+import java.util.ArrayList;
+
+import com.db4o.ObjectContainer;
+
import freenet.keys.Key;
import freenet.node.SendableGet;
import freenet.support.Fields;
@@ -17,7 +21,7 @@
* circular buffer, we expand it if necessary.
* @author toad
*/
-public class RequestCooldownQueue {
+public class RequestCooldownQueue implements CooldownQueue {
/** keys which have been put onto the cooldown queue */
private Key[] keys;
@@ -57,10 +61,10 @@
this.cooldownTime = cooldownTime;
}
- /**
- * Add a key to the end of the queue. Returns the time at which it will
be valid again.
+ /* (non-Javadoc)
+ * @see freenet.client.async.CooldownQueue#add(freenet.keys.Key,
freenet.node.SendableGet)
*/
- synchronized long add(Key key, SendableGet client) {
+ public synchronized long add(Key key, SendableGet client,
ObjectContainer container) {
long removeTime = System.currentTimeMillis() + cooldownTime;
if(removeTime < getLastTime()) {
removeTime = getLastTime();
@@ -88,7 +92,7 @@
if(startPtr == 0) {
// No room
expandQueue();
- add(key, client);
+ add(key, client, null);
return;
} else {
// Wrap around
@@ -101,7 +105,7 @@
if(logMINOR) Logger.minor(this, "endPtr < startPtr");
if(endPtr == startPtr - 1) {
expandQueue();
- add(key, client);
+ add(key, client, null);
return;
} else {
endPtr++;
@@ -119,11 +123,11 @@
return;
}
- /**
- * Remove a key whose cooldown time has passed.
- * @return Either a Key or null if no keys have passed their cooldown
time.
+ /* (non-Javadoc)
+ * @see freenet.client.async.CooldownQueue#removeKeyBefore(long)
*/
- synchronized Key removeKeyBefore(long now) {
+ public synchronized Object removeKeyBefore(long now, long
dontCareAfterMillis, ObjectContainer container, int maxKeys) {
+ ArrayList v = new ArrayList();
boolean foundIT = false;
if(logDEBUG) {
foundIT = bigLog();
@@ -137,7 +141,10 @@
while(true) {
if(startPtr == endPtr) {
if(logMINOR) Logger.minor(this, "No keys
queued");
- return null;
+ if(!v.isEmpty())
+ return (Key[]) v.toArray(new
Key[v.size()]);
+ else
+ return null;
}
long time = times[startPtr];
Key key = keys[startPtr];
@@ -152,7 +159,12 @@
} else {
if(time > now) {
if(logMINOR) Logger.minor(this, "First
key is later at time "+time);
- return null;
+ if(!v.isEmpty())
+ return (Key[]) v.toArray(new
Key[v.size()]);
+ else if(time < (now +
dontCareAfterMillis))
+ return Long.valueOf(time);
+ else
+ return null;
}
times[startPtr] = 0;
keys[startPtr] = null;
@@ -161,7 +173,12 @@
if(startPtr == times.length) startPtr = 0;
}
if(logMINOR) Logger.minor(this, "Returning key "+key);
- return key;
+ v.add(key);
+ if(v.size() == maxKeys) {
+ if(!v.isEmpty())
+ return (Key[]) v.toArray(new
Key[v.size()]);
+ else return null;
+ }
}
}
@@ -227,10 +244,10 @@
return foundIT;
}
- /**
- * @return True if the key was found.
+ /* (non-Javadoc)
+ * @see freenet.client.async.CooldownQueue#removeKey(freenet.keys.Key,
freenet.node.SendableGet, long)
*/
- synchronized boolean removeKey(Key key, SendableGet client, long time) {
+ public synchronized boolean removeKey(Key key, SendableGet client, long
time, ObjectContainer container) {
if(time <= 0) return false; // We won't find it.
if(holes < 0) Logger.error(this, "holes = "+holes+" !!");
if(logMINOR) Logger.minor(this, "Remove key "+key+" client
"+client+" at time "+time+" startPtr="+startPtr+" endPtr="+endPtr+"
holes="+holes+" keys.length="+keys.length);
Copied: trunk/freenet/src/freenet/client/async/SendableRequestSet.java (from
rev 26320,
branches/db4o/freenet/src/freenet/client/async/SendableRequestSet.java)
===================================================================
--- trunk/freenet/src/freenet/client/async/SendableRequestSet.java
(rev 0)
+++ trunk/freenet/src/freenet/client/async/SendableRequestSet.java
2009-04-01 20:34:09 UTC (rev 26322)
@@ -0,0 +1,17 @@
+package freenet.client.async;
+
+import com.db4o.ObjectContainer;
+
+import freenet.node.SendableRequest;
+
+public interface SendableRequestSet {
+
+ public SendableRequest[] listRequests(ObjectContainer container);
+
+ public boolean addRequest(SendableRequest req, ObjectContainer
container);
+
+ public boolean removeRequest(SendableRequest req, ObjectContainer
container);
+
+ public void removeFrom(ObjectContainer container);
+
+}
Modified: trunk/freenet/src/freenet/client/async/SimpleBlockSet.java
===================================================================
--- trunk/freenet/src/freenet/client/async/SimpleBlockSet.java 2009-04-01
20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/client/async/SimpleBlockSet.java 2009-04-01
20:34:09 UTC (rev 26322)
@@ -3,6 +3,8 @@
import java.util.HashMap;
import java.util.Set;
+import com.db4o.ObjectContainer;
+
import freenet.keys.ClientKey;
import freenet.keys.ClientKeyBlock;
import freenet.keys.Key;
@@ -17,7 +19,7 @@
*/
public class SimpleBlockSet implements BlockSet {
- private final HashMap blocksByKey = new HashMap();
+ private final HashMap<Key, KeyBlock> blocksByKey = new HashMap<Key,
KeyBlock>();
public synchronized void add(KeyBlock block) {
blocksByKey.put(block.getKey(), block);
@@ -41,5 +43,23 @@
return null;
}
}
+
+ public boolean objectCanNew(ObjectContainer container) {
+ /* Storing a BlockSet is not supported. There are some
complications, so let's
+ * not implement this until FCP supports it (currently we can't
do fetch-from-blob,
+ * we can only do fetch-to-blob and insert-blob).
+ *
+ * The major problems are:
+ * - In both CHKBlock and SSKBlock, who is responsible for
deleting the node keys? We
+ * have to have them in the objects.
+ * - In SSKBlock, who is responsible for deleting the
DSAPublicKey? And the DSAGroup?
+ * A group might be unique or might be shared between very
many SSKs...
+ *
+ * Especially in the second case, we don't want to just copy
every time even for
+ * transient uses ... the best solution may be to copy in
objectCanNew(), but even
+ * then callers to the relevant getter methods may be a worry.
+ */
+ throw new UnsupportedOperationException("Block set storage in
database not supported");
+ }
}
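
objectCanNew() is one of db4o's object lifecycle callbacks, invoked before an object is stored for the first time; throwing from it, as SimpleBlockSet does, makes any accidental attempt to persist the object fail fast. A minimal sketch of using the callback to keep a class transient-only:

import com.db4o.ObjectContainer;

// db4o consults objectCanNew() before the first store; throwing here
// vetoes persistence for the whole class.
class TransientOnly {
	public boolean objectCanNew(ObjectContainer container) {
		throw new UnsupportedOperationException(getClass().getName() + " must not be stored");
	}
}
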
Modified: trunk/freenet/src/freenet/client/async/SimpleHealingQueue.java
===================================================================
--- trunk/freenet/src/freenet/client/async/SimpleHealingQueue.java
2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/client/async/SimpleHealingQueue.java
2009-04-01 20:34:09 UTC (rev 26322)
@@ -5,12 +5,15 @@
import java.util.HashMap;
+import com.db4o.ObjectContainer;
+
import freenet.client.InsertContext;
import freenet.client.InsertException;
import freenet.client.Metadata;
import freenet.keys.BaseClientKey;
import freenet.keys.CHKBlock;
import freenet.keys.FreenetURI;
+import freenet.node.RequestClient;
import freenet.support.Logger;
import freenet.support.api.Bucket;
@@ -21,14 +24,20 @@
InsertContext ctx;
final HashMap runningInserters;
- public SimpleHealingQueue(ClientRequestScheduler scheduler,
InsertContext context, short prio, int maxRunning) {
- super(prio, scheduler, null, context);
+ public SimpleHealingQueue(InsertContext context, short prio, int
maxRunning) {
+ super(prio, new RequestClient() {
+ public boolean persistent() {
+ return false;
+ }
+ public void removeFrom(ObjectContainer container) {
+ throw new UnsupportedOperationException();
+ } });
this.ctx = context;
this.runningInserters = new HashMap();
this.maxRunning = maxRunning;
}
- public boolean innerQueue(Bucket data) {
+ public boolean innerQueue(Bucket data, ClientContext context) {
SingleBlockInserter sbi;
int ctr;
synchronized(this) {
@@ -37,7 +46,7 @@
try {
sbi = new SingleBlockInserter(this, data,
(short)-1,
FreenetURI.EMPTY_CHK_URI, ctx, this, false,
- CHKBlock.DATA_LENGTH,
ctr, false, false, false, data, true);
+ CHKBlock.DATA_LENGTH,
ctr, false, false, false, data, null, context, false, true);
} catch (Throwable e) {
Logger.error(this, "Caught trying to insert
healing block: "+e, e);
return false;
@@ -45,7 +54,7 @@
runningInserters.put(data, sbi);
}
try {
- sbi.schedule();
+ sbi.schedule(null, context);
if(Logger.shouldLog(Logger.MINOR, this))
Logger.minor(this, "Started healing insert
"+ctr+" for "+data);
return true;
@@ -55,13 +64,13 @@
}
}
- public void queue(Bucket data) {
- if(!innerQueue(data))
+ public void queue(Bucket data, ClientContext context) {
+ if(!innerQueue(data, context))
data.free();
}
@Override
- public void onMajorProgress() {
+ public void onMajorProgress(ObjectContainer container) {
// Ignore
}
@@ -76,11 +85,11 @@
}
@Override
- public void notifyClients() {
+ public void notifyClients(ObjectContainer container, ClientContext
context) {
// Do nothing
}
- public void onSuccess(ClientPutState state) {
+ public void onSuccess(ClientPutState state, ObjectContainer container,
ClientContext context) {
SingleBlockInserter sbi = (SingleBlockInserter)state;
Bucket data = (Bucket) sbi.getToken();
synchronized(this) {
@@ -91,7 +100,7 @@
data.free();
}
- public void onFailure(InsertException e, ClientPutState state) {
+ public void onFailure(InsertException e, ClientPutState state,
ObjectContainer container, ClientContext context) {
SingleBlockInserter sbi = (SingleBlockInserter)state;
Bucket data = (Bucket) sbi.getToken();
synchronized(this) {
@@ -102,31 +111,35 @@
data.free();
}
- public void onEncode(BaseClientKey usk, ClientPutState state) {
+ public void onEncode(BaseClientKey usk, ClientPutState state,
ObjectContainer container, ClientContext context) {
// Ignore
}
- public void onTransition(ClientPutState oldState, ClientPutState
newState) {
+ public void onTransition(ClientPutState oldState, ClientPutState
newState, ObjectContainer container) {
// Should never happen
Logger.error(this, "impossible: onTransition on
SimpleHealingQueue from "+oldState+" to "+newState, new Exception("debug"));
}
- public void onMetadata(Metadata m, ClientPutState state) {
+ public void onMetadata(Metadata m, ClientPutState state,
ObjectContainer container, ClientContext context) {
// Should never happen
Logger.error(this, "Got metadata on SimpleHealingQueue from
"+state+": "+m, new Exception("debug"));
}
- public void onBlockSetFinished(ClientPutState state) {
+ public void onBlockSetFinished(ClientPutState state, ObjectContainer
container, ClientContext context) {
// Ignore
}
- public void onFetchable(ClientPutState state) {
+ public void onFetchable(ClientPutState state, ObjectContainer
container) {
// Ignore
}
@Override
- public void onTransition(ClientGetState oldState, ClientGetState
newState) {
+ public void onTransition(ClientGetState oldState, ClientGetState
newState, ObjectContainer container) {
// Ignore
}
+ public void cancel(ObjectContainer container, ClientContext context) {
+ super.cancel();
+ }
+
}
Modified: trunk/freenet/src/freenet/client/async/SimpleManifestPutter.java
===================================================================
--- trunk/freenet/src/freenet/client/async/SimpleManifestPutter.java
2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/client/async/SimpleManifestPutter.java
2009-04-01 20:34:09 UTC (rev 26322)
@@ -3,16 +3,18 @@
import java.io.BufferedOutputStream;
import java.io.IOException;
import java.io.OutputStream;
+import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
-import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Vector;
import java.util.zip.ZipEntry;
import java.util.zip.ZipOutputStream;
+import com.db4o.ObjectContainer;
+
import org.apache.tools.tar.TarEntry;
import org.apache.tools.tar.TarOutputStream;
@@ -27,65 +29,123 @@
import freenet.client.events.SplitfileProgressEvent;
import freenet.keys.BaseClientKey;
import freenet.keys.FreenetURI;
+import freenet.node.RequestClient;
+import freenet.support.LogThresholdCallback;
import freenet.support.Logger;
import freenet.support.api.Bucket;
import freenet.support.io.BucketTools;
+import freenet.support.io.NativeThread;
public class SimpleManifestPutter extends BaseClientPutter implements
PutCompletionCallback {
+
+ private static volatile boolean logMINOR;
+
+ static {
+ Logger.registerLogThresholdCallback(new LogThresholdCallback() {
+
+ @Override
+ public void shouldUpdate() {
+ logMINOR = Logger.shouldLog(Logger.MINOR, this);
+ }
+ });
+ }
+
// Only implements PutCompletionCallback for the final metadata insert
-
private class PutHandler extends BaseClientPutter implements
PutCompletionCallback {
- protected PutHandler(final SimpleManifestPutter smp, String
name, Bucket data, ClientMetadata cm, boolean getCHKOnly) throws
InsertException {
- super(smp.priorityClass, smp.chkScheduler,
smp.sskScheduler, smp.client);
+ protected PutHandler(final SimpleManifestPutter smp, String
name, Bucket data, ClientMetadata cm, boolean getCHKOnly) {
+ super(smp.priorityClass, smp.client);
+ this.persistent =
SimpleManifestPutter.this.persistent();
this.cm = cm;
this.data = data;
InsertBlock block =
- new InsertBlock(data, cm,
FreenetURI.EMPTY_CHK_URI);
+ new InsertBlock(data, cm, persistent() ?
FreenetURI.EMPTY_CHK_URI.clone() : FreenetURI.EMPTY_CHK_URI);
this.origSFI =
- new SingleFileInserter(this, this, block,
false, ctx, false, getCHKOnly, true, null, null, true, null, earlyEncode);
+ new SingleFileInserter(this, this, block,
false, ctx, false, getCHKOnly, true, null, null, false, null, earlyEncode);
metadata = null;
+ containerHandle = null;
}
protected PutHandler(final SimpleManifestPutter smp, String
name, FreenetURI target, ClientMetadata cm) {
- super(smp.getPriorityClass(), smp.chkScheduler,
smp.sskScheduler, smp.client);
+ super(smp.getPriorityClass(), smp.client);
+ this.persistent =
SimpleManifestPutter.this.persistent();
this.cm = cm;
this.data = null;
Metadata m = new Metadata(Metadata.SIMPLE_REDIRECT,
null, null, target, cm);
metadata = m;
+ if(logMINOR) Logger.minor(this, "Simple redirect
metadata: "+m);
origSFI = null;
+ containerHandle = null;
}
protected PutHandler(final SimpleManifestPutter smp, String
name, String targetInArchive, ClientMetadata cm, Bucket data) {
- super(smp.getPriorityClass(), smp.chkScheduler,
smp.sskScheduler, smp.client);
+ super(smp.getPriorityClass(), smp.client);
+ this.persistent =
SimpleManifestPutter.this.persistent();
this.cm = cm;
this.data = data;
this.targetInArchive = targetInArchive;
Metadata m = new
Metadata(Metadata.ARCHIVE_INTERNAL_REDIRECT, null, null, targetInArchive, cm);
metadata = m;
+ if(logMINOR) Logger.minor(this, "Internal redirect:
"+m);
origSFI = null;
+ containerHandle = null;
}
- private SingleFileInserter origSFI;
+ private ClientPutState origSFI;
+ private ClientPutState currentState;
private ClientMetadata cm;
private Metadata metadata;
private String targetInArchive;
private final Bucket data;
+ private final boolean persistent;
+ private final PutHandler containerHandle;
- public void start() throws InsertException {
+ public void start(ObjectContainer container, ClientContext
context) throws InsertException {
if (origSFI == null) {
- Logger.error(this, "origSFI is null on start(),
should be impossible", new Exception("debug"));
- return;
+ Logger.error(this, "origSFI is null on
start(), should be impossible", new Exception("debug"));
+ return;
}
if (metadata != null) {
- Logger.error(this, "metadata=" + metadata + "
on start(), should be impossible", new Exception("debug"));
+ Logger.error(this, "metdata=" + metadata + " on
start(), should be impossible", new Exception("debug"));
return;
}
- origSFI.start(null);
- origSFI = null;
+ ClientPutState sfi;
+ synchronized(this) {
+ sfi = origSFI;
+ currentState = sfi;
+ origSFI = null;
+ }
+ if(persistent) {
+ container.activate(sfi, 1);
+ container.store(this);
+ }
+ sfi.schedule(container, context);
+ if(persistent) {
+ container.deactivate(sfi, 1);
+ container.store(this);
+ }
}
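
start() activates origSFI before scheduling it, stores the handler, then
deactivates again; that activate/store/deactivate discipline recurs throughout
this patch. A minimal, self-contained db4o demonstration of the idiom, assuming
only the db4o 7.x jar; the Node class and the file name are invented for the
example:

    import com.db4o.Db4o;
    import com.db4o.ObjectContainer;

    public class ActivationDemo {
        static class Node { Node next; int value; }

        public static void main(String[] args) {
            ObjectContainer container = Db4o.openFile("demo.db4o");
            try {
                Node head = new Node();
                head.next = new Node();
                container.store(head);                    // new graph stored fully

                container.deactivate(head, Integer.MAX_VALUE);
                container.activate(head, 1);              // depth 1: head's own fields
                                                          // valid, head.next is a stub
                head.value = 42;
                container.store(head);                    // persist the mutation
                container.deactivate(head, 1);            // release the memory again
            } finally {
                container.close();
            }
        }
    }
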
@Override
+ public void cancel(ObjectContainer container, ClientContext
context) {
+ if(Logger.shouldLog(Logger.MINOR, this))
+ Logger.minor(this, "Cancelling "+this, new
Exception("debug"));
+ ClientPutState oldState = null;
+ synchronized(this) {
+ if(cancelled) return;
+ super.cancel();
+ oldState = currentState;
+ }
+ if(persistent()) {
+ container.store(this);
+ if(oldState != null)
+ container.activate(oldState, 1);
+ }
+ if(oldState != null) oldState.cancel(container,
context);
+ onFailure(new
InsertException(InsertException.CANCELLED), null, container, context);
+ }
+
+ @Override
public FreenetURI getURI() {
return null;
}
@@ -95,52 +155,165 @@
return SimpleManifestPutter.this.finished || cancelled
|| SimpleManifestPutter.this.cancelled;
}
- public void onSuccess(ClientPutState state) {
- logMINOR = Logger.shouldLog(Logger.MINOR, this);
+ public void onSuccess(ClientPutState state, ObjectContainer
container, ClientContext context) {
if(logMINOR) Logger.minor(this, "Completed "+this);
- SimpleManifestPutter.this.onFetchable(this);
+ if(persistent) {
+ container.activate(SimpleManifestPutter.this,
1);
+ container.activate(runningPutHandlers, 2);
+ }
+ SimpleManifestPutter.this.onFetchable(this, container);
+ ClientPutState oldState;
+ boolean insertedAllFiles = true;
+ synchronized(this) {
+ oldState = currentState;
+ currentState = null;
+ }
synchronized(SimpleManifestPutter.this) {
+ if(persistent) container.store(this);
runningPutHandlers.remove(this);
+ if(persistent) {
+
container.ext().store(runningPutHandlers, 2);
+
container.activate(putHandlersWaitingForMetadata, 2);
+ }
+
if(putHandlersWaitingForMetadata.contains(this)) {
+
putHandlersWaitingForMetadata.remove(this);
+
container.ext().store(putHandlersWaitingForMetadata, 2);
+ Logger.error(this, "PutHandler was in
waitingForMetadata in onSuccess() on "+this+" for "+SimpleManifestPutter.this);
+ }
+
+ if(persistent) {
+
container.deactivate(putHandlersWaitingForMetadata, 1);
+ container.activate(waitingForBlockSets,
2);
+ }
+ if(waitingForBlockSets.contains(this)) {
+ waitingForBlockSets.remove(this);
+ container.store(waitingForBlockSets);
+ Logger.error(this, "PutHandler was in
waitingForBlockSets in onSuccess() on "+this+" for "+SimpleManifestPutter.this);
+ }
+ if(persistent) {
+
container.deactivate(waitingForBlockSets, 1);
+
container.deactivate(putHandlersWaitingForFetchable, 1);
+
container.activate(putHandlersWaitingForFetchable, 2);
+ }
+
if(putHandlersWaitingForFetchable.contains(this)) {
+
putHandlersWaitingForFetchable.remove(this);
+
container.ext().store(putHandlersWaitingForFetchable, 2);
+ // Not getting an onFetchable is not
unusual, just ignore it.
+ if(logMINOR) Logger.minor(this,
"PutHandler was in waitingForFetchable in onSuccess() on "+this+" for
"+SimpleManifestPutter.this);
+ }
+ if(persistent)
+
container.deactivate(putHandlersWaitingForFetchable, 1);
+
if(!runningPutHandlers.isEmpty()) {
- return;
+ if(logMINOR) {
+ Logger.minor(this, "Running put
handlers: "+runningPutHandlers.size());
+ for(Object o :
runningPutHandlers) {
+ boolean activated =
true;
+ if(persistent) {
+ activated =
container.ext().isActive(o);
+ if(!activated)
container.activate(o, 1);
+ }
+ Logger.minor(this,
"Still running: "+o);
+ if(!activated)
+
container.deactivate(o, 1);
+ }
+ }
+ insertedAllFiles = false;
}
}
- insertedAllFiles();
+ if(oldState != null && oldState != state && persistent)
{
+ container.activate(oldState, 1);
+ oldState.removeFrom(container, context);
+ } else if(state != null && persistent) {
+ state.removeFrom(container, context);
+ }
+ if(insertedAllFiles)
+ insertedAllFiles(container, context);
+ if(persistent) {
+ container.deactivate(runningPutHandlers, 1);
+ container.deactivate(SimpleManifestPutter.this,
1);
+ }
}
- public void onFailure(InsertException e, ClientPutState state) {
- logMINOR = Logger.shouldLog(Logger.MINOR, this);
+ public void onFailure(InsertException e, ClientPutState state,
ObjectContainer container, ClientContext context) {
+ ClientPutState oldState;
+ synchronized(this) {
+ oldState = currentState;
+ currentState = null;
+ }
+ if(oldState != null && oldState != state && persistent)
{
+ container.activate(oldState, 1);
+ oldState.removeFrom(container, context);
+ } else if(state != null && persistent) {
+ state.removeFrom(container, context);
+ }
if(logMINOR) Logger.minor(this, "Failed: "+this+" -
"+e, e);
- fail(e);
+ if(persistent)
+ container.activate(SimpleManifestPutter.this,
1);
+ fail(e, container, context);
+ if(persistent)
+ container.deactivate(SimpleManifestPutter.this,
1);
}
- public void onEncode(BaseClientKey key, ClientPutState state) {
+ public void onEncode(BaseClientKey key, ClientPutState state,
ObjectContainer container, ClientContext context) {
if(logMINOR) Logger.minor(this, "onEncode("+key+") for
"+this);
if(metadata == null) {
// The file was too small to have its own
metadata, we get this instead.
// So we make the key into metadata.
+ if(persistent) {
+ container.activate(key, 5);
+
container.activate(SimpleManifestPutter.this, 1);
+ }
Metadata m =
new Metadata(Metadata.SIMPLE_REDIRECT,
null, null, key.getURI(), cm);
- onMetadata(m, null);
+ onMetadata(m, null, container, context);
+ if(persistent) {
+
container.deactivate(SimpleManifestPutter.this, 1);
+ }
}
}
- public void onTransition(ClientPutState oldState,
ClientPutState newState) {}
+ /**
+ * The caller of onTransition removes the old state, so we
don't have to.
+ * However, in onSuccess or onFailure, we need to remove the
new state, even if
+ * what is passed in is different (in which case we remove that
too).
+ */
+ public void onTransition(ClientPutState oldState,
ClientPutState newState, ObjectContainer container) {
+ if(newState == null) throw new NullPointerException();
+
+ // onTransition is *not* responsible for removing the
old state, the caller is.
+ synchronized (this) {
+ if (currentState == oldState) {
+ currentState = newState;
+ if(persistent())
+ container.store(this);
+ return;
+ }
+ }
+ Logger.error(this, "onTransition: cur=" + currentState
+ ", old=" + oldState + ", new=" + newState);
+ }
- public void onMetadata(Metadata m, ClientPutState state) {
- logMINOR = Logger.shouldLog(Logger.MINOR, this);
- if(logMINOR) Logger.minor(this, "Assigning metadata:
"+m+" for "+this+" from "+state,
+ public void onMetadata(Metadata m, ClientPutState state,
ObjectContainer container, ClientContext context) {
+ if(logMINOR) Logger.minor(this, "Assigning metadata:
"+m+" for "+this+" from "+state+" persistent="+persistent,
new Exception("debug"));
if(metadata != null) {
Logger.error(this, "Reassigning metadata", new
Exception("debug"));
return;
}
metadata = m;
+ if(persistent) {
+ container.activate(SimpleManifestPutter.this,
1);
+
container.activate(putHandlersWaitingForMetadata, 2);
+ }
boolean allMetadatas = false;
synchronized(SimpleManifestPutter.this) {
putHandlersWaitingForMetadata.remove(this);
+ if(persistent) {
+
container.ext().store(putHandlersWaitingForMetadata, 2);
+ container.store(this);
+ }
allMetadatas =
putHandlersWaitingForMetadata.isEmpty();
if(!allMetadatas) {
if(logMINOR)
@@ -149,7 +322,7 @@
}
if(allMetadatas) {
// Will resolve etc.
- gotAllMetadata();
+ gotAllMetadata(container, context);
} else {
// Resolve now to speed up the insert.
try {
@@ -158,92 +331,196 @@
throw new
MetadataUnresolvedException(new Metadata[] { m }, "Too big");
} catch (MetadataUnresolvedException e) {
try {
- resolve(e);
+ resolve(e, container, context);
} catch (IOException e1) {
- fail(new
InsertException(InsertException.BUCKET_ERROR, e1, null));
+ fail(new
InsertException(InsertException.BUCKET_ERROR, e1, null), container, context);
return;
} catch (InsertException e1) {
- fail(e1);
+ fail(e1, container, context);
}
}
}
+ if(persistent) {
+
container.deactivate(putHandlersWaitingForMetadata, 1);
+ container.deactivate(SimpleManifestPutter.this,
1);
+ }
}
@Override
- public void addBlock() {
- SimpleManifestPutter.this.addBlock();
+ public void addBlock(ObjectContainer container) {
+ if(persistent) {
+ container.activate(SimpleManifestPutter.this,
1);
+ }
+ SimpleManifestPutter.this.addBlock(container);
+ if(persistent)
+ container.deactivate(SimpleManifestPutter.this,
1);
}
@Override
- public void addBlocks(int num) {
- SimpleManifestPutter.this.addBlocks(num);
+ public void addBlocks(int num, ObjectContainer container) {
+ if(persistent)
+ container.activate(SimpleManifestPutter.this,
1);
+ SimpleManifestPutter.this.addBlocks(num, container);
+ if(persistent)
+ container.deactivate(SimpleManifestPutter.this,
1);
}
@Override
- public void completedBlock(boolean dontNotify) {
- SimpleManifestPutter.this.completedBlock(dontNotify);
+ public void completedBlock(boolean dontNotify, ObjectContainer
container, ClientContext context) {
+ if(persistent)
+ container.activate(SimpleManifestPutter.this,
1);
+ SimpleManifestPutter.this.completedBlock(dontNotify,
container, context);
+ if(persistent)
+ container.deactivate(SimpleManifestPutter.this,
1);
}
@Override
- public void failedBlock() {
- SimpleManifestPutter.this.failedBlock();
+ public void failedBlock(ObjectContainer container,
ClientContext context) {
+ if(persistent)
+ container.activate(SimpleManifestPutter.this,
1);
+ SimpleManifestPutter.this.failedBlock(container,
context);
+ if(persistent)
+ container.deactivate(SimpleManifestPutter.this,
1);
}
@Override
- public void fatallyFailedBlock() {
- SimpleManifestPutter.this.fatallyFailedBlock();
+ public void fatallyFailedBlock(ObjectContainer container,
ClientContext context) {
+ if(persistent)
+ container.activate(SimpleManifestPutter.this,
1);
+ SimpleManifestPutter.this.fatallyFailedBlock(container,
context);
+ if(persistent)
+ container.deactivate(SimpleManifestPutter.this,
1);
}
@Override
- public void addMustSucceedBlocks(int blocks) {
- SimpleManifestPutter.this.addMustSucceedBlocks(blocks);
+ public void addMustSucceedBlocks(int blocks, ObjectContainer
container) {
+ if(persistent)
+ container.activate(SimpleManifestPutter.this,
1);
+ SimpleManifestPutter.this.addMustSucceedBlocks(blocks,
container);
+ if(persistent)
+ container.deactivate(SimpleManifestPutter.this,
1);
}
@Override
- public void notifyClients() {
- // FIXME generate per-filename events???
+ public void notifyClients(ObjectContainer container,
ClientContext context) {
+ if(persistent)
+ container.activate(SimpleManifestPutter.this,
1);
+ SimpleManifestPutter.this.notifyClients(container,
context);
+ if(persistent)
+ container.deactivate(SimpleManifestPutter.this,
1);
}
- public void onBlockSetFinished(ClientPutState state) {
+ public void onBlockSetFinished(ClientPutState state,
ObjectContainer container, ClientContext context) {
+ if(persistent) {
+ container.activate(SimpleManifestPutter.this,
1);
+ container.activate(waitingForBlockSets, 2);
+ }
+ boolean allBlockSets = false;
synchronized(SimpleManifestPutter.this) {
waitingForBlockSets.remove(this);
- if(!waitingForBlockSets.isEmpty()) return;
+ if(persistent)
+ container.store(waitingForBlockSets);
+ allBlockSets = waitingForBlockSets.isEmpty();
}
- SimpleManifestPutter.this.blockSetFinalized();
+ if(allBlockSets)
+
SimpleManifestPutter.this.blockSetFinalized(container, context);
+ if(persistent) {
+ container.deactivate(waitingForBlockSets, 1);
+ container.deactivate(SimpleManifestPutter.this,
1);
+ }
}
@Override
- public void onMajorProgress() {
- SimpleManifestPutter.this.onMajorProgress();
+ public void onMajorProgress(ObjectContainer container) {
+ if(persistent)
+ container.activate(SimpleManifestPutter.this,
1);
+ SimpleManifestPutter.this.onMajorProgress(container);
+ if(persistent)
+ container.deactivate(SimpleManifestPutter.this,
1);
}
- public void onFetchable(ClientPutState state) {
- SimpleManifestPutter.this.onFetchable(this);
+ public void onFetchable(ClientPutState state, ObjectContainer
container) {
+ if(persistent)
+ container.activate(SimpleManifestPutter.this,
1);
+ SimpleManifestPutter.this.onFetchable(this, container);
+ if(persistent)
+ container.deactivate(SimpleManifestPutter.this,
1);
}
@Override
- public void onTransition(ClientGetState oldState,
ClientGetState newState) {
+ public void onTransition(ClientGetState oldState,
ClientGetState newState, ObjectContainer container) {
// Ignore
}
+ public void clearMetadata(ObjectContainer container) {
+ metadata = null;
+ if(persistent) container.store(this);
+ }
+
+ @Override
+ public void removeFrom(ObjectContainer container, ClientContext
context) {
+ if(logMINOR) Logger.minor(this, "Removing "+this);
+ ClientPutState oldSFI;
+ ClientPutState oldState;
+ synchronized(this) {
+ oldSFI = origSFI;
+ oldState = currentState;
+ origSFI = null;
+ currentState = null;
+ }
+ if(oldSFI != null) {
+ Logger.error(this, "origSFI is set in
removeFrom() on "+this+" for "+SimpleManifestPutter.this, new
Exception("debug"));
+ container.activate(oldSFI, 1);
+ oldSFI.cancel(container, context);
+ oldSFI.removeFrom(container, context);
+ if(oldState == oldSFI) oldState = null;
+ }
+ if(oldState != null) {
+ Logger.error(this, "currentState is set in
removeFrom() on "+this+" for "+SimpleManifestPutter.this, new
Exception("debug"));
+ container.activate(oldState, 1);
+ oldState.cancel(container, context);
+ oldState.removeFrom(container, context);
+ }
+ if(cm != null) {
+ container.activate(cm, 5);
+ cm.removeFrom(container);
+ }
+ if(metadata != null) {
+ // Possible if cancelled
+ Logger.error(this, "Metadata is set in
removeFrom() on "+this+" for "+SimpleManifestPutter.this);
+ container.activate(metadata, 1);
+ metadata.removeFrom(container);
+ }
+ // Data is responsibility of original caller (usually
ClientPutDir), we don't support freeData atm
+ super.removeFrom(container, context);
+ }
+
+ public boolean objectCanNew(ObjectContainer container) {
+ if(cancelled) {
+ Logger.error(this, "Storing "+this+" when
already cancelled!", new Exception("error"));
+ return false;
+ }
+ if(logMINOR) Logger.minor(this, "Storing "+this+"
activated="+container.ext().isActive(this)+"
stored="+container.ext().isStored(this), new Exception("debug"));
+ return true;
+ }
+
}
- static boolean logMINOR;
- private final HashMap putHandlersByName;
- private final HashSet runningPutHandlers;
- private final HashSet putHandlersWaitingForMetadata;
- private final HashSet waitingForBlockSets;
- private final HashSet putHandlersWaitingForFetchable;
+ private HashMap<String,Object> putHandlersByName;
+ private HashSet<PutHandler> runningPutHandlers;
+ private HashSet<PutHandler> putHandlersWaitingForMetadata;
+ private HashSet<PutHandler> waitingForBlockSets;
+ private HashSet<PutHandler> putHandlersWaitingForFetchable;
private FreenetURI finalURI;
private FreenetURI targetURI;
private boolean finished;
private final InsertContext ctx;
- private final ClientCallback cb;
+ final ClientCallback cb;
private final boolean getCHKOnly;
private boolean insertedAllFiles;
private boolean insertedManifest;
- private final HashMap metadataPuttersByMetadata;
- private final HashMap metadataPuttersUnfetchable;
+ private final HashMap<Metadata,ClientPutState>
metadataPuttersByMetadata;
+ private final HashMap<Metadata,ClientPutState>
metadataPuttersUnfetchable;
private final String defaultName;
private int numberOfFiles;
private long totalSize;
@@ -253,29 +530,31 @@
private final static String[] defaultDefaultNames =
new String[] { "index.html", "index.htm", "default.html",
"default.htm" };
private int bytesOnZip;
- private LinkedList<PutHandler> elementsToPutInArchive;
+ private ArrayList<PutHandler> elementsToPutInArchive;
private boolean fetchable;
private final boolean earlyEncode;
- public SimpleManifestPutter(ClientCallback cb, ClientRequestScheduler
chkSched,
- ClientRequestScheduler sskSched, HashMap
manifestElements, short prioClass, FreenetURI target,
- String defaultName, InsertContext ctx, boolean
getCHKOnly, Object clientContext, boolean earlyEncode) throws InsertException {
- super(prioClass, chkSched, sskSched, clientContext);
- logMINOR = Logger.shouldLog(Logger.MINOR, this);
+ public SimpleManifestPutter(ClientCallback cb,
+ HashMap<String, Object> manifestElements, short
prioClass, FreenetURI target,
+ String defaultName, InsertContext ctx, boolean
getCHKOnly, RequestClient clientContext, boolean earlyEncode) {
+ super(prioClass, clientContext);
this.defaultName = defaultName;
- this.targetURI = target;
+ if(client.persistent())
+ this.targetURI = target.clone();
+ else
+ this.targetURI = target;
this.cb = cb;
this.ctx = ctx;
this.getCHKOnly = getCHKOnly;
this.earlyEncode = earlyEncode;
- putHandlersByName = new HashMap();
- runningPutHandlers = new HashSet();
- putHandlersWaitingForMetadata = new HashSet();
- putHandlersWaitingForFetchable = new HashSet();
- waitingForBlockSets = new HashSet();
- metadataPuttersByMetadata = new HashMap();
- metadataPuttersUnfetchable = new HashMap();
- elementsToPutInArchive = new LinkedList();
+ putHandlersByName = new HashMap<String,Object>();
+ runningPutHandlers = new HashSet<PutHandler>();
+ putHandlersWaitingForMetadata = new HashSet<PutHandler>();
+ putHandlersWaitingForFetchable = new HashSet<PutHandler>();
+ waitingForBlockSets = new HashSet<PutHandler>();
+ metadataPuttersByMetadata = new
HashMap<Metadata,ClientPutState>();
+ metadataPuttersUnfetchable = new
HashMap<Metadata,ClientPutState>();
+ elementsToPutInArchive = new ArrayList<PutHandler>();
makePutHandlers(manifestElements, putHandlersByName);
checkZips();
}
@@ -285,18 +564,23 @@
// FIXME do something.
}
- public void start() throws InsertException {
- logMINOR = Logger.shouldLog(Logger.MINOR, this);
+ public void start(ObjectContainer container, ClientContext context)
throws InsertException {
if (logMINOR)
- Logger.minor(this, "Starting " + this);
+ Logger.minor(this, "Starting " + this+"
persistence="+persistent());
PutHandler[] running;
+ if(persistent()) {
+ container.activate(runningPutHandlers, 2);
+ }
synchronized (this) {
- running = (PutHandler[]) runningPutHandlers.toArray(new
PutHandler[runningPutHandlers.size()]);
+ running = runningPutHandlers.toArray(new
PutHandler[runningPutHandlers.size()]);
}
try {
+ boolean persistent = persistent(); // this might get
deactivated ...
for (int i = 0; i < running.length; i++) {
- running[i].start();
+ running[i].start(container, context);
+ if(persistent &&
!container.ext().isActive(this))
+ container.activate(this, 1);
if (logMINOR)
Logger.minor(this, "Started " + i + "
of " + running.length);
if (isFinished()) {
@@ -309,27 +593,34 @@
Logger.minor(this, "Started " + running.length
+ " PutHandler's for " + this);
if (running.length == 0) {
insertedAllFiles = true;
- gotAllMetadata();
+ if(persistent())
+ container.store(this);
+ gotAllMetadata(container, context);
}
} catch (InsertException e) {
- cancelAndFinish();
+ synchronized(this) {
+ finished = true;
+ }
+ cancelAndFinish(container, context);
throw e;
}
}
- private void makePutHandlers(HashMap manifestElements, HashMap
putHandlersByName) throws InsertException {
+ protected void makePutHandlers(HashMap<String, Object>
manifestElements, HashMap<String,Object> putHandlersByName) {
makePutHandlers(manifestElements, putHandlersByName, "/");
}
- private void makePutHandlers(HashMap manifestElements, HashMap
putHandlersByName, String ZipPrefix) throws InsertException {
- Iterator it = manifestElements.keySet().iterator();
+ private void makePutHandlers(HashMap<String, Object> manifestElements,
HashMap<String,Object> putHandlersByName, String ZipPrefix) {
+ Iterator<String> it = manifestElements.keySet().iterator();
while(it.hasNext()) {
- String name = (String) it.next();
+ String name = it.next();
Object o = manifestElements.get(name);
if(o instanceof HashMap) {
- HashMap subMap = new HashMap();
+ HashMap<String,Object> subMap = new
HashMap<String,Object>();
putHandlersByName.put(name, subMap);
- makePutHandlers((HashMap)o, subMap,
ZipPrefix+name+ '/');
+ makePutHandlers((HashMap<String, Object>)o,
subMap, ZipPrefix+name+ '/');
+ if(Logger.shouldLog(Logger.DEBUG, this))
+ Logger.debug(this, "Sub map for
"+name+" : "+subMap.size()+" elements from "+((HashMap)o).size());
} else {
ManifestElement element = (ManifestElement) o;
String mimeType = element.mimeOverride;
@@ -362,22 +653,21 @@
(bytesOnZip + sz <
(2038-64)*1024)) { // totally dumb heuristic!
bytesOnZip += sz;
// Put it in the zip.
+ if(logMINOR)
+ Logger.minor(this,
"Putting into ZIP: "+name);
ph = new PutHandler(this, name,
ZipPrefix+element.fullName, cm, data);
if(logMINOR)
Logger.minor(this,
"Putting file into container: "+element.fullName+" : "+ph);
-
elementsToPutInArchive.addLast(ph);
+ elementsToPutInArchive.add(ph);
numberOfFiles++;
totalSize += data.size();
} else {
- try {
ph = new
PutHandler(this,name, data, cm, getCHKOnly);
- } catch (InsertException e) {
- cancelAndFinish();
- throw e;
- }
runningPutHandlers.add(ph);
putHandlersWaitingForMetadata.add(ph);
putHandlersWaitingForFetchable.add(ph);
+ if(logMINOR)
+ Logger.minor(this,
"Inserting separately as PutHandler: "+name+" : "+ph+"
persistent="+ph.persistent()+":"+ph.persistent+" "+persistent());
numberOfFiles++;
totalSize += data.size();
}
@@ -397,14 +687,53 @@
return finished || cancelled;
}
- private void gotAllMetadata() {
+ private final DBJob runGotAllMetadata = new DBJob() {
+
+ public void run(ObjectContainer container, ClientContext
context) {
+ context.jobRunner.removeRestartJob(this,
NativeThread.NORM_PRIORITY, container);
+ container.activate(SimpleManifestPutter.this, 1);
+ innerGotAllMetadata(container, context);
+ container.deactivate(SimpleManifestPutter.this, 1);
+ }
+
+ };
+
+ /**
+ * Called when we have metadata for all the PutHandler's.
+ * This does *not* necessarily mean we can immediately insert the final
metadata, since
+ * if these metadata's are too big, they will need to be inserted
separately. See
+ * resolveAndStartBase().
+ * @param container
+ * @param context
+ */
+ private void gotAllMetadata(ObjectContainer container, ClientContext
context) {
+ // This can be huge! Run it on its own transaction to minimize
the build up of stuff to commit
+ // and maximise the opportunities for garbage collection.
+ if(persistent()) {
+ container.activate(runGotAllMetadata, 1); // need to
activate .this!
+ context.jobRunner.queueRestartJob(runGotAllMetadata,
NativeThread.NORM_PRIORITY, container, false);
+ context.jobRunner.queue(runGotAllMetadata,
NativeThread.NORM_PRIORITY, false);
+ } else {
+ innerGotAllMetadata(null, context);
+ }
+ }
+
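
As the comment above says, gotAllMetadata() pushes the heavy work onto the
database thread so that it commits in its own transaction. A rough stand-in for
that queueing mechanism; MiniDBJob and MiniDBJobRunner are hypothetical
simplifications, not the real DBJobRunner interface:

    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import com.db4o.ObjectContainer;

    interface MiniDBJob { void run(ObjectContainer container); }

    class MiniDBJobRunner {
        private final ExecutorService dbThread = Executors.newSingleThreadExecutor();
        private final ObjectContainer container;

        MiniDBJobRunner(ObjectContainer container) { this.container = container; }

        void queue(final MiniDBJob job) {
            dbThread.execute(new Runnable() {
                public void run() {
                    job.run(container);
                    container.commit(); // each job is its own transaction
                }
            });
        }
    }
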
+ /**
+ * Generate the global metadata, and then call resolveAndStartBase.
+ * @param container
+ * @param context
+ */
+ private void innerGotAllMetadata(ObjectContainer container,
ClientContext context) {
+ if(persistent()) {
+ container.activate(putHandlersByName, 2); // depth 2 to
load elements
+ }
if(logMINOR) Logger.minor(this, "Got all metadata");
- HashMap namesToByteArrays = new HashMap();
- namesToByteArrays(putHandlersByName, namesToByteArrays);
+ HashMap<String, Object> namesToByteArrays = new HashMap<String,
Object>();
+ namesToByteArrays(putHandlersByName, namesToByteArrays,
container);
if(defaultName != null) {
Metadata meta = (Metadata)
namesToByteArrays.get(defaultName);
if(meta == null) {
- fail(new
InsertException(InsertException.INVALID_URI, "Default name "+defaultName+" does
not exist", null));
+ fail(new
InsertException(InsertException.INVALID_URI, "Default name "+defaultName+" does
not exist", null), container, context);
return;
}
namesToByteArrays.put("", meta);
@@ -420,35 +749,56 @@
}
baseMetadata =
Metadata.mkRedirectionManifestWithMetadata(namesToByteArrays);
- resolveAndStartBase();
+ if(persistent()) {
+ container.store(baseMetadata);
+ container.store(this);
+ }
+ resolveAndStartBase(container, context);
}
- private void resolveAndStartBase() {
+ /**
+ * Attempt to insert the base metadata and the container. If the base
metadata cannot be resolved,
+ * try to resolve it: start inserts for each part that cannot be
resolved, and wait for them to generate
+ * URIs that can be incorporated into the metadata. This method will
then be called again, and will
+ * complete, or do more resolutions as necessary.
+ * @param container
+ * @param context
+ */
+ private void resolveAndStartBase(ObjectContainer container,
ClientContext context) {
Bucket bucket = null;
synchronized(this) {
if(hasResolvedBase) return;
}
while(true) {
try {
- bucket =
BucketTools.makeImmutableBucket(ctx.bf, baseMetadata.writeToByteArray());
+ if(persistent())
+ container.activate(baseMetadata,
Integer.MAX_VALUE);
+ bucket =
BucketTools.makeImmutableBucket(context.getBucketFactory(persistent()),
baseMetadata.writeToByteArray());
if(logMINOR)
Logger.minor(this, "Metadata bucket is
"+bucket.size()+" bytes long");
break;
} catch (IOException e) {
- fail(new
InsertException(InsertException.BUCKET_ERROR, e, null));
+ fail(new
InsertException(InsertException.BUCKET_ERROR, e, null), container, context);
return;
} catch (MetadataUnresolvedException e) {
try {
// Start the insert for the
sub-Metadata.
// Eventually it will generate a URI
and call onEncode(), which will call back here.
- resolve(e);
+ if(logMINOR) Logger.minor(this, "Main
metadata needs resolving: "+e);
+ resolve(e, container, context);
+ if(persistent())
+
container.deactivate(baseMetadata, 1);
return;
} catch (IOException e1) {
- fail(new
InsertException(InsertException.BUCKET_ERROR, e, null));
+ if(persistent())
+
container.deactivate(baseMetadata, 1);
+ fail(new
InsertException(InsertException.BUCKET_ERROR, e, null), container, context);
return;
} catch (InsertException e2) {
- fail(e2);
+ if(persistent())
+
container.deactivate(baseMetadata, 1);
+ fail(e2, container, context);
return;
}
}
@@ -458,6 +808,10 @@
if(hasResolvedBase) return;
hasResolvedBase = true;
}
+ if(persistent()) {
+ container.store(this);
+ container.activate(elementsToPutInArchive, 2);
+ }
InsertBlock block;
boolean isMetadata = true;
boolean insertAsArchiveManifest = false;
@@ -468,13 +822,14 @@
// We have the metadata, fortunately enough, because
everything has been resolve()d.
// So all we need to do is create the actual archive.
try {
- Bucket outputBucket =
ctx.bf.makeBucket(baseMetadata.dataLength());
+ Bucket outputBucket =
context.getBucketFactory(persistent()).makeBucket(baseMetadata.dataLength());
// TODO: try both ? - maybe not worth it
archiveType = ARCHIVE_TYPE.getDefault();
String mimeType = (archiveType ==
ARCHIVE_TYPE.TAR ?
- createTarBucket(bucket, outputBucket) :
- createZipBucket(bucket, outputBucket));
+ createTarBucket(bucket, outputBucket,
container) :
+ createZipBucket(bucket, outputBucket,
container));
bucket.free();
+ if(persistent()) bucket.removeFrom(container);
if(logMINOR) Logger.minor(this, "We are using
"+archiveType);
@@ -483,28 +838,49 @@
// Can we just insert it, and not bother with a
redirect to it?
// Thereby exploiting implicit manifest
support, which will pick up on .metadata??
// We ought to be able to !!
- block = new InsertBlock(outputBucket, new
ClientMetadata(mimeType), targetURI);
+ if(persistent()) container.activate(targetURI,
5);
+ block = new InsertBlock(outputBucket, new
ClientMetadata(mimeType), persistent() ? targetURI.clone() : targetURI);
isMetadata = false;
insertAsArchiveManifest = true;
} catch (IOException e) {
- fail(new
InsertException(InsertException.BUCKET_ERROR, e, null));
+ fail(new
InsertException(InsertException.BUCKET_ERROR, e, null), container, context);
+ if(persistent())
+ container.deactivate(baseMetadata, 1);
return;
}
} else
- block = new InsertBlock(bucket, null, targetURI);
+ block = new InsertBlock(bucket, null, persistent() ?
targetURI.clone() : targetURI);
+ SingleFileInserter metadataInserter;
try {
- SingleFileInserter metadataInserter =
+ metadataInserter =
new SingleFileInserter(this, this, block,
isMetadata, ctx, (archiveType == ARCHIVE_TYPE.ZIP) , getCHKOnly, false,
baseMetadata, archiveType, true, null, earlyEncode);
- if(logMINOR) Logger.minor(this, "Inserting main
metadata: "+metadataInserter);
+ if(logMINOR) Logger.minor(this, "Inserting main
metadata: "+metadataInserter+" for "+baseMetadata);
+ if(persistent()) {
+ container.activate(metadataPuttersByMetadata,
2);
+ container.activate(metadataPuttersUnfetchable,
2);
+ }
this.metadataPuttersByMetadata.put(baseMetadata,
metadataInserter);
metadataPuttersUnfetchable.put(baseMetadata,
metadataInserter);
- metadataInserter.start(null);
+ if(persistent()) {
+ container.store(metadataPuttersByMetadata);
+ container.store(metadataPuttersUnfetchable);
+ container.deactivate(metadataPuttersByMetadata,
1);
+
container.deactivate(metadataPuttersUnfetchable, 1);
+ container.deactivate(baseMetadata, 1);
+
+ }
+ metadataInserter.start(null, container, context);
} catch (InsertException e) {
- fail(e);
+ fail(e, container, context);
+ return;
}
+ if(persistent()) {
+ container.deactivate(metadataInserter, 1);
+ container.deactivate(elementsToPutInArchive, 1);
+ }
}
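
Per the javadoc above, resolution is exception-driven: writing the root metadata
throws when children are too large to inline, each unresolved child is inserted
on its own, and the method is re-entered once the children have reported URIs
through onEncode(). A compressed, hypothetical model of that control flow:

    import java.util.ArrayList;
    import java.util.List;

    class UnresolvedException extends Exception {
        final List<Meta> mustResolve;
        UnresolvedException(List<Meta> m) { mustResolve = m; }
    }

    class Meta {
        byte[] inline;                               // set once resolved
        List<Meta> children = new ArrayList<Meta>();

        byte[] write() throws UnresolvedException {
            List<Meta> unresolved = new ArrayList<Meta>();
            for (Meta child : children)
                if (child.inline == null) unresolved.add(child);
            if (!unresolved.isEmpty()) throw new UnresolvedException(unresolved);
            return new byte[0];                      // real serialization elided
        }
    }

    class Resolver {
        void resolveAndStartBase(Meta base) {
            try {
                byte[] bytes = base.write();
                startFinalInsert(bytes);
            } catch (UnresolvedException e) {
                for (Meta m : e.mustResolve)
                    startChildInsert(m); // its onEncode() fills m.inline, then
                                         // calls resolveAndStartBase(base) again
            }
        }
        void startFinalInsert(byte[] data) {}
        void startChildInsert(Meta m) {}
    }
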
- private String createTarBucket(Bucket inputBucket, Bucket outputBucket)
throws IOException {
+ private String createTarBucket(Bucket inputBucket, Bucket outputBucket,
ObjectContainer container) throws IOException {
if(logMINOR) Logger.minor(this, "Create a TAR Bucket");
OutputStream os = new
BufferedOutputStream(outputBucket.getOutputStream());
@@ -513,6 +889,10 @@
TarEntry ze;
for(PutHandler ph : elementsToPutInArchive) {
+ if(persistent()) {
+ container.activate(ph, 1);
+ container.activate(ph.data, 1);
+ }
if(logMINOR)
Logger.minor(this, "Putting into tar: "+ph+"
data length "+ph.data.size()+" name "+ph.targetInArchive);
ze = new TarEntry(ph.targetInArchive);
@@ -546,15 +926,19 @@
return ARCHIVE_TYPE.TAR.mimeTypes[0];
}
- private String createZipBucket(Bucket inputBucket, Bucket outputBucket)
throws IOException {
+ private String createZipBucket(Bucket inputBucket, Bucket outputBucket,
ObjectContainer container) throws IOException {
if(logMINOR) Logger.minor(this, "Create a ZIP Bucket");
OutputStream os = new
BufferedOutputStream(outputBucket.getOutputStream());
ZipOutputStream zos = new ZipOutputStream(os);
ZipEntry ze;
- for(Iterator i = elementsToPutInArchive.iterator();
i.hasNext();) {
- PutHandler ph = (PutHandler) i.next();
+ for(Iterator<PutHandler> i = elementsToPutInArchive.iterator();
i.hasNext();) {
+ PutHandler ph = i.next();
+ if(persistent()) {
+ container.activate(ph, 1);
+ container.activate(ph.data, 1);
+ }
ze = new ZipEntry(ph.targetInArchive);
ze.setTime(0);
zos.putNextEntry(ze);
@@ -581,14 +965,20 @@
* Start inserts for unresolved (too big) Metadata's. Eventually these
will call back with an onEncode(),
* meaning they have the CHK, and we can progress to
resolveAndStartBase().
* @param e
+ * @param container
+ * @param context
* @return
* @throws InsertException
* @throws IOException
*/
- private void resolve(MetadataUnresolvedException e) throws
InsertException, IOException {
+ private void resolve(MetadataUnresolvedException e, ObjectContainer
container, ClientContext context) throws InsertException, IOException {
Metadata[] metas = e.mustResolve;
+ if(persistent())
+ container.activate(metadataPuttersByMetadata, 2);
for(int i=0;i<metas.length;i++) {
Metadata m = metas[i];
+ if(persistent()) container.activate(m, 100);
+ if(logMINOR) Logger.minor(this, "Resolving "+m);
synchronized(this) {
if(metadataPuttersByMetadata.containsKey(m)) {
if(logMINOR) Logger.minor(this,
"Already started insert for "+m+" in resolve() for "+metas.length+"
Metadata's");
@@ -597,169 +987,489 @@
}
if(m.isResolved()) {
Logger.error(this, "Already resolved: "+m+" in
resolve() - race condition???");
+ if(persistent()) container.deactivate(m, 1);
continue;
}
try {
- Bucket b = m.toBucket(ctx.bf);
+ Bucket b =
m.toBucket(context.getBucketFactory(persistent()));
- InsertBlock ib = new InsertBlock(b, null,
FreenetURI.EMPTY_CHK_URI);
+ InsertBlock ib = new InsertBlock(b, null,
persistent() ? FreenetURI.EMPTY_CHK_URI.clone() : FreenetURI.EMPTY_CHK_URI);
SingleFileInserter metadataInserter =
new SingleFileInserter(this, this, ib,
true, ctx, false, getCHKOnly, false, m, null, true, null, earlyEncode);
if(logMINOR) Logger.minor(this, "Inserting
subsidiary metadata: "+metadataInserter+" for "+m);
synchronized(this) {
this.metadataPuttersByMetadata.put(m,
metadataInserter);
}
- metadataInserter.start(null);
+ metadataInserter.start(null, container,
context);
+ if(persistent()) {
+ container.deactivate(metadataInserter,
1);
+ container.deactivate(m, 1);
+ }
} catch (MetadataUnresolvedException e1) {
- resolve(e1);
+ resolve(e1, container,
context);
+ if(persistent())
container.deactivate(m, 1);
}
}
+ if(persistent()) {
+ container.store(metadataPuttersByMetadata);
+ container.deactivate(metadataPuttersByMetadata, 1);
+ }
}
- private void namesToByteArrays(HashMap putHandlersByName, HashMap
namesToByteArrays) {
- Iterator i = putHandlersByName.keySet().iterator();
+ private void namesToByteArrays(HashMap<String, Object>
putHandlersByName, HashMap<String,Object> namesToByteArrays, ObjectContainer
container) {
+ Iterator<String> i = putHandlersByName.keySet().iterator();
while(i.hasNext()) {
- String name = (String) i.next();
+ String name = i.next();
Object o = putHandlersByName.get(name);
if(o instanceof PutHandler) {
PutHandler ph = (PutHandler) o;
+ if(persistent())
+ container.activate(ph, 1);
Metadata meta = ph.metadata;
+ if(ph.metadata == null)
+ Logger.error(this, "Metadata for
"+name+" : "+ph+" is null");
+ ph.clearMetadata(container);
+ if(persistent())
+ container.activate(meta, 100);
+ Logger.minor(this, "Putting "+name);
namesToByteArrays.put(name, meta);
if(logMINOR)
Logger.minor(this, "Putting PutHandler
into base metadata: "+ph+" name "+name);
} else if(o instanceof HashMap) {
- HashMap subMap = new HashMap();
+ HashMap<String,Object> subMap = new
HashMap<String,Object>();
+ if(persistent())
+ container.activate(o, 2); // Depth 1
doesn't load the elements...
namesToByteArrays.put(name, subMap);
if(logMINOR)
- Logger.minor(this, "Putting hashmap
into base metadata: "+name);
- namesToByteArrays((HashMap)o, subMap);
+ Logger.minor(this, "Putting hashmap
into base metadata: "+name+" size "+((HashMap)o).size()+" active = "+container
== null ? "null" : Boolean.toString(container.ext().isActive(o)));
+ Logger.minor(this, "Putting directory: "+name);
+ namesToByteArrays((HashMap<String, Object>)o,
subMap, container);
} else
throw new IllegalStateException();
}
}
- private void insertedAllFiles() {
+ private void insertedAllFiles(ObjectContainer container, ClientContext
context) {
if(logMINOR) Logger.minor(this, "Inserted all files");
synchronized(this) {
insertedAllFiles = true;
if(finished || cancelled) {
if(logMINOR) Logger.minor(this, "Already
"+(finished?"finished":"cancelled"));
+ if(persistent())
+ container.store(this);
return;
}
if(!insertedManifest) {
if(logMINOR) Logger.minor(this, "Haven't
inserted manifest");
+ if(persistent())
+ container.store(this);
return;
}
finished = true;
}
- complete();
+ if(persistent())
+ container.store(this);
+ complete(container, context);
}
- private void complete() {
- cb.onSuccess(this);
+ private void complete(ObjectContainer container, ClientContext context)
{
+ // FIXME we could remove the put handlers after inserting all
files but not having finished the insert of the manifest
+ // However it would complicate matters for no real gain in most
cases...
+ // Also doing it this way means we don't need to worry about
+ if(persistent()) removePutHandlers(container, context);
+ boolean deactivateCB = false;
+ if(persistent()) {
+ deactivateCB = !container.ext().isActive(cb);
+ container.activate(cb, 1);
+ }
+ cb.onSuccess(this, container);
+ if(deactivateCB)
+ container.deactivate(cb, 1);
}
- private void fail(InsertException e) {
+ private void fail(InsertException e, ObjectContainer container,
ClientContext context) {
// Cancel all, then call the callback
- cancelAndFinish();
+ synchronized(this) {
+ if(finished) return;
+ finished = true;
+ }
+ cancelAndFinish(container, context);
+ if(persistent()) removePutHandlers(container, context);
- cb.onFailure(e, this);
+ if(persistent())
+ container.activate(cb, 1);
+ cb.onFailure(e, this, container);
}
+ private void removePutHandlers(ObjectContainer container, ClientContext
context) {
+ container.activate(putHandlersByName, 2);
+ container.activate(runningPutHandlers, 2);
+ container.activate(putHandlersWaitingForMetadata, 2);
+ container.activate(waitingForBlockSets, 2);
+ container.activate(putHandlersWaitingForFetchable, 2);
+ container.activate(elementsToPutInArchive, 2);
+ removePutHandlersByName(container, context, putHandlersByName);
+ putHandlersByName = null;
+
+ if(!runningPutHandlers.isEmpty()) {
+ Logger.error(this, "Running put handlers not part of
putHandlersByName: "+runningPutHandlers.size()+" in removePutHandlers() on
"+this, new Exception("error"));
+ PutHandler[] handlers = runningPutHandlers.toArray(new
PutHandler[runningPutHandlers.size()]);
+ for(PutHandler handler : handlers) {
+ container.activate(handler, 1);
+ Logger.error(this, "Still running, but not in
putHandlersByName: "+handler);
+ handler.cancel();
+ handler.removeFrom(container, context);
+ }
+ runningPutHandlers.clear();
+ }
+ if(!putHandlersWaitingForMetadata.isEmpty()) {
+ Logger.error(this, "Put handlers waiting for metadata,
not part of putHandlersByName: "+putHandlersWaitingForMetadata.size()+" in
removePutHandlers() on "+this, new Exception("error"));
+ PutHandler[] handlers =
putHandlersWaitingForMetadata.toArray(new
PutHandler[putHandlersWaitingForMetadata.size()]);
+ for(PutHandler handler : handlers) {
+ container.activate(handler, 1);
+ Logger.error(this, "Still waiting for metadata,
but not in putHandlersByName: "+handler);
+ handler.cancel();
+ handler.removeFrom(container, context);
+ }
+ putHandlersWaitingForMetadata.clear();
+ }
+ if(!waitingForBlockSets.isEmpty()) {
+ Logger.error(this, "Put handlers waiting for block
sets, not part of putHandlersByName: "+waitingForBlockSets.size()+" in
removePutHandlers() on "+this, new Exception("error"));
+ PutHandler[] handlers = waitingForBlockSets.toArray(new
PutHandler[waitingForBlockSets.size()]);
+ for(PutHandler handler : handlers) {
+ container.activate(handler, 1);
+ Logger.error(this, "Still waiting for block
set, but not in putHandlersByName: "+handler);
+ handler.cancel();
+ handler.removeFrom(container, context);
+ }
+ waitingForBlockSets.clear();
+ }
+ if(!putHandlersWaitingForFetchable.isEmpty()) {
+ Logger.error(this, "Put handlers waiting for fetchable,
not part of putHandlersByName: "+putHandlersWaitingForFetchable.size()+" in
removePutHandlers() on "+this, new Exception("error"));
+ PutHandler[] handlers =
putHandlersWaitingForFetchable.toArray(new
PutHandler[putHandlersWaitingForFetchable.size()]);
+ for(PutHandler handler : handlers) {
+ container.activate(handler, 1);
+ Logger.error(this, "Still waiting for
fetchable, but not in putHandlersByName: "+handler);
+ handler.cancel();
+ handler.removeFrom(container, context);
+ }
+ putHandlersWaitingForFetchable.clear();
+ }
+ if(!elementsToPutInArchive.isEmpty()) {
+ Logger.error(this, "Elements to put in archive, not
part of putHandlersByName: "+elementsToPutInArchive.size()+" in
removePutHandlers() on "+this, new Exception("error"));
+ PutHandler[] handlers =
elementsToPutInArchive.toArray(new PutHandler[elementsToPutInArchive.size()]);
+ for(PutHandler handler : handlers) {
+ container.activate(handler, 1);
+ Logger.error(this, "To put in archive, but not
in putHandlersByName: "+handler);
+ handler.removeFrom(container, context);
+ }
+ elementsToPutInArchive.clear();
+ }
+
+ container.delete(runningPutHandlers);
+ container.delete(putHandlersWaitingForMetadata);
+ container.delete(waitingForBlockSets);
+ container.delete(putHandlersWaitingForFetchable);
+ container.delete(elementsToPutInArchive);
+ runningPutHandlers = null;
+ putHandlersWaitingForMetadata = null;
+ waitingForBlockSets = null;
+ putHandlersWaitingForFetchable = null;
+ elementsToPutInArchive = null;
+ container.store(this);
+ }
+
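
removePutHandlers() above repeats one db4o cleanup pattern per collection:
activate deeply enough to reach the members, delete each member, then delete the
collection object itself. Generically, under the same assumptions (db4o only,
names invented):

    import java.util.HashSet;
    import com.db4o.ObjectContainer;

    class CleanupSketch {
        static void removeAll(ObjectContainer container, HashSet<Object> set) {
            container.activate(set, 2);          // depth 2 loads the members
            for (Object o : set.toArray()) {
                container.activate(o, 1);
                container.delete(o);             // remove the element itself
            }
            set.clear();
            container.delete(set);               // then drop the collection object
        }
    }
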
/**
+ * Remove all PutHandler's from the given putHandlersByName sub-map.
+ * Remove the PutHandler's themselves also, remove them from and
complain about
+ * runningPutHandlers, putHandlersWaitingForMetadata,
waitingForBlockSets,
+ * putHandlersWaitingForFetchable, which must have been activated by
the caller.
+ * @param container
+ * @param putHandlersByName
+ */
+ private void removePutHandlersByName(ObjectContainer container,
ClientContext context, HashMap<String, Object> putHandlersByName) {
+ if(logMINOR) Logger.minor(this, "removePutHandlersByName on
"+this+" : map size = "+putHandlersByName.size());
+ for(Map.Entry<String, Object> entry :
putHandlersByName.entrySet()) {
+ String key = entry.getKey();
+ Object value = entry.getValue();
+ if(value instanceof PutHandler) {
+ PutHandler handler = (PutHandler) value;
+ container.activate(handler, 1);
+ if(runningPutHandlers.remove(handler))
+
container.ext().store(runningPutHandlers, 2);
+
if(putHandlersWaitingForMetadata.remove(handler))
+
container.ext().store(putHandlersWaitingForMetadata, 2);
+ if(waitingForBlockSets.remove(handler))
+
container.ext().store(waitingForBlockSets, 2);
+
if(putHandlersWaitingForFetchable.remove(handler))
+
container.ext().store(putHandlersWaitingForFetchable, 2);
+ if(elementsToPutInArchive.remove(handler))
+
container.ext().store(elementsToPutInArchive, 2);
+ handler.removeFrom(container, context);
+ } else {
+ HashMap<String, Object> subMap =
(HashMap<String, Object>) value;
+ container.activate(subMap, 2);
+ removePutHandlersByName(container, context,
subMap);
+ }
+ container.delete(key);
+ }
+ putHandlersByName.clear();
+ container.delete(putHandlersByName);
+ }
+
+ /**
* Cancel all running inserters and set finished to true.
*/
- private void cancelAndFinish() {
+ private void cancelAndFinish(ObjectContainer container, ClientContext
context) {
PutHandler[] running;
+ boolean persistent = persistent();
+ if(persistent)
+ container.activate(runningPutHandlers, 2);
synchronized(this) {
- if(finished) return;
- running = (PutHandler[]) runningPutHandlers.toArray(new
PutHandler[runningPutHandlers.size()]);
- finished = true;
+ running = runningPutHandlers.toArray(new
PutHandler[runningPutHandlers.size()]);
}
+ if(persistent())
+ container.store(this);
- for(int i=0;i<running.length;i++) {
- running[i].cancel();
+ if(logMINOR) Logger.minor(this, "PutHandler's to cancel:
"+running.length);
+ for(PutHandler putter : running) {
+ boolean active = true;
+ if(persistent) {
+ active = container.ext().isActive(putter);
+ if(!active) container.activate(putter, 1);
+ }
+ putter.cancel(container, context);
+ if(!active) container.deactivate(putter, 1);
+ if(persistent) container.activate(this, 1);
}
+
+ ClientPutState[] runningMeta;
+ if(persistent())
+ container.activate(metadataPuttersByMetadata, 2);
+ synchronized(this) {
+ runningMeta =
metadataPuttersByMetadata.values().toArray(new
ClientPutState[metadataPuttersByMetadata.size()]);
+ }
+
+ if(logMINOR) Logger.minor(this, "Metadata putters to cancel:
"+runningMeta.length);
+ for(ClientPutState putter : runningMeta) {
+ boolean active = true;
+ if(persistent) {
+ active = container.ext().isActive(putter);
+ if(!active) container.activate(putter, 1);
+ }
+ putter.cancel(container, context);
+ if(!active) container.deactivate(putter, 1);
+ if(persistent) container.activate(this, 1);
+ }
+
}
@Override
- public void cancel() {
- super.cancel();
- fail(new InsertException(InsertException.CANCELLED));
+ public void cancel(ObjectContainer container, ClientContext context) {
+ synchronized(this) {
+ if(super.cancel()) return;
+ if(finished) return;
+ }
+ if(persistent())
+ container.store(this);
+ fail(new InsertException(InsertException.CANCELLED), container,
context);
}
- public void onSuccess(ClientPutState state) {
- logMINOR = Logger.shouldLog(Logger.MINOR, this);
+ public void onSuccess(ClientPutState state, ObjectContainer container,
ClientContext context) {
+ if(persistent()) {
+ container.activate(metadataPuttersByMetadata, 2);
+ }
+ boolean fin = false;
+ ClientPutState oldState = null;
+ Metadata token = (Metadata) state.getToken();
synchronized(this) {
- metadataPuttersByMetadata.remove(state.getToken());
+ if(persistent()) container.activate(token, 1);
+ boolean present =
metadataPuttersByMetadata.containsKey(token);
+ if(present) {
+ oldState =
metadataPuttersByMetadata.remove(token);
+ if(persistent())
+
container.activate(metadataPuttersUnfetchable, 2);
+
if(metadataPuttersUnfetchable.containsKey(token)) {
+
metadataPuttersUnfetchable.remove(token);
+ if(persistent())
+
container.store(metadataPuttersUnfetchable);
+ }
+ }
if(!metadataPuttersByMetadata.isEmpty()) {
if(logMINOR) Logger.minor(this, "Still running
metadata putters: "+metadataPuttersByMetadata.size());
- return;
+ } else {
+ Logger.minor(this, "Inserted manifest
successfully on "+this+" : "+state);
+ insertedManifest = true;
+ if(finished) {
+ if(logMINOR) Logger.minor(this,
"Already finished");
+ if(persistent())
+ container.store(this);
+ } else if(!insertedAllFiles) {
+ if(logMINOR) Logger.minor(this, "Not
inserted all files");
+ if(persistent())
+ container.store(this);
+ } else {
+ finished = true;
+ if(persistent()) container.store(this);
+ fin = true;
+ }
}
- Logger.minor(this, "Inserted manifest successfully on
"+this);
- insertedManifest = true;
- if(finished) {
- if(logMINOR) Logger.minor(this, "Already
finished");
- return;
+ }
+ if(token != baseMetadata)
+ token.removeFrom(container);
+ if(persistent()) {
+ container.store(metadataPuttersByMetadata);
+ container.deactivate(metadataPuttersByMetadata, 1);
+ state.removeFrom(container, context);
+ if(oldState != state && oldState != null) {
+ container.activate(oldState, 1);
+ oldState.removeFrom(container, context);
}
- if(!insertedAllFiles) {
- if(logMINOR) Logger.minor(this, "Not inserted
all files");
- return;
- }
- finished = true;
}
- complete();
+ if(fin)
+ complete(container, context);
}
- public void onFailure(InsertException e, ClientPutState state) {
- logMINOR = Logger.shouldLog(Logger.MINOR, this);
- fail(e);
+ public void onFailure(InsertException e, ClientPutState state,
ObjectContainer container, ClientContext context) {
+ if(persistent()) {
+ container.activate(metadataPuttersByMetadata, 2);
+ }
+ ClientPutState oldState = null;
+ Metadata token = (Metadata) state.getToken();
+ synchronized(this) {
+ if(persistent()) container.activate(token, 1);
+ boolean present =
metadataPuttersByMetadata.containsKey(token);
+ if(present) {
+ oldState =
metadataPuttersByMetadata.remove(token);
+ if(persistent())
+
container.activate(metadataPuttersUnfetchable, 2);
+
if(metadataPuttersUnfetchable.containsKey(token)) {
+
metadataPuttersUnfetchable.remove(token);
+ if(persistent())
+
container.store(metadataPuttersUnfetchable);
+ }
+ }
+ }
+ if(token != baseMetadata)
+ token.removeFrom(container);
+ if(persistent()) {
+ container.store(metadataPuttersByMetadata);
+ container.deactivate(metadataPuttersByMetadata, 1);
+ state.removeFrom(container, context);
+ if(oldState != state && oldState != null) {
+ container.activate(oldState, 1);
+ oldState.removeFrom(container, context);
+ }
+ }
+ fail(e, container, context);
}
- public void onEncode(BaseClientKey key, ClientPutState state) {
+ public void onEncode(BaseClientKey key, ClientPutState state,
ObjectContainer container, ClientContext context) {
if(state.getToken() == baseMetadata) {
this.finalURI = key.getURI();
if(logMINOR) Logger.minor(this, "Got metadata key:
"+finalURI);
- cb.onGeneratedURI(finalURI, this);
+ if(persistent())
+ container.activate(cb, 1);
+ cb.onGeneratedURI(persistent() ? finalURI.clone() :
finalURI, this, container);
+ if(persistent())
+ container.deactivate(cb, 1);
+ if(persistent())
+ container.store(this);
} else {
// It's a sub-Metadata
Metadata m = (Metadata) state.getToken();
+ if(persistent())
+ container.activate(m, 2);
m.resolve(key.getURI());
+ if(persistent())
+ container.store(m);
if(logMINOR) Logger.minor(this, "Resolved "+m+" :
"+key.getURI());
- resolveAndStartBase();
+ resolveAndStartBase(container, context);
}
}
- public void onTransition(ClientPutState oldState, ClientPutState
newState) {
+ public void onTransition(ClientPutState oldState, ClientPutState
newState, ObjectContainer container) {
+ Metadata m = (Metadata) oldState.getToken();
+ if(persistent()) {
+ container.activate(m, 100);
+ container.activate(metadataPuttersUnfetchable, 2);
+ container.activate(metadataPuttersByMetadata, 2);
+ }
synchronized(this) {
+ if(metadataPuttersByMetadata.containsKey(m)) {
+ if(persistent()) container.store(newState);
+ metadataPuttersByMetadata.put(m, newState);
+ if(persistent())
container.ext().store(metadataPuttersByMetadata, 2);
+ if(logMINOR) Logger.minor(this, "Metadata
putter transition: "+oldState+" -> "+newState);
+ if(metadataPuttersUnfetchable.containsKey(m)) {
+ metadataPuttersUnfetchable.put(m,
newState);
+ if(persistent())
container.ext().store(metadataPuttersUnfetchable, 2);
+ if(logMINOR) Logger.minor(this,
"Unfetchable metadata putter transition: "+oldState+" -> "+newState);
+ }
+ } else {
+ Logger.error(this, "onTransition() but
metadataPuttersByMetadata does not contain metadata tag "+m+" for "+oldState+"
should -> "+newState);
+ }
+ }
+
+ if(persistent()) {
+ container.deactivate(m, 100);
+ container.deactivate(metadataPuttersUnfetchable, 2);
+ container.deactivate(metadataPuttersByMetadata, 2);
+ }
+ synchronized(this) {
if(logMINOR) Logger.minor(this, "Transition:
"+oldState+" -> "+newState);
}
}
- public void onMetadata(Metadata m, ClientPutState state) {
+ public void onMetadata(Metadata m, ClientPutState state,
ObjectContainer container, ClientContext context) {
// Ignore
}
@Override
- public void notifyClients() {
- ctx.eventProducer.produceEvent(new
SplitfileProgressEvent(this.totalBlocks, this.successfulBlocks,
this.failedBlocks, this.fatallyFailedBlocks, this.minSuccessBlocks,
this.blockSetFinalized));
+ public void notifyClients(ObjectContainer container, ClientContext
context) {
+ if(persistent()) {
+ container.activate(ctx, 1);
+ container.activate(ctx.eventProducer, 1);
+ }
+ ctx.eventProducer.produceEvent(new
SplitfileProgressEvent(this.totalBlocks, this.successfulBlocks,
this.failedBlocks, this.fatallyFailedBlocks, this.minSuccessBlocks,
this.blockSetFinalized), container, context);
}
- public void onBlockSetFinished(ClientPutState state) {
+ public void onBlockSetFinished(ClientPutState state, ObjectContainer
container, ClientContext context) {
synchronized(this) {
this.metadataBlockSetFinalized = true;
- if(!waitingForBlockSets.isEmpty()) return;
+ if(persistent())
+ container.activate(waitingForBlockSets, 2);
+ if(!waitingForBlockSets.isEmpty()) {
+ if(persistent()) {
+ container.store(this);
+
container.deactivate(waitingForBlockSets, 1);
+ }
+ return;
+ }
}
- this.blockSetFinalized();
+ this.blockSetFinalized(container, context);
+ if(persistent()) {
+ container.store(this);
+ container.deactivate(waitingForBlockSets, 1);
+ }
}
@Override
- public void blockSetFinalized() {
+ public void blockSetFinalized(ObjectContainer container, ClientContext
context) {
synchronized(this) {
if(!metadataBlockSetFinalized) return;
- if(waitingForBlockSets.isEmpty()) return;
+ if(persistent())
+ container.activate(waitingForBlockSets, 2);
+ if(waitingForBlockSets.isEmpty()) {
+ if(persistent())
+
container.deactivate(waitingForBlockSets, 1);
+ return;
+ }
}
- super.blockSetFinalized();
+ if(persistent())
+ container.deactivate(waitingForBlockSets, 1);
+ super.blockSetFinalized(container, context);
+ if(persistent())
+ container.store(this);
}
/**
@@ -767,17 +1477,17 @@
* All are to have mimeOverride=null, i.e. we use the auto-detected
mime type
* from the filename.
*/
- public static HashMap bucketsByNameToManifestEntries(HashMap
bucketsByName) {
- HashMap manifestEntries = new HashMap();
- Iterator i = bucketsByName.keySet().iterator();
+ public static HashMap<String, Object>
bucketsByNameToManifestEntries(HashMap<String,Object> bucketsByName) {
+ HashMap<String,Object> manifestEntries = new
HashMap<String,Object>();
+ Iterator<String> i = bucketsByName.keySet().iterator();
while(i.hasNext()) {
- String name = (String) i.next();
+ String name = i.next();
Object o = bucketsByName.get(name);
if(o instanceof Bucket) {
Bucket data = (Bucket) bucketsByName.get(name);
manifestEntries.put(name, new
ManifestElement(name, data, null,data.size()));
} else if(o instanceof HashMap) {
- manifestEntries.put(name,
bucketsByNameToManifestEntries((HashMap)o));
+ manifestEntries.put(name,
bucketsByNameToManifestEntries((HashMap<String, Object>)o));
} else
throw new
IllegalArgumentException(String.valueOf(o));
}
@@ -788,20 +1498,20 @@
* Convert a hierarchy of HashMap's of ManifestEntries into a series of
* ManifestElement's, each of which has a full path.
*/
- public static ManifestElement[] flatten(HashMap manifestElements) {
- Vector v = new Vector();
+ public static ManifestElement[] flatten(HashMap<String,Object>
manifestElements) {
+ Vector<ManifestElement> v = new Vector<ManifestElement>();
flatten(manifestElements, v, "");
- return (ManifestElement[]) v.toArray(new
ManifestElement[v.size()]);
+ return v.toArray(new ManifestElement[v.size()]);
}
- public static void flatten(HashMap manifestElements, Vector v, String
prefix) {
- Iterator i = manifestElements.keySet().iterator();
+ public static void flatten(HashMap<String,Object> manifestElements,
Vector<ManifestElement> v, String prefix) {
+ Iterator<String> i = manifestElements.keySet().iterator();
while(i.hasNext()) {
- String name = (String) i.next();
+ String name = i.next();
String fullName = prefix.length() == 0 ? name : prefix+
'/' +name;
Object o = manifestElements.get(name);
if(o instanceof HashMap) {
- flatten((HashMap)o, v, fullName);
+ flatten((HashMap<String, Object>)o, v,
fullName);
} else if(o instanceof ManifestElement) {
ManifestElement me = (ManifestElement) o;
v.add(new ManifestElement(me, fullName));
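
flatten() walks the nested name-to-object HashMaps and joins path segments with
'/'. A hypothetical miniature of the same recursion, collecting full paths into
a list:

    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    class FlattenSketch {
        static void flatten(HashMap<String, Object> tree, List<String> out,
                String prefix) {
            for (Map.Entry<String, Object> e : tree.entrySet()) {
                String full = prefix.length() == 0 ? e.getKey()
                        : prefix + '/' + e.getKey();
                if (e.getValue() instanceof HashMap)
                    flatten((HashMap<String, Object>) e.getValue(), out, full);
                else
                    out.add(full); // leaf: emit the accumulated full path
            }
        }
    }
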
@@ -849,37 +1559,186 @@
}
@Override
- public void onMajorProgress() {
- cb.onMajorProgress();
+ public void onMajorProgress(ObjectContainer container) {
+ if(persistent())
+ container.activate(cb, 1);
+ cb.onMajorProgress(container);
}
- protected void onFetchable(PutHandler handler) {
- synchronized(this) {
- putHandlersWaitingForFetchable.remove(handler);
- if(fetchable) return;
- if(!putHandlersWaitingForFetchable.isEmpty()) return;
- if(!hasResolvedBase) return;
- if(!metadataPuttersUnfetchable.isEmpty()) return;
- fetchable = true;
+ protected void onFetchable(PutHandler handler, ObjectContainer
container) {
+ if(persistent()) {
+ container.activate(putHandlersWaitingForFetchable, 2);
+ container.activate(metadataPuttersUnfetchable, 2);
}
- cb.onFetchable(this);
+ if(checkFetchable(handler)) {
+ if(persistent()) {
+ container.ext().store(putHandlersWaitingForMetadata, 2);
+ container.store(this);
+ container.deactivate(putHandlersWaitingForFetchable, 1);
+ container.deactivate(metadataPuttersUnfetchable, 1);
+ container.activate(cb, 1);
+ }
+ cb.onFetchable(this, container);
+ if(persistent())
+ container.deactivate(cb, 1);
+ } else {
+ if(persistent()) {
+ container.deactivate(putHandlersWaitingForFetchable, 1);
+ container.deactivate(metadataPuttersUnfetchable, 1);
+ }
+ }
}
- public void onFetchable(ClientPutState state) {
+ private synchronized boolean checkFetchable(PutHandler handler) {
+ putHandlersWaitingForFetchable.remove(handler);
+ if(fetchable) return false;
+ if(!putHandlersWaitingForFetchable.isEmpty()) return false;
+ if(!hasResolvedBase) return false;
+ if(!metadataPuttersUnfetchable.isEmpty()) return false;
+ fetchable = true;
+ return true;
+ }
+
+ public void onFetchable(ClientPutState state, ObjectContainer
container) {
Metadata m = (Metadata) state.getToken();
- synchronized(this) {
- metadataPuttersUnfetchable.remove(m);
- if(!metadataPuttersUnfetchable.isEmpty()) return;
- if(fetchable) return;
- if(!putHandlersWaitingForFetchable.isEmpty()) return;
- fetchable = true;
+ if(persistent()) {
+ container.activate(m, 100);
+ container.activate(metadataPuttersUnfetchable, 2);
+ container.activate(putHandlersWaitingForFetchable, 2);
}
- cb.onFetchable(this);
+ if(checkFetchable(m)) {
+ if(persistent()) {
+ container.store(metadataPuttersUnfetchable);
+ container.store(this);
+ container.activate(cb, 1);
+ }
+ cb.onFetchable(this, container);
+ if(persistent())
+ container.deactivate(cb, 1);
+ }
+ if(persistent()) {
+ container.deactivate(metadataPuttersUnfetchable, 1);
+ container.deactivate(putHandlersWaitingForFetchable, 1);
+ }
}
+ private synchronized boolean checkFetchable(Metadata m) {
+ metadataPuttersUnfetchable.remove(m);
+ if(!metadataPuttersUnfetchable.isEmpty()) return false;
+ if(fetchable) return false;
+ if(!putHandlersWaitingForFetchable.isEmpty()) return false;
+ fetchable = true;
+ return true;
+ }
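
[Illustration, not part of the patch] Both onFetchable() paths are now split into a synchronized checkFetchable() that decides the one-shot state transition and an unsynchronized caller that fires the callback, so cb.onFetchable() can never run while this lock is held. The shape of that refactoring:

    public class FetchableGate {
        private boolean fetchable;
        private int outstanding;

        public FetchableGate(int parts) {
            this.outstanding = parts;
        }

        // Decide the transition under the lock; returns true exactly once.
        private synchronized boolean checkFetchable() {
            if(fetchable) return false;
            if(--outstanding > 0) return false;
            fetchable = true;
            return true;
        }

        // Fire the callback outside the lock, so a slow or re-entrant
        // callback cannot deadlock against other synchronized methods.
        public void onPartFetchable(Runnable callback) {
            if(checkFetchable())
                callback.run();
        }
    }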
+
@Override
- public void onTransition(ClientGetState oldState, ClientGetState
newState) {
+ public void onTransition(ClientGetState oldState, ClientGetState
newState, ObjectContainer container) {
// Ignore
}
+ @Override
+ public void removeFrom(ObjectContainer container, ClientContext
context) {
+ if(putHandlersByName != null) {
+ Logger.error(this, "Put handlers list still present in
removeFrom() on "+this);
+ removePutHandlers(container, context);
+ }
+ if(finalURI != null) finalURI.removeFrom(container);
+ targetURI.removeFrom(container);
+ container.activate(ctx, 1);
+ ctx.removeFrom(container);
+ container.activate(metadataPuttersByMetadata, 2);
+ container.activate(metadataPuttersUnfetchable, 2);
+ if(!metadataPuttersByMetadata.isEmpty()) {
+ Logger.error(this, "Metadata putters by metadata not
empty in removeFrom() on "+this);
+ for(Map.Entry<Metadata, ClientPutState> entry :
metadataPuttersByMetadata.entrySet()) {
+ Metadata meta = entry.getKey();
+ container.activate(meta, 1);
+ ClientPutState sfi = entry.getValue();
+ container.activate(sfi, 1);
+ metadataPuttersUnfetchable.remove(meta);
+ Logger.error(this, "Metadata putters not empty:
"+sfi+" for "+this);
+ sfi.cancel(container, context);
+ sfi.removeFrom(container, context);
+ }
+ }
+ if(!metadataPuttersUnfetchable.isEmpty()) {
+ Logger.error(this, "Metadata putters unfetchable by
metadata not empty in removeFrom() on "+this);
+ for(Map.Entry<Metadata, ClientPutState> entry :
metadataPuttersByMetadata.entrySet()) {
+ Metadata meta = entry.getKey();
+ container.activate(meta, 1);
+ ClientPutState sfi = entry.getValue();
+ container.activate(sfi, 1);
+ metadataPuttersUnfetchable.remove(meta);
+ Logger.error(this, "Metadata putters
unfetchable not empty: "+sfi+" for "+this);
+ sfi.cancel(container, context);
+ sfi.removeFrom(container, context);
+ }
+ }
+ metadataPuttersByMetadata.clear();
+ metadataPuttersUnfetchable.clear();
+ container.delete(metadataPuttersByMetadata);
+ container.delete(metadataPuttersUnfetchable);
+ if(baseMetadata != null) {
+ container.activate(baseMetadata, 1);
+ baseMetadata.removeFrom(container);
+ }
+ container.activate(runGotAllMetadata, 1);
+ container.delete(runGotAllMetadata);
+ super.removeFrom(container, context);
+ }
+
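[Illustration, not part of the patch] db4o does not cascade deletes, so removeFrom() must walk every owned field: activate it, tell it to remove itself, clear and delete each collection, and only then delete this. The skeleton of such a method for an invented Owner class:

    import java.util.HashMap;

    import com.db4o.ObjectContainer;

    public class Owner {
        private HashMap<String, Object> children = new HashMap<String, Object>();
        private Owner delegate;

        public void removeFrom(ObjectContainer container) {
            // Activate before touching lazily-loaded state.
            container.activate(children, 2);
            children.clear();
            container.delete(children); // the map itself is a stored object
            if(delegate != null) {
                container.activate(delegate, 1);
                delegate.removeFrom(container); // recurse into owned objects
            }
            container.delete(this);
        }
    }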
+ public void objectOnUpdate(ObjectContainer container) {
+ if(logMINOR) Logger.minor(this, "Updating "+this+"
activated="+container.ext().isActive(this)+"
stored="+container.ext().isStored(this), new Exception("debug"));
+ }
+
+ public boolean objectCanNew(ObjectContainer container) {
+ if(finished) {
+ Logger.error(this, "Storing "+this+" when already
finished!", new Exception("error"));
+ return false;
+ }
+ if(logMINOR) Logger.minor(this, "Storing "+this+"
activated="+container.ext().isActive(this)+"
stored="+container.ext().isStored(this), new Exception("debug"));
+ return true;
+ }
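
[Illustration, not part of the patch] objectCanNew() and objectOnUpdate() are db4o callback methods: db4o finds them by name on the stored class and invokes them around persistence events, and returning false from a can*() method vetoes the operation. That is how the code above refuses to store an already-finished putter. Minimal shape, with an invented Record class:

    import com.db4o.ObjectContainer;

    public class Record {
        private boolean finished;

        // Invoked by db4o before the first store; false vetoes it.
        public boolean objectCanNew(ObjectContainer container) {
            if(finished) {
                System.err.println("refusing to store finished " + this);
                return false;
            }
            return true;
        }

        // Invoked by db4o after each update of this object.
        public void objectOnUpdate(ObjectContainer container) {
            System.out.println("updated " + this);
        }
    }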
+
+ // compose helper stuff
+
+ protected final ClientMetadata guessMime(String name, ManifestElement
me) {
+ String mimeType = me.mimeOverride;
+ if(mimeType == null)
+ mimeType = DefaultMIMETypes.guessMIMEType(name, true);
+ ClientMetadata cm;
+ if(mimeType == null ||
mimeType.equals(DefaultMIMETypes.DEFAULT_MIME_TYPE))
+ cm = null;
+ else
+ cm = new ClientMetadata(mimeType);
+ return cm;
+ }
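
[Illustration, not part of the patch] guessMime() treats "no better guess than the default" the same as "no guess at all", so no ClientMetadata is attached in either case. The same fallback logic over a toy extension table (DefaultMIMETypes is the real lookup; the table here is invented):

    import java.util.HashMap;
    import java.util.Map;

    public class MimeGuess {
        static final String DEFAULT_MIME_TYPE = "application/octet-stream";
        private static final Map<String, String> BY_EXTENSION =
                new HashMap<String, String>();
        static {
            BY_EXTENSION.put("html", "text/html");
            BY_EXTENSION.put("png", "image/png");
            BY_EXTENSION.put("txt", "text/plain");
        }

        // Returns null when nothing better than the default is known,
        // mirroring guessMime() returning a null ClientMetadata.
        public static String guess(String name) {
            int dot = name.lastIndexOf('.');
            if(dot < 0 || dot == name.length() - 1) return null;
            String mime = BY_EXTENSION.get(name.substring(dot + 1).toLowerCase());
            if(mime == null || mime.equals(DEFAULT_MIME_TYPE)) return null;
            return mime;
        }
    }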
+
+ protected final void addRedirectNoMime(String name, ManifestElement me,
HashMap<String, Object> putHandlersByName2) {
+ addRedirect(name, me, null, putHandlersByName2);
+ }
+
+ protected final void addRedirect(String name, ManifestElement me,
HashMap<String, Object> putHandlersByName2) {
+ addRedirect(name, me, guessMime(name, me), putHandlersByName2);
+ }
+
+ protected final void addRedirect(String name, ManifestElement me,
ClientMetadata cm, HashMap<String, Object> putHandlersByName2) {
+ PutHandler ph;
+ Bucket data = me.data;
+ if(me.targetURI != null) {
+ ph = new PutHandler(this, name, me.targetURI, cm);
+ // Just a placeholder, don't actually run it
+ } else {
+ ph = new PutHandler(this, name, data, cm, getCHKOnly);
+ runningPutHandlers.add(ph);
+ putHandlersWaitingForMetadata.add(ph);
+ putHandlersWaitingForFetchable.add(ph);
+ if(logMINOR)
+ Logger.minor(this, "Inserting separately as
PutHandler: "+name+" : "+ph+" persistent="+ph.persistent()+":"+ph.persistent+"
"+persistent());
+ numberOfFiles++;
+ totalSize += data.size();
+ }
+ putHandlersByName2.put(name, ph);
+ }
}
+
Modified: trunk/freenet/src/freenet/client/async/SimpleSingleFileFetcher.java
===================================================================
--- trunk/freenet/src/freenet/client/async/SimpleSingleFileFetcher.java
2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/client/async/SimpleSingleFileFetcher.java
2009-04-01 20:34:09 UTC (rev 26322)
@@ -5,6 +5,8 @@
import java.io.IOException;
+import com.db4o.ObjectContainer;
+
import freenet.client.ClientMetadata;
import freenet.client.FetchContext;
import freenet.client.FetchException;
@@ -14,7 +16,6 @@
import freenet.keys.KeyDecodeException;
import freenet.keys.TooBigException;
import freenet.node.LowLevelGetException;
-import freenet.node.RequestScheduler;
import freenet.support.Logger;
import freenet.support.api.Bucket;
@@ -25,15 +26,15 @@
public class SimpleSingleFileFetcher extends BaseSingleFileFetcher implements
ClientGetState {
SimpleSingleFileFetcher(ClientKey key, int maxRetries, FetchContext
ctx, ClientRequester parent,
- GetCompletionCallback rcb, boolean isEssential, boolean
dontAdd, long l) {
- super(key, maxRetries, ctx, parent);
+ GetCompletionCallback rcb, boolean isEssential, boolean
dontAdd, long l, ObjectContainer container, ClientContext context, boolean
deleteFetchContext) {
+ super(key, maxRetries, ctx, parent, deleteFetchContext);
this.rcb = rcb;
this.token = l;
if(!dontAdd) {
- parent.addBlock();
+ parent.addBlock(container);
if(isEssential)
- parent.addMustSucceedBlocks(1);
- parent.notifyClients();
+ parent.addMustSucceedBlocks(1, container);
+ parent.notifyClients(container, context);
}
}
@@ -42,51 +43,51 @@
// Translate it, then call the real onFailure
@Override
- public void onFailure(LowLevelGetException e, Object reqTokenIgnored,
RequestScheduler sched) {
+ public void onFailure(LowLevelGetException e, Object reqTokenIgnored,
ObjectContainer container, ClientContext context) {
switch(e.code) {
case LowLevelGetException.DATA_NOT_FOUND:
- onFailure(new
FetchException(FetchException.DATA_NOT_FOUND), sched);
+ onFailure(new
FetchException(FetchException.DATA_NOT_FOUND), false, container, context);
return;
case LowLevelGetException.DATA_NOT_FOUND_IN_STORE:
- onFailure(new
FetchException(FetchException.DATA_NOT_FOUND), sched);
+ onFailure(new
FetchException(FetchException.DATA_NOT_FOUND), false, container, context);
return;
case LowLevelGetException.RECENTLY_FAILED:
- onFailure(new
FetchException(FetchException.RECENTLY_FAILED), sched);
+ onFailure(new
FetchException(FetchException.RECENTLY_FAILED), false, container, context);
return;
case LowLevelGetException.DECODE_FAILED:
- onFailure(new
FetchException(FetchException.BLOCK_DECODE_ERROR), sched);
+ onFailure(new
FetchException(FetchException.BLOCK_DECODE_ERROR), false, container, context);
return;
case LowLevelGetException.INTERNAL_ERROR:
- onFailure(new
FetchException(FetchException.INTERNAL_ERROR), sched);
+ onFailure(new
FetchException(FetchException.INTERNAL_ERROR), false, container, context);
return;
case LowLevelGetException.REJECTED_OVERLOAD:
- onFailure(new
FetchException(FetchException.REJECTED_OVERLOAD), sched);
+ onFailure(new
FetchException(FetchException.REJECTED_OVERLOAD), false, container, context);
return;
case LowLevelGetException.ROUTE_NOT_FOUND:
- onFailure(new
FetchException(FetchException.ROUTE_NOT_FOUND), sched);
+ onFailure(new
FetchException(FetchException.ROUTE_NOT_FOUND), false, container, context);
return;
case LowLevelGetException.TRANSFER_FAILED:
- onFailure(new
FetchException(FetchException.TRANSFER_FAILED), sched);
+ onFailure(new
FetchException(FetchException.TRANSFER_FAILED), false, container, context);
return;
case LowLevelGetException.VERIFY_FAILED:
- onFailure(new
FetchException(FetchException.BLOCK_DECODE_ERROR), sched);
+ onFailure(new
FetchException(FetchException.BLOCK_DECODE_ERROR), false, container, context);
return;
case LowLevelGetException.CANCELLED:
- onFailure(new FetchException(FetchException.CANCELLED),
sched);
+ onFailure(new FetchException(FetchException.CANCELLED),
false, container, context);
return;
default:
Logger.error(this, "Unknown LowLevelGetException code:
"+e.code);
- onFailure(new
FetchException(FetchException.INTERNAL_ERROR), sched);
+ onFailure(new
FetchException(FetchException.INTERNAL_ERROR), false, container, context);
return;
}
}
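
[Illustration, not part of the patch] The switch above is a pure code-to-code translation: every arm now calls the four-argument onFailure() with forceFatal=false, and unknown codes fall through to INTERNAL_ERROR. Where no per-code extras are needed, such a mapping can also be kept declaratively; a sketch with invented integer constants standing in for the LowLevelGetException/FetchException codes:

    import java.util.HashMap;
    import java.util.Map;

    public class ErrorTranslation {
        // Invented stand-ins for the real constants.
        static final int LOW_DATA_NOT_FOUND = 1, LOW_DATA_NOT_FOUND_IN_STORE = 2,
                LOW_DECODE_FAILED = 3;
        static final int HIGH_DATA_NOT_FOUND = 101, HIGH_BLOCK_DECODE_ERROR = 102,
                HIGH_INTERNAL_ERROR = 199;

        private static final Map<Integer, Integer> LOW_TO_HIGH =
                new HashMap<Integer, Integer>();
        static {
            LOW_TO_HIGH.put(LOW_DATA_NOT_FOUND, HIGH_DATA_NOT_FOUND);
            LOW_TO_HIGH.put(LOW_DATA_NOT_FOUND_IN_STORE, HIGH_DATA_NOT_FOUND);
            LOW_TO_HIGH.put(LOW_DECODE_FAILED, HIGH_BLOCK_DECODE_ERROR);
        }

        static int translate(int lowLevelCode) {
            Integer high = LOW_TO_HIGH.get(lowLevelCode);
            return high == null ? HIGH_INTERNAL_ERROR : high; // unknown -> internal
        }
    }

The patch keeps the explicit switch, which also lets individual arms pass extra FetchException arguments where needed.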
- final void onFailure(FetchException e, RequestScheduler sched) {
- onFailure(e, false, sched);
- }
-
// Real onFailure
- protected void onFailure(FetchException e, boolean forceFatal,
RequestScheduler sched) {
+ protected void onFailure(FetchException e, boolean forceFatal,
ObjectContainer container, ClientContext context) {
+ if(persistent) {
+ container.activate(parent, 1);
+ container.activate(rcb, 1);
+ }
boolean logMINOR = Logger.shouldLog(Logger.MINOR, this);
if(logMINOR) Logger.minor(this, "onFailure( "+e+" ,
"+forceFatal+")", e);
if(parent.isCancelled() || cancelled) {
@@ -95,62 +96,74 @@
forceFatal = true;
}
if(!(e.isFatal() || forceFatal) ) {
- if(retry(sched, ctx.executor)) {
+ if(retry(container, context)) {
if(logMINOR) Logger.minor(this, "Retrying");
return;
}
}
// :(
- unregister(false);
+ unregisterAll(container, context);
+ synchronized(this) {
+ finished = true;
+ }
+ if(persistent)
+ container.store(this);
if(e.isFatal() || forceFatal)
- parent.fatallyFailedBlock();
+ parent.fatallyFailedBlock(container, context);
else
- parent.failedBlock();
- rcb.onFailure(e, this);
+ parent.failedBlock(container, context);
+ rcb.onFailure(e, this, container, context);
}
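
[Illustration, not part of the patch] The failure path above: non-fatal errors go back through retry(); fatal errors, or an exhausted retry budget, unregister the request, mark it finished, persist that flag, and report upwards. The budget logic in isolation, with -1 meaning unlimited as in ctx.maxInsertRetries further down:

    public class RetryBudget {
        private int retries;
        private final int maxRetries; // -1 = retry forever

        public RetryBudget(int maxRetries) {
            this.maxRetries = maxRetries;
        }

        // true = try again; false = give up and fail the request.
        public synchronized boolean shouldRetry(boolean fatal) {
            if(fatal) return false;
            retries++;
            return maxRetries == -1 || retries <= maxRetries;
        }
    }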
/** Will be overridden by SingleFileFetcher */
- protected void onSuccess(FetchResult data, RequestScheduler sched) {
- unregister(false);
+ protected void onSuccess(FetchResult data, ObjectContainer container,
ClientContext context) {
+ if(persistent) {
+ container.activate(parent, 1);
+ container.activate(rcb, 1);
+ }
if(parent.isCancelled()) {
data.asBucket().free();
- onFailure(new FetchException(FetchException.CANCELLED),
sched);
+ if(persistent) data.asBucket().removeFrom(container);
+ onFailure(new FetchException(FetchException.CANCELLED),
false, container, context);
return;
}
- rcb.onSuccess(data, this);
+ rcb.onSuccess(data, this, container, context);
}
@Override
- public void onSuccess(ClientKeyBlock block, boolean fromStore, Object
reqTokenIgnored, RequestScheduler sched) {
+ public void onSuccess(ClientKeyBlock block, boolean fromStore, Object
reqTokenIgnored, ObjectContainer container, ClientContext context) {
+ if(persistent) {
+ container.activate(parent, 1);
+ }
if(parent instanceof ClientGetter)
- ((ClientGetter)parent).addKeyToBinaryBlob(block);
- Bucket data = extract(block, sched);
+ ((ClientGetter)parent).addKeyToBinaryBlob(block,
container, context);
+ Bucket data = extract(block, container, context);
if(data == null) return; // failed
if(!block.isMetadata()) {
- onSuccess(new FetchResult((ClientMetadata)null, data),
sched);
+ onSuccess(new FetchResult((ClientMetadata)null, data),
container, context);
} else {
- onFailure(new
FetchException(FetchException.INVALID_METADATA, "Metadata where expected
data"), sched);
+ onFailure(new
FetchException(FetchException.INVALID_METADATA, "Metadata where expected
data"), false, container, context);
}
}
/** Convert a ClientKeyBlock to a Bucket. If an error occurs, report it
via onFailure
* and return null.
*/
- protected Bucket extract(ClientKeyBlock block, RequestScheduler sched) {
+ protected Bucket extract(ClientKeyBlock block, ObjectContainer
container, ClientContext context) {
Bucket data;
try {
- data = block.decode(ctx.bucketFactory,
(int)(Math.min(ctx.maxOutputLength, Integer.MAX_VALUE)), false);
+ data =
block.decode(context.getBucketFactory(parent.persistent()),
(int)(Math.min(ctx.maxOutputLength, Integer.MAX_VALUE)), false);
} catch (KeyDecodeException e1) {
if(Logger.shouldLog(Logger.MINOR, this))
Logger.minor(this, "Decode failure: "+e1, e1);
- onFailure(new
FetchException(FetchException.BLOCK_DECODE_ERROR, e1.getMessage()), sched);
+ onFailure(new
FetchException(FetchException.BLOCK_DECODE_ERROR, e1.getMessage()), false,
container, context);
return null;
} catch (TooBigException e) {
- onFailure(new FetchException(FetchException.TOO_BIG,
e), sched);
+ onFailure(new FetchException(FetchException.TOO_BIG,
e), false, container, context);
return null;
} catch (IOException e) {
Logger.error(this, "Could not capture data - disk
full?: "+e, e);
- onFailure(new
FetchException(FetchException.BUCKET_ERROR, e), sched);
+ onFailure(new
FetchException(FetchException.BUCKET_ERROR, e), false, container, context);
return null;
}
return data;
@@ -161,4 +174,13 @@
return token;
}
+ public void onFailed(KeyListenerConstructionException e,
ObjectContainer container, ClientContext context) {
+ onFailure(e.getFetchException(), false, container, context);
+ }
+
+ public void removeFrom(ObjectContainer container, ClientContext
context) {
+ super.removeFrom(container, context);
+ // rcb is definitely not our responsibility.
+ }
+
}
Modified: trunk/freenet/src/freenet/client/async/SingleBlockInserter.java
===================================================================
--- trunk/freenet/src/freenet/client/async/SingleBlockInserter.java
2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/client/async/SingleBlockInserter.java
2009-04-01 20:34:09 UTC (rev 26322)
@@ -4,34 +4,46 @@
package freenet.client.async;
import java.io.IOException;
-import java.lang.ref.SoftReference;
import java.net.MalformedURLException;
+import java.util.Collections;
+import java.util.List;
+import com.db4o.ObjectContainer;
+
import freenet.client.FailureCodeTracker;
import freenet.client.InsertContext;
import freenet.client.InsertException;
+import freenet.crypt.RandomSource;
import freenet.keys.CHKEncodeException;
import freenet.keys.ClientCHKBlock;
+import freenet.keys.ClientKey;
import freenet.keys.ClientKeyBlock;
import freenet.keys.FreenetURI;
import freenet.keys.InsertableClientSSK;
+import freenet.keys.KeyEncodeException;
import freenet.keys.SSKEncodeException;
import freenet.node.KeysFetchingLocally;
import freenet.node.LowLevelPutException;
import freenet.node.NodeClientCore;
+import freenet.node.NullSendableRequestItem;
+import freenet.node.RequestClient;
import freenet.node.RequestScheduler;
import freenet.node.SendableInsert;
+import freenet.node.SendableRequestItem;
+import freenet.node.SendableRequestSender;
import freenet.support.Logger;
import freenet.support.SimpleFieldSet;
import freenet.support.api.Bucket;
+import freenet.support.io.BucketTools;
+import freenet.support.io.NativeThread;
/**
* Insert *ONE KEY*.
*/
-public class SingleBlockInserter extends SendableInsert implements
ClientPutState {
+public class SingleBlockInserter extends SendableInsert implements
ClientPutState, Encodeable {
private static boolean logMINOR;
- final Bucket sourceData;
+ Bucket sourceData;
final short compressionCodec;
final FreenetURI uri; // uses essentially no RAM in the common case of
a CHK because we use FreenetURI.EMPTY_CHK_URI
FreenetURI resultingURI;
@@ -42,16 +54,38 @@
private final FailureCodeTracker errors;
private boolean finished;
private final boolean dontSendEncoded;
- private SoftReference refToClientKeyBlock;
final int token; // for e.g. splitfiles
private final Object tokenObject;
final boolean isMetadata;
final boolean getCHKOnly;
final int sourceLength;
private int consecutiveRNFs;
+ private boolean isSSK;
private boolean freeData;
- public SingleBlockInserter(BaseClientPutter parent, Bucket data, short
compressionCodec, FreenetURI uri, InsertContext ctx, PutCompletionCallback cb,
boolean isMetadata, int sourceLength, int token, boolean getCHKOnly, boolean
addToParent, boolean dontSendEncoded, Object tokenObject, boolean freeData) {
+ /**
+ * Create a SingleBlockInserter.
+ * @param parent
+ * @param data
+ * @param compressionCodec The compression codec.
+ * @param uri
+ * @param ctx
+ * @param cb
+ * @param isMetadata
+ * @param sourceLength The length of the original, uncompressed data.
+ * @param token
+ * @param getCHKOnly
+ * @param addToParent
+ * @param dontSendEncoded
+ * @param tokenObject
+ * @param container
+ * @param context
+ * @param persistent
+ * @param freeData
+ */
+ public SingleBlockInserter(BaseClientPutter parent, Bucket data, short
compressionCodec, FreenetURI uri, InsertContext ctx, PutCompletionCallback cb,
boolean isMetadata, int sourceLength, int token, boolean getCHKOnly, boolean
addToParent, boolean dontSendEncoded, Object tokenObject, ObjectContainer
container, ClientContext context, boolean persistent, boolean freeData) {
+ super(persistent);
+ assert(persistent == parent.persistent());
this.consecutiveRNFs = 0;
this.tokenObject = tokenObject;
this.token = token;
@@ -70,91 +104,120 @@
this.isMetadata = isMetadata;
this.sourceLength = sourceLength;
this.getCHKOnly = getCHKOnly;
+ isSSK = uri.getKeyType().toUpperCase().equals("SSK");
if(addToParent) {
- parent.addBlock();
- parent.addMustSucceedBlocks(1);
- parent.notifyClients();
+ parent.addBlock(container);
+ parent.addMustSucceedBlocks(1, container);
+ parent.notifyClients(container, context);
}
logMINOR = Logger.shouldLog(Logger.MINOR, this);
}
- protected ClientKeyBlock innerEncode() throws InsertException {
+ protected ClientKeyBlock innerEncode(RandomSource random,
ObjectContainer container) throws InsertException {
+ if(persistent) {
+ container.activate(uri, 1);
+ container.activate(sourceData, 1);
+ }
+ try {
+ return innerEncode(random, uri, sourceData, isMetadata,
compressionCodec, sourceLength);
+ } catch (KeyEncodeException e) {
+ Logger.error(SingleBlockInserter.class, "Caught "+e, e);
+ throw new
InsertException(InsertException.INTERNAL_ERROR, e, null);
+ } catch (MalformedURLException e) {
+ throw new InsertException(InsertException.INVALID_URI,
e, null);
+ } catch (IOException e) {
+ Logger.error(SingleBlockInserter.class, "Caught "+e+"
encoding data "+sourceData, e);
+ throw new InsertException(InsertException.BUCKET_ERROR,
e, null);
+ }
+
+ }
+
+ protected static ClientKeyBlock innerEncode(RandomSource random,
FreenetURI uri, Bucket sourceData, boolean isMetadata, short compressionCodec,
int sourceLength) throws InsertException, CHKEncodeException, IOException,
SSKEncodeException, MalformedURLException {
String uriType = uri.getKeyType();
if(uriType.equals("CHK")) {
- try {
- return ClientCHKBlock.encode(sourceData,
isMetadata, compressionCodec == -1, compressionCodec, sourceLength);
- } catch (CHKEncodeException e) {
- Logger.error(this, "Caught "+e, e);
- throw new
InsertException(InsertException.INTERNAL_ERROR, e, null);
- } catch (IOException e) {
- Logger.error(this, "Caught "+e+" encoding data
"+sourceData, e);
- throw new
InsertException(InsertException.BUCKET_ERROR, e, null);
- }
+ return ClientCHKBlock.encode(sourceData, isMetadata,
compressionCodec == -1, compressionCodec, sourceLength);
} else if(uriType.equals("SSK") || uriType.equals("KSK")) {
- try {
- InsertableClientSSK ik =
InsertableClientSSK.create(uri);
- return ik.encode(sourceData, isMetadata,
compressionCodec == -1, compressionCodec, sourceLength, ctx.random);
- } catch (MalformedURLException e) {
- throw new
InsertException(InsertException.INVALID_URI, e, null);
- } catch (SSKEncodeException e) {
- Logger.error(this, "Caught "+e, e);
- throw new
InsertException(InsertException.INTERNAL_ERROR, e, null);
- } catch (IOException e) {
- Logger.error(this, "Caught "+e, e);
- throw new
InsertException(InsertException.BUCKET_ERROR, e, null);
- }
+ InsertableClientSSK ik =
InsertableClientSSK.create(uri);
+ return ik.encode(sourceData, isMetadata,
compressionCodec == -1, compressionCodec, sourceLength, random);
} else {
throw new InsertException(InsertException.INVALID_URI,
"Unknown keytype "+uriType, null);
}
}
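
[Illustration, not part of the patch] innerEncode() now also exists as a static method taking every input as a parameter. That is what lets the network thread, in getSender() below, encode from a detached BlockItem snapshot without activating the persistent SingleBlockInserter at all. The shape of that refactoring:

    public class Encoder {
        private final byte[] sourceData;

        public Encoder(byte[] sourceData) {
            this.sourceData = sourceData;
        }

        // Instance entry point: gathers fields, then delegates.
        byte[] encode() {
            return encode(sourceData);
        }

        // Static worker: all inputs arrive as parameters, so a detached
        // snapshot of the fields can be encoded on any thread with no
        // database access.
        static byte[] encode(byte[] data) {
            return data.clone(); // stand-in for the real key encoding
        }
    }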
- protected ClientKeyBlock encode() throws InsertException {
+ protected void onEncode(ClientKey key, ObjectContainer container,
ClientContext context) {
+ synchronized(this) {
+ if(finished) return;
+ if(resultingURI != null) return;
+ resultingURI = key.getURI();
+ }
+ if(persistent) {
+ container.store(this);
+ container.activate(cb, 1);
+ }
+ cb.onEncode(key, this, container, context);
+ if(persistent)
+ container.deactivate(cb, 1);
+ }
+
+ protected ClientKeyBlock encode(ObjectContainer container,
ClientContext context, boolean calledByCB) throws InsertException {
+ if(persistent) {
+ container.activate(sourceData, 1);
+ container.activate(cb, 1);
+ }
ClientKeyBlock block;
boolean shouldSend;
synchronized(this) {
- if(refToClientKeyBlock != null) {
- block = (ClientKeyBlock)
refToClientKeyBlock.get();
- if(block != null) return block;
+ if(finished) return null;
+ if(sourceData == null) {
+ Logger.error(this, "Source data is null on
"+this+" but not finished!");
+ return null;
}
- block = innerEncode();
- refToClientKeyBlock =
- new SoftReference(block);
+ block = innerEncode(context.random, container);
shouldSend = (resultingURI == null);
resultingURI = block.getClientKey().getURI();
}
+ if(logMINOR)
+ Logger.minor(this, "Encoded "+resultingURI+" for
"+this+" shouldSend="+shouldSend+" dontSendEncoded="+dontSendEncoded);
if(shouldSend && !dontSendEncoded)
- cb.onEncode(block.getClientKey(), this);
+ cb.onEncode(block.getClientKey(), this, container,
context);
+ if(shouldSend && persistent)
+ container.store(this);
+ if(persistent && !calledByCB)
+ container.deactivate(cb, 1);
return block;
}
- public boolean isInsert() {
- return true;
+ public short getPriorityClass(ObjectContainer container) {
+ if(persistent) container.activate(parent, 1);
+ return parent.getPriorityClass(); // Not much point deactivating
}
@Override
- public short getPriorityClass() {
- return parent.getPriorityClass();
- }
-
- @Override
public int getRetryCount() {
return retries;
}
@Override
- public void onFailure(LowLevelPutException e, Object keyNum) {
+ public void onFailure(LowLevelPutException e, Object keyNum,
ObjectContainer container, ClientContext context) {
+ synchronized(this) {
+ if(finished) return;
+ }
+ if(persistent)
+ container.activate(errors, 1);
if(parent.isCancelled()) {
- fail(new InsertException(InsertException.CANCELLED));
+ fail(new InsertException(InsertException.CANCELLED),
container, context);
return;
}
+ logMINOR = Logger.shouldLog(Logger.MINOR, this);
+ if(logMINOR) Logger.minor(this, "onFailure() on "+e+" for
"+this);
switch(e.code) {
case LowLevelPutException.COLLISION:
- fail(new InsertException(InsertException.COLLISION));
- break;
+ fail(new InsertException(InsertException.COLLISION),
container, context);
+ return;
case LowLevelPutException.INTERNAL_ERROR:
- errors.inc(InsertException.INTERNAL_ERROR);
- break;
+ fail(new
InsertException(InsertException.INTERNAL_ERROR), container, context);
+ return;
case LowLevelPutException.REJECTED_OVERLOAD:
errors.inc(InsertException.REJECTED_OVERLOAD);
break;
@@ -168,12 +231,14 @@
Logger.error(this, "Unknown LowLevelPutException code:
"+e.code);
errors.inc(InsertException.INTERNAL_ERROR);
}
+ if(persistent)
+ container.activate(ctx, 1);
if(e.code == LowLevelPutException.ROUTE_NOT_FOUND || e.code ==
LowLevelPutException.ROUTE_REALLY_NOT_FOUND) {
consecutiveRNFs++;
if(logMINOR) Logger.minor(this, "Consecutive RNFs:
"+consecutiveRNFs+" / "+ctx.consecutiveRNFsCountAsSuccess);
if(consecutiveRNFs ==
ctx.consecutiveRNFsCountAsSuccess) {
if(logMINOR) Logger.minor(this, "Consecutive
RNFs: "+consecutiveRNFs+" - counting as success");
- onSuccess(keyNum);
+ onSuccess(keyNum, container, context);
return;
}
} else
@@ -181,79 +246,119 @@
if(logMINOR) Logger.minor(this, "Failed: "+e);
retries++;
if((retries > ctx.maxInsertRetries) && (ctx.maxInsertRetries !=
-1)) {
- fail(InsertException.construct(errors));
+ fail(InsertException.construct(persistent ?
errors.clone() : errors), container, context);
+ if(persistent)
+ container.deactivate(ctx, 1);
return;
}
- getScheduler().register(this);
+ if(persistent) {
+ container.store(this);
+ container.deactivate(ctx, 1);
+ }
+ getScheduler(context).registerInsert(this, persistent, false,
true, container);
}
- private void fail(InsertException e) {
- fail(e, false);
+ private void fail(InsertException e, ObjectContainer container,
ClientContext context) {
+ fail(e, false, container, context);
}
- private void fail(InsertException e, boolean forceFatal) {
+ private void fail(InsertException e, boolean forceFatal,
ObjectContainer container, ClientContext context) {
synchronized(this) {
if(finished) return;
finished = true;
}
+ if(persistent)
+ container.store(this);
if(e.isFatal() || forceFatal)
- parent.fatallyFailedBlock();
+ parent.fatallyFailedBlock(container, context);
else
- parent.failedBlock();
- cb.onFailure(e, this);
- if(freeData)
+ parent.failedBlock(container, context);
+ unregister(container, context);
+ if(freeData) {
+ if(persistent) container.activate(sourceData, 1);
sourceData.free();
+ if(persistent) sourceData.removeFrom(container);
+ sourceData = null;
+ if(persistent)
+ container.store(this);
+ }
+ if(persistent)
+ container.activate(cb, 1);
+ cb.onFailure(e, this, container, context);
}
- public ClientKeyBlock getBlock() {
+ public ClientKeyBlock getBlock(ObjectContainer container, ClientContext
context, boolean calledByCB) {
try {
synchronized (this) {
if(finished) return null;
}
- return encode();
+ if(persistent)
+ container.store(this);
+ return encode(container, context, calledByCB);
} catch (InsertException e) {
- cb.onFailure(e, this);
+ if(persistent)
+ container.activate(cb, 1);
+ cb.onFailure(e, this, container, context);
+ if(persistent && !calledByCB)
+ container.deactivate(cb, 1);
return null;
} catch (Throwable t) {
+ if(persistent)
+ container.activate(cb, 1);
Logger.error(this, "Caught "+t, t);
- cb.onFailure(new
InsertException(InsertException.INTERNAL_ERROR, t, null), this);
+ cb.onFailure(new
InsertException(InsertException.INTERNAL_ERROR, t, null), this, container,
context);
+ if(persistent && !calledByCB)
+ container.deactivate(cb, 1);
return null;
}
}
- public void schedule() throws InsertException {
+ public void schedule(ObjectContainer container, ClientContext context)
throws InsertException {
synchronized(this) {
- if(finished) return;
+ if(finished) {
+ if(logMINOR)
+ Logger.minor(this, "Finished already:
"+this);
+ return;
+ }
}
if(getCHKOnly) {
- ClientKeyBlock block = encode();
- cb.onEncode(block.getClientKey(), this);
- parent.completedBlock(false);
- cb.onSuccess(this);
+ boolean deactivateCB = false;
+ if(persistent) {
+ deactivateCB = !container.ext().isActive(cb);
+ if(deactivateCB)
+ container.activate(cb, 1);
+ }
+ ClientKeyBlock block = encode(container, context, true);
+ cb.onEncode(block.getClientKey(), this, container,
context);
+ parent.completedBlock(false, container, context);
+ cb.onSuccess(this, container, context);
finished = true;
+ if(persistent) {
+ container.store(this);
+ if(deactivateCB)
+ container.deactivate(cb, 1);
+ }
} else {
- getScheduler().register(this);
+ getScheduler(context).registerInsert(this, persistent,
true, true, container);
}
}
- private ClientRequestScheduler getScheduler() {
- String uriType = uri.getKeyType();
- if(uriType.equals("CHK"))
- return parent.chkScheduler;
- else if(uriType.equals("SSK") || uriType.equals("KSK"))
- return parent.sskScheduler;
- else throw new IllegalArgumentException();
+ public boolean isSSK() {
+ return isSSK;
}
- public FreenetURI getURI() {
+ public FreenetURI getURI(ObjectContainer container, ClientContext
context) {
synchronized(this) {
- if(resultingURI != null)
- return resultingURI;
+ if(resultingURI != null) {
+ if(persistent) container.activate(resultingURI,
5);
+ return persistent ? resultingURI.clone() :
resultingURI;
+ }
}
- getBlock();
+ getBlock(container, context, true);
synchronized(this) {
// FIXME not really necessary? resultingURI is never
dropped, only set.
- return resultingURI;
+ if(persistent) container.activate(resultingURI, 5);
+ return persistent ? resultingURI.clone() : resultingURI;
}
}
@@ -262,78 +367,151 @@
}
@Override
- public void onSuccess(Object keyNum) {
+ public void onSuccess(Object keyNum, ObjectContainer container,
ClientContext context) {
if(logMINOR) Logger.minor(this, "Succeeded ("+this+"): "+token);
+ if(persistent)
+ container.activate(parent, 1);
if(parent.isCancelled()) {
- fail(new InsertException(InsertException.CANCELLED));
+ fail(new InsertException(InsertException.CANCELLED),
container, context);
return;
}
synchronized(this) {
+ if(finished) {
+ // Normal with persistence.
+ Logger.normal(this, "Block already completed:
"+this);
+ return;
+ }
finished = true;
}
- if(freeData)
+ if(persistent) {
+ container.store(this);
+ container.activate(sourceData, 1);
+ }
+ if(freeData) {
sourceData.free();
- parent.completedBlock(false);
- cb.onSuccess(this);
+ if(persistent) sourceData.removeFrom(container);
+ sourceData = null;
+ if(persistent)
+ container.store(this);
+ }
+ parent.completedBlock(false, container, context);
+ unregister(container, context);
+ if(persistent)
+ container.activate(cb, 1);
+ if(logMINOR) Logger.minor(this, "Calling onSuccess for "+cb);
+ cb.onSuccess(this, container, context);
+ if(persistent)
+ container.deactivate(cb, 1);
}
public BaseClientPutter getParent() {
return parent;
}
- public void cancel() {
+ public void cancel(ObjectContainer container, ClientContext context) {
synchronized(this) {
if(finished) return;
finished = true;
}
- if(freeData)
+ boolean wasActive = true;
+ if(persistent) {
+ container.store(this);
+ wasActive = container.ext().isActive(cb);
+ if(!wasActive)
+ container.activate(cb, 1);
+ container.activate(sourceData, 1);
+ }
+ if(freeData) {
sourceData.free();
- super.unregister(false);
- cb.onFailure(new InsertException(InsertException.CANCELLED),
this);
+ if(persistent) sourceData.removeFrom(container);
+ sourceData = null;
+ if(persistent)
+ container.store(this);
+ }
+ super.unregister(container, context);
+ cb.onFailure(new InsertException(InsertException.CANCELLED),
this, container, context);
+ if(!wasActive)
+ container.deactivate(cb, 1);
}
- public synchronized boolean isEmpty() {
+ public synchronized boolean isEmpty(ObjectContainer container) {
return finished;
}
@Override
- public synchronized boolean isCancelled() {
+ public synchronized boolean isCancelled(ObjectContainer container) {
return finished;
}
@Override
- public boolean send(NodeClientCore core, RequestScheduler sched, Object
keyNum) {
- // Ignore keyNum, key, since we're only sending one block.
- try {
- if(logMINOR) Logger.minor(this, "Starting request:
"+this);
- ClientKeyBlock b = getBlock();
- if(b != null)
- core.realPut(b, ctx.cacheLocalRequests);
- else {
- synchronized(this) {
- if(finished) {
- Logger.error(this, "Trying to
run send "+this+" when already finished", new Exception("error"));
+ public SendableRequestSender getSender(ObjectContainer container,
ClientContext context) {
+ return new SendableRequestSender() {
+
+ public boolean send(NodeClientCore core,
RequestScheduler sched, final ClientContext context, ChosenBlock req) {
+ // Ignore keyNum, key, since we're only sending
one block.
+ try {
+ if(logMINOR) Logger.minor(this,
"Starting request: "+SingleBlockInserter.this);
+ BlockItem block = (BlockItem) req.token;
+ ClientKeyBlock b;
+ try {
+ b = innerEncode(context.random,
block.uri, block.copyBucket, block.isMetadata, block.compressionCodec,
block.sourceLength);
+ } catch (CHKEncodeException e) {
+ throw new
LowLevelPutException(LowLevelPutException.INTERNAL_ERROR, e.toString() + ":" +
e.getMessage(), e);
+ } catch (SSKEncodeException e) {
+ throw new
LowLevelPutException(LowLevelPutException.INTERNAL_ERROR, e.toString() + ":" +
e.getMessage(), e);
+ } catch (MalformedURLException e) {
+ throw new
LowLevelPutException(LowLevelPutException.INTERNAL_ERROR, e.toString() + ":" +
e.getMessage(), e);
+ } catch (InsertException e) {
+ throw new
LowLevelPutException(LowLevelPutException.INTERNAL_ERROR, e.toString() + ":" +
e.getMessage(), e);
+ } catch (IOException e) {
+ throw new
LowLevelPutException(LowLevelPutException.INTERNAL_ERROR, e.toString() + ":" +
e.getMessage(), e);
+ } finally {
+ block.copyBucket.free();
+ }
+ final ClientKey key = b.getClientKey();
+ if(block.persistent) {
+ context.jobRunner.queue(new DBJob() {
+
+ public void run(ObjectContainer container, ClientContext context) {
+ if(!container.ext().isStored(SingleBlockInserter.this)) return;
+ container.activate(SingleBlockInserter.this, 1);
+ onEncode(key, container, context);
+ container.deactivate(SingleBlockInserter.this, 1);
+ }
+
+ }, NativeThread.NORM_PRIORITY+1, false);
+ } else {
+ context.mainExecutor.execute(new Runnable() {
+
+ public void run() {
+ onEncode(key, null, context);
+ }
+
+ }, "Got URI");
+
+ }
+ if(b != null)
+ core.realPut(b,
req.cacheLocalRequests);
+ else {
+ Logger.error(this, "Asked to
send empty block on "+SingleBlockInserter.this, new Exception("error"));
return false;
}
+ } catch (LowLevelPutException e) {
+ req.onFailure(e, context);
+ if(logMINOR) Logger.minor(this,
"Request failed: "+SingleBlockInserter.this+" for "+e);
+ return true;
}
- if(parent.isCancelled())
- fail(new
InsertException(InsertException.CANCELLED));
- else
- fail(new
InsertException(InsertException.BUCKET_ERROR, "Empty block", null));
- return false;
+ if(logMINOR) Logger.minor(this, "Request
succeeded: "+SingleBlockInserter.this);
+ req.onInsertSuccess(context);
+ return true;
}
- } catch (LowLevelPutException e) {
- onFailure(e, keyNum);
- if(logMINOR) Logger.minor(this, "Request failed:
"+this+" for "+e);
- return true;
- }
- if(logMINOR) Logger.minor(this, "Request succeeded: "+this);
- onSuccess(keyNum);
- return true;
+
+ };
}
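
[Illustration, not part of the patch] Note how the sender reports the encoded key back on the correct thread: a persistent request queues a DBJob, because only the database thread may touch the ObjectContainer, while a transient request uses the plain executor. That split in isolation, with invented stand-ins for DBJobRunner and the executor:

    import java.util.concurrent.Executor;

    public class KeyReporter {
        interface DBJob { void run(Object container); }
        interface DBJobRunner { void queue(DBJob job, int priority); }

        static void reportEncodedKey(final Runnable onEncoded, boolean persistent,
                DBJobRunner jobRunner, Executor mainExecutor) {
            if(persistent) {
                // Database state may only be touched on the database thread.
                jobRunner.queue(new DBJob() {
                    public void run(Object container) {
                        onEncoded.run();
                    }
                }, 0);
            } else {
                // Transient: any thread will do.
                mainExecutor.execute(onEncoded);
            }
        }
    }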
@Override
- public Object getClient() {
+ public RequestClient getClient(ObjectContainer container) {
+ if(persistent) container.activate(parent, 1);
return parent.getClient();
}
@@ -351,11 +529,15 @@
}
/** Attempt to encode the block, if necessary */
- public void tryEncode() {
+ public void tryEncode(ObjectContainer container, ClientContext context)
{
+ synchronized(this) {
+ if(resultingURI != null) return;
+ if(finished) return;
+ }
try {
- encode();
+ encode(container, context, false);
} catch (InsertException e) {
- fail(e);
+ fail(e, container, context);
} catch (Throwable t) {
Logger.error(this, "Caught "+t, t);
// Don't requeue on BackgroundBlockEncoder.
@@ -363,27 +545,189 @@
}
}
- public boolean canRemove() {
- return true;
- }
-
@Override
- public synchronized Object[] sendableKeys() {
+ public synchronized SendableRequestItem[] sendableKeys(ObjectContainer
container, ClientContext context) {
if(finished)
- return new Object[] {};
+ return new SendableRequestItem[] {};
else
- return new Object[] { Integer.valueOf(0) };
+ return new SendableRequestItem[] {
NullSendableRequestItem.nullItem };
}
@Override
- public synchronized Object[] allKeys() {
- return sendableKeys();
+ public synchronized SendableRequestItem[] allKeys(ObjectContainer
container, ClientContext context) {
+ return sendableKeys(container, context);
}
@Override
- public synchronized Object chooseKey(KeysFetchingLocally ignored) {
+ public synchronized SendableRequestItem chooseKey(KeysFetchingLocally
ignored, ObjectContainer container, ClientContext context) {
if(finished) return null;
- else return Integer.valueOf(0);
+ if(!persistent) {
+ if(ignored.hasTransientInsert(this, new
FakeBlockItem()))
+ return null;
+ }
+ return getBlockItem(container, context);
}
+ private BlockItem getBlockItem(ObjectContainer container, ClientContext
context) {
+ try {
+ synchronized(this) {
+ if(finished) return null;
+ }
+ if(persistent) {
+ if(sourceData == null) {
+ Logger.error(this, "getBlockItem():
sourceData = null but active = "+container.ext().isActive(this));
+ return null;
+ }
+ }
+ boolean deactivateBucket = false;
+ if(persistent) {
+ container.activate(uri, 1);
+ deactivateBucket =
!container.ext().isActive(sourceData);
+ if(deactivateBucket)
+ container.activate(sourceData, 1);
+ }
+ Bucket data = sourceData.createShadow();
+ FreenetURI u = uri;
+ if(u.getKeyType().equals("CHK") && !persistent) u =
FreenetURI.EMPTY_CHK_URI;
+ else u = u.clone();
+ if(data == null) {
+ data =
context.tempBucketFactory.makeBucket(sourceData.size());
+ BucketTools.copy(sourceData, data);
+ }
+ if(persistent) {
+ if(deactivateBucket)
+ container.deactivate(sourceData, 1);
+ container.deactivate(uri, 1);
+ }
+ return new BlockItem(this, data, isMetadata,
compressionCodec, sourceLength, u, hashCode(), persistent);
+ } catch (IOException e) {
+ fail(new InsertException(InsertException.BUCKET_ERROR,
e, null), container, context);
+ return null;
+ }
+ }
+
+ @Override
+ public List<PersistentChosenBlock> makeBlocks(PersistentChosenRequest
request, RequestScheduler sched, ObjectContainer container, ClientContext
context) {
+ BlockItem item = getBlockItem(container, context);
+ if(item == null) return null;
+ PersistentChosenBlock block = new PersistentChosenBlock(true,
request, item, null, null, sched);
+ return Collections.singletonList(block);
+ }
+
+ private static class BlockItem implements SendableRequestItem {
+
+ private final boolean persistent;
+ private final Bucket copyBucket;
+ private final boolean isMetadata;
+ private final short compressionCodec;
+ private final int sourceLength;
+ private final FreenetURI uri;
+ private final int hashCode;
+ /** STRICTLY for purposes of equals() !!! */
+ private final SingleBlockInserter parent;
+
+ BlockItem(SingleBlockInserter parent, Bucket bucket, boolean
meta, short codec, int srclen, FreenetURI u, int hashCode, boolean persistent)
throws IOException {
+ this.parent = parent;
+ this.copyBucket = bucket;
+ this.isMetadata = meta;
+ this.compressionCodec = codec;
+ this.sourceLength = srclen;
+ this.uri = u;
+ this.hashCode = hashCode;
+ this.persistent = persistent;
+ }
+
+ public void dump() {
+ copyBucket.free();
+ }
+
+ public int hashCode() {
+ return hashCode;
+ }
+
+ public boolean equals(Object o) {
+ if(o instanceof BlockItem) {
+ if(((BlockItem)o).parent == parent) return true;
+ } else if(o instanceof FakeBlockItem) {
+ if(((FakeBlockItem)o).getParent() == parent)
return true;
+ }
+ return false;
+ }
+
+ }
+
+ // Used for testing whether a block is already queued.
+ private class FakeBlockItem implements SendableRequestItem {
+
+ public void dump() {
+ // Do nothing
+ }
+
+ public SingleBlockInserter getParent() {
+ return SingleBlockInserter.this;
+ }
+
+ public int hashCode() {
+ return SingleBlockInserter.this.hashCode();
+ }
+
+ public boolean equals(Object o) {
+ if(o instanceof BlockItem) {
+ if(((BlockItem)o).parent ==
SingleBlockInserter.this) return true;
+ } else if(o instanceof FakeBlockItem) {
+ if(((FakeBlockItem)o).getParent() ==
SingleBlockInserter.this) return true;
+ }
+ return false;
+ }
+ }
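
[Illustration, not part of the patch] FakeBlockItem exists so chooseKey() can ask "is a block for this inserter already queued?" without building a real BlockItem, which would shadow or copy the bucket. For the lookup to work, equals() and hashCode() must agree across both classes and in both directions, keyed on the parent's identity, exactly as the two implementations above do. The trick in isolation:

    public class QueueProbe {
        static class RealItem {
            final Object parent;
            RealItem(Object parent) { this.parent = parent; }
            public int hashCode() { return parent.hashCode(); }
            public boolean equals(Object o) {
                if(o instanceof RealItem) return ((RealItem) o).parent == parent;
                if(o instanceof Probe) return ((Probe) o).parent == parent;
                return false;
            }
        }

        // Cheap probe: equal to any RealItem with the same parent, and with
        // a matching hashCode, so it works as a lookup key in a HashSet.
        static class Probe {
            final Object parent;
            Probe(Object parent) { this.parent = parent; }
            public int hashCode() { return parent.hashCode(); }
            public boolean equals(Object o) {
                if(o instanceof RealItem) return ((RealItem) o).parent == parent;
                if(o instanceof Probe) return ((Probe) o).parent == parent;
                return false;
            }
        }
    }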
+
+ public void removeFrom(ObjectContainer container, ClientContext
context) {
+ if(logMINOR) Logger.minor(this, "removeFrom() on "+this);
+ // FIXME remove sourceData ???
+ container.activate(uri, 5);
+ uri.removeFrom(container);
+ if(resultingURI != null) {
+ container.activate(resultingURI, 5);
+ resultingURI.removeFrom(container);
+ }
+ // cb, parent are responsible for removing themselves
+ // ctx is passed in and unmodified - usually the ClientPutter
removes it
+ container.activate(errors, 5);
+ errors.removeFrom(container);
+ if(freeData && sourceData != null &&
container.ext().isStored(sourceData)) {
+ Logger.error(this, "Data not removed!");
+ container.activate(sourceData, 1);
+ sourceData.removeFrom(container);
+ }
+ container.delete(this);
+ }
+
+ @Override
+ public boolean cacheInserts(ObjectContainer container) {
+ boolean deactivate = false;
+ if(persistent) {
+ deactivate = !container.ext().isActive(ctx);
+ if(deactivate)
+ container.activate(ctx, 1);
+ }
+ boolean retval = ctx.cacheLocalRequests;
+ if(deactivate)
+ container.deactivate(ctx, 1);
+ return retval;
+ }
+
+ public boolean objectCanNew(ObjectContainer container) {
+ if(finished) {
+ Logger.error(this, "objectCanNew when already finished
on "+this);
+ return false;
+ }
+ Logger.minor(this, "objectCanNew() on "+this, new
Exception("debug"));
+ return true;
+ }
+//
+// public boolean objectCanUpdate(ObjectContainer container) {
+// Logger.minor(this, "objectCanUpdate() on "+this, new
Exception("debug"));
+// return true;
+// }
+//
}
Modified: trunk/freenet/src/freenet/client/async/SingleFileFetcher.java
===================================================================
--- trunk/freenet/src/freenet/client/async/SingleFileFetcher.java
2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/client/async/SingleFileFetcher.java
2009-04-01 20:34:09 UTC (rev 26322)
@@ -5,18 +5,22 @@
import java.io.IOException;
import java.net.MalformedURLException;
+import java.util.ArrayList;
import java.util.LinkedList;
+import java.util.List;
+import com.db4o.ObjectContainer;
+
import freenet.client.ArchiveContext;
import freenet.client.ArchiveExtractCallback;
import freenet.client.ArchiveFailureException;
+import freenet.client.ArchiveHandler;
import freenet.client.ArchiveManager;
import freenet.client.ArchiveRestartException;
-import freenet.client.ArchiveStoreContext;
import freenet.client.ClientMetadata;
+import freenet.client.FetchContext;
import freenet.client.FetchException;
import freenet.client.FetchResult;
-import freenet.client.FetchContext;
import freenet.client.Metadata;
import freenet.client.MetadataParseException;
import freenet.keys.BaseClientKey;
@@ -26,7 +30,7 @@
import freenet.keys.ClientSSK;
import freenet.keys.FreenetURI;
import freenet.keys.USK;
-import freenet.node.RequestScheduler;
+import freenet.support.LogThresholdCallback;
import freenet.support.Logger;
import freenet.support.api.Bucket;
import freenet.support.compress.CompressionOutputSizeException;
@@ -35,11 +39,22 @@
public class SingleFileFetcher extends SimpleSingleFileFetcher {
- private static boolean logMINOR;
+ private static volatile boolean logMINOR;
+
+ static {
+ Logger.registerLogThresholdCallback(new LogThresholdCallback() {
+
+ @Override
+ public void shouldUpdate() {
+ logMINOR = Logger.shouldLog(Logger.MINOR, this);
+ }
+ });
+ }
+
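[Illustration, not part of the patch] logMINOR is now volatile and refreshed by a registered LogThresholdCallback, instead of being re-read via Logger.shouldLog() in every constructor: the updater thread's write is visible to all logging threads without a lock, and the hot path costs one field read. The pattern in isolation, with an invented registry standing in for Logger:

    public class LogFlag {
        // Visible across threads without locking; rewritten only when the
        // logging threshold actually changes.
        private static volatile boolean logMINOR;

        interface ThresholdCallback { void shouldUpdate(); }

        // Stand-in for Logger.registerLogThresholdCallback(): fires once now
        // and (in the real Logger) again on every threshold change.
        static void register(ThresholdCallback cb) {
            cb.shouldUpdate();
        }

        static boolean minorEnabled() {
            return true; // stand-in for Logger.shouldLog(Logger.MINOR, ...)
        }

        static {
            register(new ThresholdCallback() {
                public void shouldUpdate() {
                    logMINOR = minorEnabled();
                }
            });
        }

        public static void logIfMinor(String msg) {
            if(logMINOR) // hot path: a single volatile read, no lock
                System.out.println(msg);
        }
    }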
/** Original URI */
final FreenetURI uri;
/** Meta-strings. (Path elements that aren't part of a key type) */
- final LinkedList metaStrings;
+ private final ArrayList<String> metaStrings;
/** Number of metaStrings which were added by redirects etc. They are
added to the start, so this is decremented
* when we consume one. */
private int addedMetaStrings;
@@ -48,7 +63,7 @@
private Metadata archiveMetadata;
final ArchiveContext actx;
/** Archive handler. We can only have one archive handler at a time. */
- private ArchiveStoreContext ah;
+ private ArchiveHandler ah;
private int recursionLevel;
/** The URI of the currently-being-processed data, for archives etc. */
private FreenetURI thisKey;
@@ -57,34 +72,38 @@
private final Bucket returnBucket;
/** If true, success/failure is immediately reported to the client, and
therefore we can check TOO_MANY_PATH_COMPONENTS. */
private final boolean isFinal;
- private RequestScheduler sched;
/** Create a new SingleFileFetcher and register self.
* Called when following a redirect, or direct from ClientGet.
* FIXME: Many times where this is called internally we might be better
off using a copy constructor?
*/
public SingleFileFetcher(ClientRequester parent, GetCompletionCallback
cb, ClientMetadata metadata,
- ClientKey key, LinkedList metaStrings, FreenetURI
origURI, int addedMetaStrings, FetchContext ctx,
- ArchiveContext actx, ArchiveStoreContext ah, Metadata
archiveMetadata, int maxRetries, int recursionLevel,
+ ClientKey key, List<String> metaStrings, FreenetURI
origURI, int addedMetaStrings, FetchContext ctx, boolean deleteFetchContext,
+ ArchiveContext actx, ArchiveHandler ah, Metadata
archiveMetadata, int maxRetries, int recursionLevel,
boolean dontTellClientGet, long l, boolean isEssential,
- Bucket returnBucket, boolean isFinal) throws
FetchException {
- super(key, maxRetries, ctx, parent, cb, isEssential, false, l);
- logMINOR = Logger.shouldLog(Logger.MINOR, this);
- if(logMINOR) Logger.minor(this, "Creating SingleFileFetcher for
"+key+" from "+origURI+" meta="+metaStrings.toString(), new Exception("debug"));
+ Bucket returnBucket, boolean isFinal, ObjectContainer
container, ClientContext context) throws FetchException {
+ super(key, maxRetries, ctx, parent, cb, isEssential, false, l,
container, context, deleteFetchContext);
+ if(logMINOR) Logger.minor(this, "Creating SingleFileFetcher for
"+key+" from "+origURI+" meta="+metaStrings.toString()+"
persistent="+persistent, new Exception("debug"));
this.isFinal = isFinal;
this.cancelled = false;
this.returnBucket = returnBucket;
this.dontTellClientGet = dontTellClientGet;
this.ah = ah;
+ if(persistent && ah != null) ah = ah.cloneHandler();
this.archiveMetadata = archiveMetadata;
//this.uri = uri;
//this.key = ClientKey.getBaseKey(uri);
//metaStrings = uri.listMetaStrings();
- this.metaStrings = metaStrings;
+ if(metaStrings instanceof ArrayList && !persistent)
+ this.metaStrings = (ArrayList<String>)metaStrings;
+ else
+ // Always copy if persistent
+ this.metaStrings = new ArrayList<String>(metaStrings);
this.addedMetaStrings = addedMetaStrings;
- this.clientMetadata = metadata;
+ this.clientMetadata = (metadata != null ? (ClientMetadata)
metadata.clone() : new ClientMetadata());
thisKey = key.getURI();
- this.uri = origURI;
+ if(origURI == null) throw new NullPointerException();
+ this.uri = persistent ? origURI.clone() : origURI;
this.actx = actx;
this.recursionLevel = recursionLevel + 1;
if(recursionLevel > ctx.maxRecursionLevel)
@@ -95,10 +114,9 @@
/** Copy constructor, modifies a few given fields, don't call
schedule().
* Used for things like slave fetchers for MultiLevelMetadata,
therefore does not remember returnBucket,
* metaStrings etc. */
- public SingleFileFetcher(SingleFileFetcher fetcher, Metadata newMeta,
GetCompletionCallback callback, FetchContext ctx2) throws FetchException {
+ public SingleFileFetcher(SingleFileFetcher fetcher, boolean persistent,
boolean deleteFetchContext, Metadata newMeta, GetCompletionCallback callback,
FetchContext ctx2, ObjectContainer container, ClientContext context) throws
FetchException {
// Don't add a block, we have already fetched the data, we are
just handling the metadata in a different fetcher.
- super(fetcher.key, fetcher.maxRetries, ctx2, fetcher.parent,
callback, false, true, fetcher.token);
- logMINOR = Logger.shouldLog(Logger.MINOR, this);
+ super(persistent ? fetcher.key.cloneKey() : fetcher.key,
fetcher.maxRetries, ctx2, fetcher.parent, callback, false, true, fetcher.token,
container, context, deleteFetchContext);
if(logMINOR) Logger.minor(this, "Creating SingleFileFetcher for
"+fetcher.key+" meta="+fetcher.metaStrings.toString(), new Exception("debug"));
this.returnBucket = null;
// We expect significant further processing in the parent
@@ -106,34 +124,42 @@
this.dontTellClientGet = fetcher.dontTellClientGet;
this.actx = fetcher.actx;
this.ah = fetcher.ah;
- this.archiveMetadata = fetcher.archiveMetadata;
- this.clientMetadata = (fetcher.clientMetadata != null ?
(ClientMetadata) fetcher.clientMetadata.clone() : null);
+ if(persistent && ah != null) ah = ah.cloneHandler();
+ this.archiveMetadata = null;
+ this.clientMetadata = (fetcher.clientMetadata != null ?
(ClientMetadata) fetcher.clientMetadata.clone() : new ClientMetadata());
this.metadata = newMeta;
- this.metaStrings = new LinkedList();
+ this.metaStrings = new ArrayList<String>();
this.addedMetaStrings = 0;
this.recursionLevel = fetcher.recursionLevel + 1;
if(recursionLevel > ctx.maxRecursionLevel)
throw new
FetchException(FetchException.TOO_MUCH_RECURSION);
- this.thisKey = fetcher.thisKey;
- this.decompressors = fetcher.decompressors;
- this.uri = fetcher.uri;
+ this.thisKey = persistent ? fetcher.thisKey.clone() :
fetcher.thisKey;
+ // Copy the decompressors. Just because a multi-level metadata
splitfile
+ // is compressed, that **doesn't** mean that the data we are
eventually
+ // going to fetch is!
+ this.decompressors = new
LinkedList<COMPRESSOR_TYPE>(fetcher.decompressors);
+ if(fetcher.uri == null) throw new NullPointerException();
+ this.uri = persistent ? fetcher.uri.clone() : fetcher.uri;
}
// Process the completed data. May result in us going to a
// splitfile, or another SingleFileFetcher, etc.
@Override
- public void onSuccess(ClientKeyBlock block, boolean fromStore, Object
token, RequestScheduler sched) {
- this.sched = sched;
+ public void onSuccess(ClientKeyBlock block, boolean fromStore, Object
token, ObjectContainer container, ClientContext context) {
+ if(persistent) {
+ container.activate(parent, 1);
+ container.activate(ctx, 1);
+ }
if(parent instanceof ClientGetter)
- ((ClientGetter)parent).addKeyToBinaryBlob(block);
- parent.completedBlock(fromStore);
+ ((ClientGetter)parent).addKeyToBinaryBlob(block,
container, context);
+ parent.completedBlock(fromStore, container, context);
// Extract data
if(block == null) {
Logger.error(this, "block is null!
fromStore="+fromStore+", token="+token, new Exception("error"));
return;
}
- Bucket data = extract(block, sched);
+ Bucket data = extract(block, container, context);
if(data == null) {
if(logMINOR)
Logger.minor(this, "No data");
@@ -143,50 +169,70 @@
if(logMINOR)
Logger.minor(this, "Block "+(block.isMetadata() ? "is
metadata" : "is not metadata")+" on "+this);
if(!block.isMetadata()) {
- onSuccess(new FetchResult(clientMetadata, data), sched);
+ onSuccess(new FetchResult(clientMetadata, data),
container, context);
} else {
if(!ctx.followRedirects) {
- onFailure(new
FetchException(FetchException.INVALID_METADATA, "Told me not to follow
redirects (splitfile block??)"), sched);
+ onFailure(new
FetchException(FetchException.INVALID_METADATA, "Told me not to follow
redirects (splitfile block??)"), false, container, context);
data.free();
+ if(persistent) data.removeFrom(container);
return;
}
if(parent.isCancelled()) {
- onFailure(new
FetchException(FetchException.CANCELLED), sched);
+ onFailure(new
FetchException(FetchException.CANCELLED), false, container, context);
data.free();
+ if(persistent) data.removeFrom(container);
return;
}
if(data.size() > ctx.maxMetadataSize) {
- onFailure(new
FetchException(FetchException.TOO_BIG_METADATA), sched);
+ onFailure(new
FetchException(FetchException.TOO_BIG_METADATA), false, container, context);
data.free();
+ if(persistent) data.removeFrom(container);
return;
}
// Parse metadata
try {
+ removeMetadata(container);
metadata = Metadata.construct(data);
- wrapHandleMetadata(false);
+ if(persistent)
+ container.store(this);
+ wrapHandleMetadata(false, container, context);
data.free();
+ if(persistent) data.removeFrom(container);
} catch (MetadataParseException e) {
- onFailure(new
FetchException(FetchException.INVALID_METADATA, e), sched);
+ onFailure(new
FetchException(FetchException.INVALID_METADATA, e), false, container, context);
data.free();
+ if(persistent) data.removeFrom(container);
return;
} catch (IOException e) {
// Bucket error?
- onFailure(new
FetchException(FetchException.BUCKET_ERROR, e), sched);
+ onFailure(new
FetchException(FetchException.BUCKET_ERROR, e), false, container, context);
data.free();
+ if(persistent) data.removeFrom(container);
return;
}
}
}
@Override
- protected void onSuccess(FetchResult result, RequestScheduler sched) {
- this.sched = sched;
- unregister(false);
+ protected void onSuccess(FetchResult result, ObjectContainer container,
ClientContext context) {
+ if(persistent) {
+ container.activate(decompressors, 1);
+ container.activate(parent, 1);
+ container.activate(ctx, 1);
+ container.activate(rcb, 1);
+ }
+ removeMetadata(container);
+ removeArchiveMetadata(container);
+ synchronized(this) {
+ // So a SingleKeyListener isn't created.
+ finished = true;
+ }
if(parent.isCancelled()) {
if(logMINOR)
Logger.minor(this, "Parent is cancelled");
result.asBucket().free();
- onFailure(new FetchException(FetchException.CANCELLED),
sched);
+ if(persistent) result.asBucket().removeFrom(container);
+ onFailure(new FetchException(FetchException.CANCELLED),
false, container, context);
return;
}
if(!decompressors.isEmpty()) {
@@ -197,18 +243,25 @@
long maxLen =
Math.max(ctx.maxTempLength, ctx.maxOutputLength);
if(logMINOR)
Logger.minor(this,
"Decompressing "+data+" size "+data.size()+" max length "+maxLen);
- data = c.decompress(data,
ctx.bucketFactory, maxLen, maxLen * 4, decompressors.isEmpty() ? returnBucket :
null);
+ Bucket out = decompressors.isEmpty() ?
returnBucket : null;
+ data = c.decompress(data,
context.getBucketFactory(parent.persistent()), maxLen, maxLen * 4, out);
if(logMINOR)
Logger.minor(this,
"Decompressed to "+data+" size "+data.size());
} catch (IOException e) {
- onFailure(new
FetchException(FetchException.BUCKET_ERROR, e), sched);
+ onFailure(new
FetchException(FetchException.BUCKET_ERROR, e), false, container, context);
return;
} catch (CompressionOutputSizeException e) {
- onFailure(new
FetchException(FetchException.TOO_BIG, e.estimatedSize, (rcb == parent),
result.getMimeType()), sched);
+ if(logMINOR)
+ Logger.minor(this, "Too big:
limit="+ctx.maxOutputLength+" temp="+ctx.maxTempLength);
+ onFailure(new
FetchException(FetchException.TOO_BIG, e.estimatedSize, (rcb == parent),
result.getMimeType()), false, container, context);
return;
}
}
result = new FetchResult(result, data);
+ if(persistent) {
+ container.store(this);
+ container.store(decompressors);
+ }
}
if((!ctx.ignoreTooManyPathComponents) && (!metaStrings.isEmpty()) && isFinal) {
// Some meta-strings left
@@ -217,7 +270,7 @@
// It would be useful to be able to fetch the data ...
// On the other hand such inserts could cause unpredictable results?
// Would be useful to make a redirect to the key we actually fetched.
- rcb.onFailure(new FetchException(FetchException.INVALID_METADATA, "Invalid metadata: too many path components in redirects", thisKey), this);
+ rcb.onFailure(new FetchException(FetchException.INVALID_METADATA, "Invalid metadata: too many path components in redirects", thisKey), this, container, context);
} else {
// TOO_MANY_PATH_COMPONENTS
// report to user
@@ -226,15 +279,17 @@
}
FreenetURI tryURI = uri;
tryURI = tryURI.dropLastMetaStrings(metaStrings.size());
- rcb.onFailure(new FetchException(FetchException.TOO_MANY_PATH_COMPONENTS, result.size(), (rcb == parent), result.getMimeType(), tryURI), this);
+ rcb.onFailure(new FetchException(FetchException.TOO_MANY_PATH_COMPONENTS, result.size(), (rcb == parent), result.getMimeType(), tryURI), this, container, context);
}
result.asBucket().free();
+ if(persistent) result.asBucket().removeFrom(container);
return;
} else if(result.size() > ctx.maxOutputLength) {
- rcb.onFailure(new FetchException(FetchException.TOO_BIG, result.size(), (rcb == parent), result.getMimeType()), this);
+ rcb.onFailure(new FetchException(FetchException.TOO_BIG, result.size(), (rcb == parent), result.getMimeType()), this, container, context);
result.asBucket().free();
+ if(persistent) result.asBucket().removeFrom(container);
} else {
- rcb.onSuccess(result, this);
+ rcb.onSuccess(result, this, container, context);
}
}
@@ -247,7 +302,37 @@
* @throws ArchiveFailureException
* @throws ArchiveRestartException
*/
- private synchronized void handleMetadata() throws FetchException, MetadataParseException, ArchiveFailureException, ArchiveRestartException {
+ private synchronized void handleMetadata(final ObjectContainer container, final ClientContext context) throws FetchException, MetadataParseException, ArchiveFailureException, ArchiveRestartException {
+ if(persistent) {
+ container.activate(this, 2);
+ // ,1's are probably redundant
+ container.activate(metadata, 100);
+ container.activate(metaStrings, Integer.MAX_VALUE);
+ container.activate(thisKey, 5);
+ container.activate(ctx, 2); // for event producer and allowed mime types
+ if(ah != null)
+ ah.activateForExecution(container);
+ container.activate(parent, 1);
+ container.activate(actx, 5);
+ container.activate(clientMetadata, 5);
+ container.activate(rcb, 1);
+ container.activate(returnBucket, 5);
+ }
+ if(uri == null) {
+ if(container != null) {
+ if(container.ext().isActive(this))
+ throw new NullPointerException("SFI "+this+" is active and uri is null!");
+ else
+ throw new NullPointerException("SFI "+this+" is not active!");
+ } else
+ throw new NullPointerException("uri = null on transient SFI?? "+this);
+ }
+ synchronized(this) {
+ if(cancelled)
+ return;
+ // So a SingleKeyListener isn't created.
+ finished = true;
+ }
while(true) {
if(metadata.isSimpleManifest()) {
if(logMINOR) Logger.minor(this, "Is simple
manifest");
@@ -256,14 +341,34 @@
throw new
FetchException(FetchException.NOT_ENOUGH_PATH_COMPONENTS, -1, false, null,
uri.addMetaStrings(new String[] { "" }));
else name = removeMetaString();
// Since metadata is a document, we just
replace metadata here
- if(logMINOR) Logger.minor(this, "Next
meta-string: "+name);
+ if(logMINOR) Logger.minor(this, "Next
meta-string: "+name+" length "+name.length()+" for "+this);
if(name == null) {
- metadata =
metadata.getDefaultDocument();
+ if(!persistent) {
+ metadata =
metadata.getDefaultDocument();
+ } else {
+ Metadata newMeta =
metadata.grabDefaultDocument();
+ metadata.removeFrom(container);
+ metadata = newMeta;
+ container.store(this);
+ container.store(metaStrings);
+ }
if(metadata == null)
throw new
FetchException(FetchException.NOT_ENOUGH_PATH_COMPONENTS, -1, false, null,
uri.addMetaStrings(new String[] { "" }));
} else {
- metadata = metadata.getDocument(name);
- thisKey = thisKey.pushMetaString(name);
+ if(!persistent) {
+ metadata =
metadata.getDocument(name);
+ thisKey =
thisKey.pushMetaString(name);
+ } else {
+ Metadata newMeta =
metadata.grabDocument(name);
+ metadata.removeFrom(container);
+ metadata = newMeta;
+ FreenetURI oldThisKey = thisKey;
+ thisKey =
thisKey.pushMetaString(name);
+ container.store(this);
+ container.store(metaStrings);
+ container.store(thisKey);
+
oldThisKey.removeFrom(container);
+ }
if(metadata == null)
throw new
FetchException(FetchException.NOT_IN_ARCHIVE, "can't find "+name);
}
@@ -273,6 +378,7 @@
if(metaStrings.isEmpty() &&
ctx.returnZIPManifests) {
// Just return the archive, whole.
metadata.setSimpleRedirect();
+ if(persistent)
container.store(metadata);
continue;
}
// First we need the archive metadata.
@@ -280,39 +386,72 @@
// It's more efficient to keep the existing ah if we can, and it is vital in
// the case of binary blobs.
if(ah == null || !ah.getKey().equals(thisKey))
- ah = (ArchiveStoreContext) ctx.archiveManager.makeHandler(thisKey, metadata.getArchiveType(), metadata.getCompressionCodec(), false,
- (parent instanceof ClientGetter ? ((ClientGetter)parent).collectingBinaryBlob() : false));
+ ah = context.archiveManager.makeHandler(thisKey, metadata.getArchiveType(), metadata.getCompressionCodec(),
+ (parent instanceof ClientGetter ? ((ClientGetter)parent).collectingBinaryBlob() : false), persistent);
archiveMetadata = metadata;
+ metadata = null; // Copied to archiveMetadata, so do not need to clear it
// ah is set. This means we are currently handling an archive.
Bucket metadataBucket;
- metadataBucket = ah.getMetadata(actx, null, recursionLevel+1, true);
+ metadataBucket = ah.getMetadata(actx, null, recursionLevel+1, true, context.archiveManager);
if(metadataBucket != null) {
try {
metadata = Metadata.construct(metadataBucket);
+ metadataBucket.free();
} catch (IOException e) {
// Bucket error?
throw new FetchException(FetchException.BUCKET_ERROR, e);
}
+ if(persistent) container.store(this);
} else {
+ final boolean persistent = this.persistent;
fetchArchive(false, archiveMetadata, ArchiveManager.METADATA_NAME, new ArchiveExtractCallback() {
- public void gotBucket(Bucket data) {
+ public void gotBucket(Bucket data, ObjectContainer container, ClientContext context) {
+ if(persistent)
+ container.activate(SingleFileFetcher.this, 1);
+ if(logMINOR) Logger.minor(this, "gotBucket on "+SingleFileFetcher.this+" persistent="+persistent);
try {
metadata = Metadata.construct(data);
+ data.free();
+ if(persistent) data.removeFrom(container);
+ wrapHandleMetadata(true, container, context);
} catch (MetadataParseException e) {
// Invalid metadata
- onFailure(new FetchException(FetchException.INVALID_METADATA, e), sched);
+ onFailure(new FetchException(FetchException.INVALID_METADATA, e), false, container, context);
return;
} catch (IOException e) {
// Bucket error?
- onFailure(new FetchException(FetchException.BUCKET_ERROR, e), sched);
+ onFailure(new FetchException(FetchException.BUCKET_ERROR, e), false, container, context);
return;
}
- wrapHandleMetadata(true);
+ if(persistent)
+ container.deactivate(SingleFileFetcher.this, 1);
}
- public void notInArchive() {
- onFailure(new FetchException(FetchException.INTERNAL_ERROR, "No metadata in container! Cannot happen as ArchiveManager should synthesise some!"), sched);
+ public void notInArchive(ObjectContainer container, ClientContext context) {
+ if(persistent)
+ container.activate(SingleFileFetcher.this, 1);
+ onFailure(new FetchException(FetchException.INTERNAL_ERROR, "No metadata in container! Cannot happen as ArchiveManager should synthesise some!"), false, container, context);
+ if(persistent)
+ container.deactivate(SingleFileFetcher.this, 1);
}
- }); // will result in this function being called again
+ public void onFailed(ArchiveRestartException e, ObjectContainer container, ClientContext context) {
+ if(persistent)
+ container.activate(SingleFileFetcher.this, 1);
+ SingleFileFetcher.this.onFailure(new FetchException(e), false, container, context);
+ if(persistent)
+ container.deactivate(SingleFileFetcher.this, 1);
+ }
+ public void onFailed(ArchiveFailureException e, ObjectContainer container, ClientContext context) {
+ if(persistent)
+ container.activate(SingleFileFetcher.this, 1);
+ SingleFileFetcher.this.onFailure(new FetchException(e), false, container, context);
+ if(persistent)
+ container.deactivate(SingleFileFetcher.this, 1);
+ }
+ public void removeFrom(ObjectContainer container) {
+ container.delete(this);
+ }
+ }, container, context); // will result in this function being called again
+ if(persistent) container.store(this);
return;
}
metadataBucket.free();
@@ -320,8 +459,9 @@
} else if(metadata.isArchiveInternalRedirect()) {
if(logMINOR) Logger.minor(this, "Is
archive-internal redirect");
clientMetadata.mergeNoOverwrite(metadata.getClientMetadata());
+ if(persistent) container.store(clientMetadata);
String mime = clientMetadata.getMIMEType();
- if(mime != null) rcb.onExpectedMIME(mime);
+ if(mime != null) rcb.onExpectedMIME(mime,
container);
if(metaStrings.isEmpty() && isFinal &&
clientMetadata.getMIMETypeNoParams() != null && ctx.allowedMIMETypes != null &&
!ctx.allowedMIMETypes.contains(clientMetadata.getMIMETypeNoParams())) {
throw new
FetchException(FetchException.WRONG_MIME_TYPE, -1, false,
clientMetadata.getMIMEType());
@@ -331,13 +471,13 @@
throw new
FetchException(FetchException.UNKNOWN_METADATA, "Archive redirect not in an
archive manifest");
String filename = metadata.getZIPInternalName();
if(logMINOR) Logger.minor(this, "Fetching
"+filename);
- Bucket dataBucket = ah.get(filename, actx,
null, recursionLevel+1, true);
+ Bucket dataBucket = ah.get(filename, actx,
null, recursionLevel+1, true, context.archiveManager);
if(dataBucket != null) {
if(logMINOR) Logger.minor(this,
"Returning data");
final Bucket out;
try {
// Data will not be freed until
client is finished with it.
- if(returnBucket != null) {
+ if(returnBucket != null ||
persistent) {
out = returnBucket;
BucketTools.copy(dataBucket, out);
dataBucket.free();
@@ -348,11 +488,7 @@
throw new
FetchException(FetchException.BUCKET_ERROR);
}
// Return the data
- ctx.executor.execute(new Runnable() {
- public void run() {
- onSuccess(new
FetchResult(clientMetadata, out), sched);
- }
- }, "SingleFileFetcher onSuccess
callback for "+this);
+ onSuccess(new
FetchResult(clientMetadata, out), container, context);
return;
} else {
@@ -360,30 +496,61 @@
// Metadata cannot contain pointers to files which don't exist.
// We enforce this in ArchiveHandler.
// Therefore, the archive needs to be fetched.
+ final boolean persistent = this.persistent;
fetchArchive(true, archiveMetadata, filename, new ArchiveExtractCallback() {
- public void gotBucket(Bucket data) {
+ public void gotBucket(Bucket data, ObjectContainer container, ClientContext context) {
+ if(persistent)
+ container.activate(SingleFileFetcher.this, 1);
if(logMINOR) Logger.minor(this, "Returning data");
Bucket out;
try {
// Data will not be freed until client is finished with it.
if(returnBucket != null) {
+ if(persistent)
+ container.activate(returnBucket, 5);
out = returnBucket;
BucketTools.copy(data, out);
data.free();
+ if(persistent)
+ data.removeFrom(container);
} else {
out = data;
}
} catch (IOException e) {
- onFailure(new FetchException(FetchException.BUCKET_ERROR), sched);
+ onFailure(new FetchException(FetchException.BUCKET_ERROR), false, container, context);
return;
}
// Return the data
- onSuccess(new FetchResult(clientMetadata, out), sched);
+ onSuccess(new FetchResult(clientMetadata, out), container, context);
+ if(persistent)
+ container.deactivate(SingleFileFetcher.this, 1);
}
- public void notInArchive() {
- onFailure(new FetchException(FetchException.NOT_IN_ARCHIVE), sched);
+ public void notInArchive(ObjectContainer container, ClientContext context) {
+ if(persistent)
+ container.activate(SingleFileFetcher.this, 1);
+ onFailure(new FetchException(FetchException.NOT_IN_ARCHIVE), false, container, context);
+ if(persistent)
+ container.deactivate(SingleFileFetcher.this, 1);
}
- });
+ public void onFailed(ArchiveRestartException e, ObjectContainer container, ClientContext context) {
+ if(persistent)
+ container.activate(SingleFileFetcher.this, 1);
+ SingleFileFetcher.this.onFailure(new FetchException(e), false, container, context);
+ if(persistent)
+ container.deactivate(SingleFileFetcher.this, 1);
+ }
+ public void onFailed(ArchiveFailureException e, ObjectContainer container, ClientContext context) {
+ if(persistent)
+ container.activate(SingleFileFetcher.this, 1);
+ SingleFileFetcher.this.onFailure(new FetchException(e), false, container, context);
+ if(persistent)
+ container.deactivate(SingleFileFetcher.this, 1);
+ }
+ public void removeFrom(ObjectContainer container) {
+ container.delete(this);
+ }
+ }, container, context);
+ removeMetadata(container);
// Will call back into this function when it has been fetched.
return;
}
@@ -391,27 +558,34 @@
if(logMINOR) Logger.minor(this, "Is multi-level
metadata");
// Fetch on a second SingleFileFetcher, like
with archives.
metadata.setSimpleRedirect();
- final SingleFileFetcher f = new
SingleFileFetcher(this, metadata, new MultiLevelMetadataCallback(), ctx);
+ final SingleFileFetcher f = new
SingleFileFetcher(this, persistent, false, metadata, new
MultiLevelMetadataCallback(), ctx, container, context);
// Clear our own metadata so it can be garbage
collected, it will be replaced by whatever is fetched.
+ // The new fetcher has our metadata so we don't
need to removeMetadata().
this.metadata = null;
- ctx.ticker.queueTimedJob(new Runnable() {
- public void run() {
- f.wrapHandleMetadata(true);
- }
- }, 0);
+ if(persistent) container.store(this);
+ if(persistent) container.store(f);
+
+ // We must transition to the sub-fetcher so
that if the request is cancelled, it will get deleted.
+ parent.onTransition(this, f, container);
+
+ f.wrapHandleMetadata(true, container, context);
+ if(persistent) container.deactivate(f, 1);
return;
} else if(metadata.isSingleFileRedirect()) {
if(logMINOR) Logger.minor(this, "Is single-file
redirect");
clientMetadata.mergeNoOverwrite(metadata.getClientMetadata()); // even
splitfiles can have mime types!
+ if(persistent) container.store(clientMetadata);
String mime = clientMetadata.getMIMEType();
- if(mime != null) rcb.onExpectedMIME(mime);
+ if(mime != null) rcb.onExpectedMIME(mime,
container);
String mimeType =
clientMetadata.getMIMETypeNoParams();
if(mimeType != null &&
ArchiveManager.ARCHIVE_TYPE.isUsableArchiveType(mimeType) && metaStrings.size()
> 0) {
// Looks like an implicit archive,
handle as such
metadata.setArchiveManifest();
+ if(persistent)
container.store(metadata);
// Pick up MIME type from inside archive
clientMetadata.clear();
+ if(persistent)
container.store(clientMetadata);
if(logMINOR) Logger.minor(this,
"Handling implicit container... (redirect)");
continue;
}
@@ -440,36 +614,41 @@
} catch (MalformedURLException e) {
throw new FetchException(FetchException.INVALID_URI, e);
}
- LinkedList newMetaStrings = newURI.listMetaStrings();
+ ArrayList<String> newMetaStrings = newURI.listMetaStrings();
// Move any new meta strings to beginning of our list of remaining meta strings
while(!newMetaStrings.isEmpty()) {
- Object o = newMetaStrings.removeLast();
- metaStrings.addFirst(o);
+ String o = newMetaStrings.remove(newMetaStrings.size()-1);
+ metaStrings.add(0, o);
addedMetaStrings++;
}
- final SingleFileFetcher f = new SingleFileFetcher(parent, rcb, clientMetadata, redirectedKey, metaStrings, this.uri, addedMetaStrings, ctx, actx, ah, archiveMetadata, maxRetries, recursionLevel, false, token, true, returnBucket, isFinal);
+ final SingleFileFetcher f = new SingleFileFetcher(parent, rcb, clientMetadata, redirectedKey, metaStrings, this.uri, addedMetaStrings, ctx, deleteFetchContext, actx, ah, archiveMetadata, maxRetries, recursionLevel, false, token, true, returnBucket, isFinal, container, context);
+ this.deleteFetchContext = false;
if((redirectedKey instanceof ClientCHK) && !((ClientCHK)redirectedKey).isMetadata())
- rcb.onBlockSetFinished(this);
+ rcb.onBlockSetFinished(this, container, context);
if(metadata.isCompressed()) {
COMPRESSOR_TYPE codec = metadata.getCompressionCodec();
f.addDecompressor(codec);
}
- parent.onTransition(this, f);
- ctx.slowSerialExecutor[parent.priorityClass].execute(new Runnable() {
- public void run() {
- f.schedule();
- }
- }, "Schedule "+this);
+ parent.onTransition(this, f, container);
+ if(persistent) {
+ container.store(metaStrings);
+ container.store(f); // Store *before* scheduling to avoid activation problems.
+ container.store(this);
+ }
+ f.schedule(container, context);
// All done! No longer our problem!
+ archiveMetadata = null; // passed on
+ if(persistent) removeFrom(container, context);
return;
} else if(metadata.isSplitfile()) {
if(logMINOR) Logger.minor(this, "Fetching splitfile");
clientMetadata.mergeNoOverwrite(metadata.getClientMetadata()); // even splitfiles can have mime types!
+ if(persistent) container.store(clientMetadata);
String mime = clientMetadata.getMIMEType();
- if(mime != null) rcb.onExpectedMIME(mime);
+ if(mime != null) rcb.onExpectedMIME(mime, container);
String mimeType = clientMetadata.getMIMETypeNoParams();
if(mimeType != null && ArchiveManager.ARCHIVE_TYPE.isUsableArchiveType(mimeType) && metaStrings.size() > 0) {
@@ -477,13 +656,20 @@
metadata.setArchiveManifest();
// Pick up MIME type from inside archive
clientMetadata.clear();
+ if(persistent) {
+ container.store(metadata);
+ container.store(clientMetadata);
+ }
if(logMINOR) Logger.minor(this, "Handling implicit container... (splitfile)");
continue;
}
if(metaStrings.isEmpty() && isFinal && mimeType != null && ctx.allowedMIMETypes != null && !ctx.allowedMIMETypes.contains(mimeType)) {
- throw new FetchException(FetchException.WRONG_MIME_TYPE, metadata.uncompressedDataLength(), false, clientMetadata.getMIMEType());
+ // Just in case...
+ long len = metadata.uncompressedDataLength();
+ if(persistent) removeFrom(container, context);
+ throw new FetchException(FetchException.WRONG_MIME_TYPE, len, false, clientMetadata.getMIMEType());
}
// Splitfile (possibly compressed)
@@ -491,6 +677,8 @@
if(metadata.isCompressed()) {
COMPRESSOR_TYPE codec = metadata.getCompressionCodec();
addDecompressor(codec);
+ if(persistent)
+ container.store(decompressors);
}
if(isFinal && !ctx.ignoreTooManyPathComponents) {
@@ -501,14 +689,16 @@
// It would be useful to be able to fetch the data ...
// On the other hand such inserts could cause unpredictable results?
// Would be useful to make a redirect to the key we actually fetched.
- rcb.onFailure(new FetchException(FetchException.INVALID_METADATA, "Invalid metadata: too many path components in redirects", thisKey), this);
+ rcb.onFailure(new FetchException(FetchException.INVALID_METADATA, "Invalid metadata: too many path components in redirects", thisKey), this, container, context);
} else {
// TOO_MANY_PATH_COMPONENTS
// report to user
FreenetURI tryURI = uri;
tryURI = tryURI.dropLastMetaStrings(metaStrings.size());
- rcb.onFailure(new FetchException(FetchException.TOO_MANY_PATH_COMPONENTS, metadata.uncompressedDataLength(), (rcb == parent), clientMetadata.getMIMEType(), tryURI), this);
+ rcb.onFailure(new FetchException(FetchException.TOO_MANY_PATH_COMPONENTS, metadata.uncompressedDataLength(), (rcb == parent), clientMetadata.getMIMEType(), tryURI), this, container, context);
}
+ // Just in case...
+ if(persistent) removeFrom(container, context);
return;
}
} else
@@ -520,41 +710,60 @@
if((len > ctx.maxOutputLength) || (len > ctx.maxTempLength)) {
-
- throw new FetchException(FetchException.TOO_BIG, len, isFinal && decompressors.size() <= (metadata.isCompressed() ? 1 : 0), clientMetadata.getMIMEType());
+ // Just in case...
+ boolean compressed = metadata.isCompressed();
+ if(persistent) removeFrom(container, context);
+ throw new FetchException(FetchException.TOO_BIG, len, isFinal && decompressors.size() <= (compressed ? 1 : 0), clientMetadata.getMIMEType());
}
- SplitFileFetcher sf = new SplitFileFetcher(metadata, rcb, parent, ctx,
- decompressors, clientMetadata, actx, recursionLevel, returnBucket, token);
- parent.onTransition(this, sf);
- sf.scheduleOffThread();
- rcb.onBlockSetFinished(this);
+ SplitFileFetcher sf = new SplitFileFetcher(metadata, rcb, parent, ctx, deleteFetchContext,
+ decompressors, clientMetadata, actx, recursionLevel, returnBucket, token, container, context);
+ this.deleteFetchContext = false;
+ if(persistent) {
+ container.store(sf); // Avoid problems caused by storing a deactivated sf
+ if(!container.ext().isActive(parent)) {
+ container.activate(parent, 1);
+ Logger.error(this, "Not active: "+parent);
+ }
+ }
+ parent.onTransition(this, sf, container);
+ try {
+ sf.schedule(container, context);
+ } catch (KeyListenerConstructionException e) {
+ onFailure(e.getFetchException(), false, container, context);
+ if(persistent) container.deactivate(sf, 1);
+ return;
+ }
+ if(persistent) container.deactivate(sf, 1);
+ rcb.onBlockSetFinished(this, container, context);
// Clear our own metadata, we won't need it any more.
+ // Note that SplitFileFetcher() above will have used the keys from the metadata,
+ // and will have removed them from it so they don't get removed here.
+ // Lack of garbage collection in db4o is a PITA!
// For multi-level metadata etc see above.
- metadata = null;
-
- // SplitFile will now run.
- // Then it will return data to rcd.
- // We are now out of the loop. Yay!
+ if(persistent) removeFrom(container, context);
return;
} else {
Logger.error(this, "Don't know what to do with metadata: "+metadata);
+ removeMetadata(container);
throw new FetchException(FetchException.UNKNOWN_METADATA);
}
}
}
private String removeMetaString() {
- String name = (String) metaStrings.removeFirst();
+ String name = (String) metaStrings.remove(0);
if(addedMetaStrings > 0) addedMetaStrings--;
return name;
}
private void addDecompressor(COMPRESSOR_TYPE codec) {
- decompressors.addLast(codec);
+ if(logMINOR)
+ Logger.minor(this, "Adding decompressor: "+codec+" on "+this, new Exception("debug"));
+ decompressors.add(codec);
}
- private void fetchArchive(boolean forData, Metadata meta, String element, ArchiveExtractCallback callback) throws FetchException, MetadataParseException, ArchiveFailureException, ArchiveRestartException {
+ private void fetchArchive(boolean forData, Metadata meta, String element, ArchiveExtractCallback callback, final ObjectContainer container, ClientContext context) throws FetchException, MetadataParseException, ArchiveFailureException, ArchiveRestartException {
if(logMINOR) Logger.minor(this, "fetchArchive()");
// Fetch the archive
// How?
@@ -565,89 +774,175 @@
Metadata newMeta = (Metadata) meta.clone();
newMeta.setSimpleRedirect();
final SingleFileFetcher f;
- f = new SingleFileFetcher(this, newMeta, new ArchiveFetcherCallback(forData, element, callback), new FetchContext(ctx, FetchContext.SET_RETURN_ARCHIVES, true));
+ f = new SingleFileFetcher(this, persistent, true, newMeta, new ArchiveFetcherCallback(forData, element, callback), new FetchContext(ctx, FetchContext.SET_RETURN_ARCHIVES, true, null), container, context);
+ if(persistent) container.store(f);
if(logMINOR) Logger.minor(this, "fetchArchive(): "+f);
- ctx.executor.execute(new Runnable() {
- public void run() {
- // Fetch the archive. The archive fetcher callback will unpack it, and either call the element
- // callback, or just go back around handleMetadata() on this, which will see that the data is now
- // available.
- f.wrapHandleMetadata(true);
- }
- }, "Fetching archive for "+this);
+ // Fetch the archive. The archive fetcher callback will unpack it, and either call the element
+ // callback, or just go back around handleMetadata() on this, which will see that the data is now
+ // available.
+
+ // We need to transition here, so that everything gets deleted if we are cancelled during the archive fetch phase.
+ if(persistent) container.activate(parent, 1);
+ parent.onTransition(this, f, container);
+
+ f.wrapHandleMetadata(true, container, context);
+ if(persistent) container.deactivate(f, 1);
}
/**
* Call handleMetadata(), and deal with any resulting exceptions
*/
- private void wrapHandleMetadata(boolean notFinalizedSize) {
+ private void wrapHandleMetadata(final boolean notFinalizedSize, ObjectContainer container, final ClientContext context) {
+ if(!persistent)
+ innerWrapHandleMetadata(notFinalizedSize, container, context);
+ else {
+ if(!context.jobRunner.onDatabaseThread())
+ context.jobRunner.queue(new DBJob() {
+ public void run(ObjectContainer container, ClientContext context) {
+ if(container.ext().isActive(SingleFileFetcher.this))
+ Logger.error(this, "ALREADY ACTIVE in SFF callback: "+SingleFileFetcher.this);
+ container.activate(SingleFileFetcher.this, 1);
+ innerWrapHandleMetadata(notFinalizedSize, container, context);
+ container.deactivate(SingleFileFetcher.this, 1);
+ }
+ }, parent.getPriorityClass(), false);
+ else
+ innerWrapHandleMetadata(notFinalizedSize, container, context);
+ }
+ }
+ }
+
+ protected void innerWrapHandleMetadata(boolean notFinalizedSize, ObjectContainer container, ClientContext context) {
try {
- handleMetadata();
+ handleMetadata(container, context);
} catch (MetadataParseException e) {
- onFailure(new FetchException(FetchException.INVALID_METADATA, e), sched);
+ onFailure(new FetchException(FetchException.INVALID_METADATA, e), false, container, context);
} catch (FetchException e) {
if(notFinalizedSize)
e.setNotFinalizedSize();
- onFailure(e, sched);
+ onFailure(e, false, container, context);
} catch (ArchiveFailureException e) {
- onFailure(new FetchException(e), sched);
+ onFailure(new FetchException(e), false, container, context);
} catch (ArchiveRestartException e) {
- onFailure(new FetchException(e), sched);
+ onFailure(new FetchException(e), false, container, context);
}
}
-
+
class ArchiveFetcherCallback implements GetCompletionCallback {
private final boolean wasFetchingFinalData;
private final String element;
private final ArchiveExtractCallback callback;
+ /** For activation we need to know whether we are persistent even though the parent may not have been activated yet */
+ private final boolean persistent;
ArchiveFetcherCallback(boolean wasFetchingFinalData, String element, ArchiveExtractCallback cb) {
this.wasFetchingFinalData = wasFetchingFinalData;
this.element = element;
this.callback = cb;
+ this.persistent = SingleFileFetcher.this.persistent;
}
- public void onSuccess(FetchResult result, ClientGetState state) {
+ public void onSuccess(FetchResult result, ClientGetState state, ObjectContainer container, ClientContext context) {
+ if(!persistent) {
+ // Run directly - we are running on some thread somewhere, don't worry about it.
+ innerSuccess(result, container, context);
+ } else {
+ boolean wasActive;
+ // We are running on the database thread.
+ // Add a tag, unpack on a separate thread, copy the data to a persistent bucket, then schedule on the database thread,
+ // remove the tag, and call the callback.
+ wasActive = container.ext().isActive(SingleFileFetcher.this);
+ if(!wasActive)
+ container.activate(SingleFileFetcher.this, 1);
+ container.activate(parent, 1);
+ parent.onTransition(state, SingleFileFetcher.this, container);
+ if(persistent)
+ container.activate(actx, 1);
+ ah.activateForExecution(container);
+ ah.extractPersistentOffThread(result.asBucket(), true, actx, element, callback, container, context);
+ if(!wasActive)
+ container.deactivate(SingleFileFetcher.this, 1);
+ if(state != null)
+ state.removeFrom(container, context);
+ container.delete(this);
+ }
+ }
+
+ private void innerSuccess(FetchResult result, ObjectContainer container, ClientContext context) {
try {
- ah.extractToCache(result.asBucket(), actx, element, callback);
+ ah.extractToCache(result.asBucket(), actx, element, callback, context.archiveManager, container, context);
} catch (ArchiveFailureException e) {
- SingleFileFetcher.this.onFailure(new FetchException(e), sched);
+ SingleFileFetcher.this.onFailure(new FetchException(e), false, container, context);
return;
} catch (ArchiveRestartException e) {
- SingleFileFetcher.this.onFailure(new FetchException(e), sched);
+ SingleFileFetcher.this.onFailure(new FetchException(e), false, container, context);
return;
} finally {
result.asBucket().free();
+ if(persistent) result.asBucket().removeFrom(container);
}
if(callback != null) return;
- wrapHandleMetadata(true);
+ wrapHandleMetadata(true, container, context);
}
- public void onFailure(FetchException e, ClientGetState state) {
+ public void onFailure(FetchException e, ClientGetState state, ObjectContainer container, ClientContext context) {
+ boolean wasActive = true;
+ if(persistent) {
+ wasActive = container.ext().isActive(SingleFileFetcher.this);
+ if(!wasActive)
+ container.activate(SingleFileFetcher.this, 1);
+ }
// Force fatal as the fetcher is presumed to have made a reasonable effort.
- SingleFileFetcher.this.onFailure(e, true, sched);
+ SingleFileFetcher.this.onFailure(e, true, container, context);
+ if(!wasActive)
+ container.deactivate(SingleFileFetcher.this, 1);
+ if(persistent) {
+ if(state != null)
+ state.removeFrom(container, context);
+ container.delete(this);
+ callback.removeFrom(container);
+ }
}
- public void onBlockSetFinished(ClientGetState state) {
+ public void onBlockSetFinished(ClientGetState state, ObjectContainer container, ClientContext context) {
+ boolean wasActive = true;
+ if(persistent) {
+ wasActive = container.ext().isActive(SingleFileFetcher.this);
+ if(!wasActive)
+ container.activate(SingleFileFetcher.this, 1);
+ }
+ if(persistent)
+ container.activate(rcb, 1);
if(wasFetchingFinalData) {
- rcb.onBlockSetFinished(SingleFileFetcher.this);
+ rcb.onBlockSetFinished(SingleFileFetcher.this, container, context);
}
+ if(!wasActive)
+ container.deactivate(SingleFileFetcher.this, 1);
}
- public void onTransition(ClientGetState oldState, ClientGetState newState) {
+ public void onTransition(ClientGetState oldState, ClientGetState newState, ObjectContainer container) {
// Ignore
}
- public void onExpectedMIME(String mime) {
+ public void onExpectedMIME(String mime, ObjectContainer container) {
// Ignore
}
- public void onExpectedSize(long size) {
- rcb.onExpectedSize(size);
+ public void onExpectedSize(long size, ObjectContainer container) {
+ boolean wasActive = true;
+ if(persistent) {
+ wasActive = container.ext().isActive(SingleFileFetcher.this);
+ if(!wasActive)
+ container.activate(SingleFileFetcher.this, 1);
+ }
+ if(persistent)
+ container.activate(rcb, 1);
+ rcb.onExpectedSize(size, container);
+ if(!wasActive)
+ container.deactivate(SingleFileFetcher.this, 1);
}
- public void onFinalizedMetadata() {
+ public void onFinalizedMetadata(ObjectContainer container) {
// Ignore
}
@@ -655,44 +950,96 @@
class MultiLevelMetadataCallback implements GetCompletionCallback {
- public void onSuccess(FetchResult result, ClientGetState state) {
+ private final boolean persistent;
+
+ MultiLevelMetadataCallback() {
+ this.persistent = SingleFileFetcher.this.persistent;
+ }
+
+ public void onSuccess(FetchResult result, ClientGetState state, ObjectContainer container, ClientContext context) {
+ boolean wasActive = true;
+ if(persistent) {
+ wasActive = container.ext().isActive(SingleFileFetcher.this);
+ container.activate(SingleFileFetcher.this, 1);
+ container.activate(parent, 1);
+ }
try {
- metadata = Metadata.construct(result.asBucket());
+ parent.onTransition(state, SingleFileFetcher.this, container);
+ Metadata meta = Metadata.construct(result.asBucket());
+ removeMetadata(container);
+ synchronized(SingleFileFetcher.this) {
+ metadata = meta;
+ }
+ if(persistent) {
+ container.store(meta);
+ container.store(SingleFileFetcher.this);
+ }
+ wrapHandleMetadata(true, container, context);
} catch (MetadataParseException e) {
- SingleFileFetcher.this.onFailure(new FetchException(FetchException.INVALID_METADATA, e), sched);
- return;
+ SingleFileFetcher.this.onFailure(new FetchException(FetchException.INVALID_METADATA, e), false, container, context);
} catch (IOException e) {
// Bucket error?
- SingleFileFetcher.this.onFailure(new FetchException(FetchException.BUCKET_ERROR, e), sched);
- return;
+ SingleFileFetcher.this.onFailure(new FetchException(FetchException.BUCKET_ERROR, e), false, container, context);
} finally {
result.asBucket().free();
+ if(persistent)
+ result.asBucket().removeFrom(container);
}
- wrapHandleMetadata(true);
+ if(!wasActive)
+ container.deactivate(SingleFileFetcher.this, 1);
+ if(persistent) {
+ if(state != null) state.removeFrom(container, context);
+ container.delete(this);
+ }
}
- public void onFailure(FetchException e, ClientGetState state) {
+ public void onFailure(FetchException e, ClientGetState state, ObjectContainer container, ClientContext context) {
+ boolean wasActive = true;
+ if(persistent) {
+ wasActive = container.ext().isActive(SingleFileFetcher.this);
+ container.activate(SingleFileFetcher.this, 1);
+ }
// Pass it on; fetcher is assumed to have retried as appropriate already, so this is fatal.
- SingleFileFetcher.this.onFailure(e, true, sched);
+ SingleFileFetcher.this.onFailure(e, true, container, context);
+ if(!wasActive)
+ container.deactivate(SingleFileFetcher.this, 1);
+ if(persistent) {
+ if(state != null)
+ state.removeFrom(container, context);
+ // Conceivably removeFrom() could deactivate this, so don't recheck
+ container.delete(this);
+ }
}
- public void onBlockSetFinished(ClientGetState state) {
+ public void onBlockSetFinished(ClientGetState state, ObjectContainer container, ClientContext context) {
// Ignore as we are fetching metadata here
}
- public void onTransition(ClientGetState oldState, ClientGetState newState) {
+ public void onTransition(ClientGetState oldState, ClientGetState newState, ObjectContainer container) {
// Ignore
}
- public void onExpectedMIME(String mime) {
+ public void onExpectedMIME(String mime, ObjectContainer container) {
// Ignore
}
- public void onExpectedSize(long size) {
- rcb.onExpectedSize(size);
+ public void onExpectedSize(long size, ObjectContainer container) {
+ boolean wasActive = true;
+ boolean cbWasActive = true;
+ if(persistent) {
+ wasActive = container.ext().isActive(SingleFileFetcher.this);
+ container.activate(SingleFileFetcher.this, 1);
+ cbWasActive = container.ext().isActive(rcb);
+ container.activate(rcb, 1);
+ }
+ rcb.onExpectedSize(size, container);
+ if(!wasActive)
+ container.deactivate(SingleFileFetcher.this, 1);
+ if(!cbWasActive)
+ container.deactivate(rcb, 1);
}
- public void onFinalizedMetadata() {
+ public void onFinalizedMetadata(ObjectContainer container) {
// Ignore
}
@@ -707,56 +1054,56 @@
* Create a fetcher for a key.
*/
public static ClientGetState create(ClientRequester requester, GetCompletionCallback cb,
- ClientMetadata clientMetadata, FreenetURI uri, FetchContext ctx, ArchiveContext actx,
+ FreenetURI uri, FetchContext ctx, ArchiveContext actx,
int maxRetries, int recursionLevel, boolean dontTellClientGet, long l, boolean isEssential,
- Bucket returnBucket, boolean isFinal) throws MalformedURLException, FetchException {
+ Bucket returnBucket, boolean isFinal, ObjectContainer container, ClientContext context) throws MalformedURLException, FetchException {
BaseClientKey key = BaseClientKey.getBaseKey(uri);
- if((clientMetadata == null || clientMetadata.isTrivial()) && (!uri.hasMetaStrings()) &&
+ if((!uri.hasMetaStrings()) &&
ctx.allowSplitfiles == false && ctx.followRedirects == false &&
returnBucket == null && key instanceof ClientKey)
- return new SimpleSingleFileFetcher((ClientKey)key, maxRetries, ctx, requester, cb, isEssential, false, l);
+ return new SimpleSingleFileFetcher((ClientKey)key, maxRetries, ctx, requester, cb, isEssential, false, l, container, context, false);
if(key instanceof ClientKey)
- return new SingleFileFetcher(requester, cb, clientMetadata, (ClientKey)key, uri.listMetaStrings(), uri, 0, ctx, actx, null, null, maxRetries, recursionLevel, dontTellClientGet, l, isEssential, returnBucket, isFinal);
+ return new SingleFileFetcher(requester, cb, null, (ClientKey)key, new ArrayList<String>(uri.listMetaStrings()), uri, 0, ctx, false, actx, null, null, maxRetries, recursionLevel, dontTellClientGet, l, isEssential, returnBucket, isFinal, container, context);
else {
- return uskCreate(requester, cb, clientMetadata, (USK)key, uri.listMetaStrings(), ctx, actx, maxRetries, recursionLevel, dontTellClientGet, l, isEssential, returnBucket, isFinal);
+ return uskCreate(requester, cb, (USK)key, new ArrayList<String>(uri.listMetaStrings()), ctx, actx, maxRetries, recursionLevel, dontTellClientGet, l, isEssential, returnBucket, isFinal, container, context);
}
}
- private static ClientGetState uskCreate(ClientRequester requester, GetCompletionCallback cb, ClientMetadata clientMetadata, USK usk, LinkedList metaStrings, FetchContext ctx, ArchiveContext actx, int maxRetries, int recursionLevel, boolean dontTellClientGet, long l, boolean isEssential, Bucket returnBucket, boolean isFinal) throws FetchException {
+ private static ClientGetState uskCreate(ClientRequester requester, GetCompletionCallback cb, USK usk, ArrayList<String> metaStrings, FetchContext ctx, ArchiveContext actx, int maxRetries, int recursionLevel, boolean dontTellClientGet, long l, boolean isEssential, Bucket returnBucket, boolean isFinal, ObjectContainer container, ClientContext context) throws FetchException {
if(usk.suggestedEdition >= 0) {
// Return the latest known version but at least suggestedEdition.
- long edition = ctx.uskManager.lookup(usk);
+ long edition = context.uskManager.lookup(usk);
if(edition <= usk.suggestedEdition) {
// Background fetch - start background fetch first so can pick up updates in the datastore during registration.
- ctx.uskManager.startTemporaryBackgroundFetcher(usk);
- edition = ctx.uskManager.lookup(usk);
+ context.uskManager.startTemporaryBackgroundFetcher(usk, context);
+ edition = context.uskManager.lookup(usk);
if(edition > usk.suggestedEdition) {
if(logMINOR) Logger.minor(SingleFileFetcher.class, "Redirecting to edition "+edition);
- cb.onFailure(new FetchException(FetchException.PERMANENT_REDIRECT, usk.copy(edition).getURI().addMetaStrings(metaStrings)), null);
+ cb.onFailure(new FetchException(FetchException.PERMANENT_REDIRECT, usk.copy(edition).getURI().addMetaStrings(metaStrings)), null, container, context);
return null;
} else {
// Transition to SingleFileFetcher
GetCompletionCallback myCB =
- new USKProxyCompletionCallback(usk, ctx.uskManager, cb);
+ new USKProxyCompletionCallback(usk, cb, requester.persistent());
// Want to update the latest known good iff the fetch succeeds.
SingleFileFetcher sf =
- new SingleFileFetcher(requester, myCB, clientMetadata, usk.getSSK(), metaStrings,
- usk.getURI().addMetaStrings(metaStrings), 0, ctx, actx, null, null, maxRetries, recursionLevel,
- dontTellClientGet, l, isEssential, returnBucket, isFinal);
+ new SingleFileFetcher(requester, myCB, null, usk.getSSK(), metaStrings,
+ usk.getURI().addMetaStrings(metaStrings), 0, ctx, false, actx, null, null, maxRetries, recursionLevel,
+ dontTellClientGet, l, isEssential, returnBucket, isFinal, container, context);
return sf;
}
} else {
- cb.onFailure(new FetchException(FetchException.PERMANENT_REDIRECT, usk.copy(edition).getURI().addMetaStrings(metaStrings)), null);
+ cb.onFailure(new FetchException(FetchException.PERMANENT_REDIRECT, usk.copy(edition).getURI().addMetaStrings(metaStrings)), null, container, context);
return null;
}
} else {
// Do a thorough, blocking search
- USKFetcher fetcher =
- ctx.uskManager.getFetcher(usk.copy(-usk.suggestedEdition), ctx, requester, false);
+ USKFetcherTag tag =
+ context.uskManager.getFetcher(usk.copy(-usk.suggestedEdition), ctx, false, requester.persistent(),
+ new MyUSKFetcherCallback(requester, cb, usk, metaStrings, ctx, actx, maxRetries, recursionLevel, dontTellClientGet, l, returnBucket, requester.persistent()), false, container, context);
if(isEssential)
- requester.addMustSucceedBlocks(1);
- fetcher.addCallback(new MyUSKFetcherCallback(requester, cb, clientMetadata, usk, metaStrings, ctx, actx, maxRetries, recursionLevel, dontTellClientGet, l, returnBucket));
- return fetcher;
+ requester.addMustSucceedBlocks(1, container);
+ return tag;
}
}
@@ -764,9 +1111,8 @@
final ClientRequester parent;
final GetCompletionCallback cb;
- final ClientMetadata clientMetadata;
final USK usk;
- final LinkedList metaStrings;
+ final ArrayList<String> metaStrings;
final FetchContext ctx;
final ArchiveContext actx;
final int maxRetries;
@@ -774,11 +1120,11 @@
final boolean dontTellClientGet;
final long token;
final Bucket returnBucket;
+ final boolean persistent;
- public MyUSKFetcherCallback(ClientRequester requester, GetCompletionCallback cb, ClientMetadata clientMetadata, USK usk, LinkedList metaStrings, FetchContext ctx, ArchiveContext actx, int maxRetries, int recursionLevel, boolean dontTellClientGet, long l, Bucket returnBucket) {
+ public MyUSKFetcherCallback(ClientRequester requester, GetCompletionCallback cb, USK usk, ArrayList<String> metaStrings, FetchContext ctx, ArchiveContext actx, int maxRetries, int recursionLevel, boolean dontTellClientGet, long l, Bucket returnBucket, boolean persistent) {
this.parent = requester;
this.cb = cb;
- this.clientMetadata = clientMetadata;
this.usk = usk;
this.metaStrings = metaStrings;
this.ctx = ctx;
@@ -788,29 +1134,48 @@
this.dontTellClientGet = dontTellClientGet;
this.token = l;
this.returnBucket = returnBucket;
+ this.persistent = persistent;
}
- public void onFoundEdition(long l, USK newUSK) {
+ public void onFoundEdition(long l, USK newUSK, ObjectContainer container, ClientContext context, boolean metadata, short codec, byte[] data) {
+ if(persistent)
+ container.activate(this, 2);
ClientSSK key = usk.getSSK(l);
try {
if(l == usk.suggestedEdition) {
- SingleFileFetcher sf = new SingleFileFetcher(parent, cb, clientMetadata, key, metaStrings, key.getURI().addMetaStrings(metaStrings),
- 0, ctx, actx, null, null, maxRetries, recursionLevel+1, dontTellClientGet, token, false, returnBucket, true);
- sf.schedule();
+ SingleFileFetcher sf = new SingleFileFetcher(parent, cb, null, key, metaStrings, key.getURI().addMetaStrings(metaStrings),
+ 0, ctx, false, actx, null, null, maxRetries, recursionLevel+1, dontTellClientGet, token, false, returnBucket, true, container, context);
+ sf.schedule(container, context);
+ if(persistent) removeFrom(container);
} else {
- cb.onFailure(new FetchException(FetchException.PERMANENT_REDIRECT, newUSK.getURI().addMetaStrings(metaStrings)), null);
+ cb.onFailure(new FetchException(FetchException.PERMANENT_REDIRECT, newUSK.getURI().addMetaStrings(metaStrings)), null, container, context);
+ if(persistent) removeFrom(container);
}
} catch (FetchException e) {
- cb.onFailure(e, null);
+ cb.onFailure(e, null, container, context);
+ if(persistent) removeFrom(container);
}
}
+
+ private void removeFrom(ObjectContainer container) {
+ container.delete(metaStrings);
+ container.delete(this);
+ container.activate(usk, 5);
+ usk.removeFrom(container);
+ }
- public void onFailure() {
- cb.onFailure(new FetchException(FetchException.DATA_NOT_FOUND, "No USK found"), null);
+ public void onFailure(ObjectContainer container, ClientContext context) {
+ if(persistent)
+ container.activate(this, 2);
+ cb.onFailure(new FetchException(FetchException.DATA_NOT_FOUND, "No USK found"), null, container, context);
+ if(persistent) removeFrom(container);
}
- public void onCancelled() {
- cb.onFailure(new FetchException(FetchException.CANCELLED, (String)null), null);
+ public void onCancelled(ObjectContainer container, ClientContext context) {
+ if(persistent)
+ container.activate(this, 2);
+ cb.onFailure(new FetchException(FetchException.CANCELLED, (String)null), null, container, context);
+ if(persistent) removeFrom(container);
}
public short getPollingPriorityNormal() {
@@ -822,5 +1187,47 @@
}
}
+
+ public void objectOnActivate(ObjectContainer container) {
+ Logger.minor(this, "ACTIVATING: "+this, new Exception("debug"));
+ }
+
+ public void removeFrom(ObjectContainer container, ClientContext context) {
+ if(logMINOR) Logger.minor(this, "removeFrom() on "+this);
+ uri.removeFrom(container);
+ if(thisKey != null)
+ thisKey.removeFrom(container);
+ if(ah != null) {
+ ah.activateForExecution(container);
+ ah.removeFrom(container);
+ }
+ metaStrings.clear();
+ container.delete(metaStrings);
+ clientMetadata.removeFrom(container);
+ // actx is global to the ClientRequest, not our problem
+ decompressors.clear();
+ removeMetadata(container);
+ removeArchiveMetadata(container);
+ container.delete(decompressors);
+ super.removeFrom(container, context);
+ }
+
+ private void removeMetadata(ObjectContainer container) {
+ if(!persistent) return;
+ if(logMINOR) Logger.minor(this, "removeMetadata() on "+this);
+ if(metadata == null) return;
+ container.activate(metadata, 1);
+ metadata.removeFrom(container);
+ metadata = null;
+ }
+ private void removeArchiveMetadata(ObjectContainer container) {
+ if(!persistent) return;
+ if(logMINOR) Logger.minor(this, "removeArchiveMetadata() on
"+this);
+ if(archiveMetadata == null) return;
+ container.activate(archiveMetadata, 1);
+ archiveMetadata.removeFrom(container);
+ archiveMetadata = null;
+ }
+
}
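
For reference, the activation discipline this revision applies in every persistent callback above is: check whether db4o already has the target object active, activate it if not, do the work, then deactivate it afterwards only if we were the ones who activated it. The following is a minimal, self-contained sketch of that idiom, assuming only the db4o ObjectContainer API; the ExampleCallback class and its doWork() method are purely illustrative and are not part of this revision:

	import com.db4o.ObjectContainer;

	class ExampleCallback {
		// Illustrative only: the wasActive/activate/deactivate idiom
		// used by the callbacks in this revision.
		void onEvent(Object target, ObjectContainer container, boolean persistent) {
			boolean wasActive = true;
			if(persistent) {
				// Remember whether db4o had already activated the object...
				wasActive = container.ext().isActive(target);
				// ...and activate to depth 1 so its immediate fields are readable.
				if(!wasActive)
					container.activate(target, 1);
			}
			doWork(target); // stand-in for whatever the callback actually does
			// Deactivate only if we activated it ourselves; otherwise we would
			// pull the object out from under the caller that activated it.
			if(!wasActive)
				container.deactivate(target, 1);
		}
		private void doWork(Object target) {}
	}

Guarding the deactivate on !wasActive rather than on persistent alone matters because callbacks can nest on the database thread, and an outer frame may still be using the object.
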
Modified: trunk/freenet/src/freenet/client/async/SingleFileInserter.java
===================================================================
--- trunk/freenet/src/freenet/client/async/SingleFileInserter.java	2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/client/async/SingleFileInserter.java	2009-04-01 20:34:09 UTC (rev 26322)
@@ -4,6 +4,8 @@
import java.net.MalformedURLException;
import java.util.HashMap;
+import com.db4o.ObjectContainer;
+
import freenet.client.InsertBlock;
import freenet.client.InsertContext;
import freenet.client.InsertException;
@@ -14,18 +16,23 @@
import freenet.client.events.StartedCompressionEvent;
import freenet.keys.BaseClientKey;
import freenet.keys.CHKBlock;
+import freenet.keys.ClientKey;
import freenet.keys.FreenetURI;
import freenet.keys.SSKBlock;
import freenet.node.PrioRunnable;
+import freenet.support.LogThresholdCallback;
import freenet.support.Logger;
+import freenet.support.OOMHandler;
import freenet.support.SimpleFieldSet;
import freenet.support.api.Bucket;
import freenet.support.compress.CompressJob;
-import freenet.support.compress.CompressionOutputSizeException;
+import freenet.support.compress.Compressor;
import freenet.support.compress.Compressor.COMPRESSOR_TYPE;
import freenet.support.io.BucketChainBucketFactory;
import freenet.support.io.BucketTools;
import freenet.support.io.NativeThread;
+import freenet.support.io.NotPersistentBucket;
+import freenet.support.io.SegmentedBucketChainBucket;
/**
* Attempt to insert a file. May include metadata.
@@ -34,11 +41,22 @@
* Attempt to compress the file. Off-thread if it will take a while.
* Then hand it off to SimpleFileInserter.
*/
-public class SingleFileInserter implements ClientPutState, CompressJob {
+class SingleFileInserter implements ClientPutState {
- private static boolean logMINOR;
+ private static volatile boolean logMINOR;
+
+ static {
+ Logger.registerLogThresholdCallback(new LogThresholdCallback() {
+
+ @Override
+ public void shouldUpdate() {
+ logMINOR = Logger.shouldLog(Logger.MINOR, this);
+ }
+ });
+ }
+
final BaseClientPutter parent;
- final InsertBlock block;
+ InsertBlock block;
final InsertContext ctx;
final boolean metadata;
final PutCompletionCallback cb;
@@ -52,6 +70,18 @@
private final boolean freeData; // this is being set, but never read ???
private final String targetFilename;
private final boolean earlyEncode;
+ private final boolean persistent;
+ private boolean started;
+ private boolean cancelled;
+
+ // A persistent hashCode is helpful in debugging, and also means we can put
+ // these objects into sets etc when we need to.
+
+ private final int hashCode;
+
+ public int hashCode() {
+ return hashCode;
+ }
/**
* @param parent
@@ -70,8 +100,9 @@
*/
SingleFileInserter(BaseClientPutter parent, PutCompletionCallback cb, InsertBlock block,
boolean metadata, InsertContext ctx, boolean dontCompress,
- boolean getCHKOnly, boolean reportMetadataOnly, Object token, ARCHIVE_TYPE archiveType,
- boolean freeData, String targetFilename, boolean earlyEncode) throws InsertException {
+ boolean getCHKOnly, boolean reportMetadataOnly, Object token, ARCHIVE_TYPE archiveType,
+ boolean freeData, String targetFilename, boolean earlyEncode) {
+ hashCode = super.hashCode();
this.earlyEncode = earlyEncode;
this.reportMetadataOnly = reportMetadataOnly;
this.token = token;
@@ -84,10 +115,11 @@
this.archiveType = archiveType;
this.freeData = freeData;
this.targetFilename = targetFilename;
- logMINOR = Logger.shouldLog(Logger.MINOR, this);
+ this.persistent = parent.persistent();
+ if(logMINOR) Logger.minor(this, "Created "+this+"
persistent="+persistent+" freeData="+freeData);
}
- public void start(SimpleFieldSet fs) throws InsertException {
+ public void start(SimpleFieldSet fs, ObjectContainer container, ClientContext context) throws InsertException {
if(fs != null) {
String type = fs.get("Type");
if(type.equals("SplitHandler")) {
@@ -95,28 +127,98 @@
// If we succeed, we bypass both compression and FEC encoding!
try {
SplitHandler sh = new SplitHandler();
- sh.start(fs, false);
- cb.onTransition(this, sh);
- sh.schedule();
+ sh.start(fs, false, container, context);
+ boolean wasActive = true;
+
+ if(persistent) {
+ wasActive = container.ext().isActive(cb);
+ if(!wasActive)
+ container.activate(cb, 1);
+ }
+ cb.onTransition(this, sh, container);
+ sh.schedule(container, context);
+ if(!wasActive)
+ container.deactivate(cb, 1);
return;
} catch (ResumeException e) {
Logger.error(this, "Failed to restore: "+e, e);
}
}
}
- // Run off thread in any case
- ctx.compressor.enqueueNewJob(this);
+ if(persistent) {
+ container.activate(block, 1); // will cascade
+ }
+ tryCompress(container, context);
}
+
+ void onCompressed(CompressionOutput output, ObjectContainer container, ClientContext context) {
+ boolean cbActive = true;
+ if(persistent) {
+ cbActive = container.ext().isActive(cb);
+ if(!cbActive)
+ container.activate(cb, 1);
+ }
+ if(started) {
+ Logger.error(this, "Already started, not starting
again", new Exception("error"));
+ return;
+ }
+ if(cancelled) {
+ Logger.error(this, "Already cancelled, not starting");
+ return;
+ }
+ if(persistent) container.activate(block, 1);
+ try {
+ onCompressedInner(output, container, context);
+ } catch (InsertException e) {
+ cb.onFailure(e, SingleFileInserter.this, container,
context);
+ } catch (OutOfMemoryError e) {
+ OOMHandler.handleOOM(e);
+ System.err.println("OffThreadCompressor thread above
failed.");
+ // Might not be heap, so try anyway
+ cb.onFailure(new
InsertException(InsertException.INTERNAL_ERROR, e, null),
SingleFileInserter.this, container, context);
+ } catch (Throwable t) {
+ Logger.error(this, "Caught in OffThreadCompressor: "+t, t);
+ System.err.println("Caught in OffThreadCompressor: "+t);
+ t.printStackTrace();
+ // Try to fail gracefully
+ cb.onFailure(new
InsertException(InsertException.INTERNAL_ERROR, t, null),
SingleFileInserter.this, container, context);
+ }
+ if(!cbActive)
+ container.deactivate(cb, 1);
+ }
- public void tryCompress() throws InsertException {
- // First, determine how small it needs to be
- Bucket origData = block.getData();
- Bucket data = origData;
+ void onCompressedInner(CompressionOutput output, ObjectContainer container, ClientContext context) throws InsertException {
+ boolean parentWasActive = true;
+ if(container != null) {
+ container.activate(block, 2);
+ parentWasActive = container.ext().isActive(parent);
+ if(!parentWasActive)
+ container.activate(parent, 1);
+ }
+ long origSize = block.getData().size();
+ Bucket bestCompressedData = output.data;
+ long bestCompressedDataSize = bestCompressedData.size();
+ Bucket data = bestCompressedData;
+ COMPRESSOR_TYPE bestCodec = output.bestCodec;
+
+ boolean shouldFreeData = freeData;
+ if(bestCodec != null) {
+ if(logMINOR) Logger.minor(this, "The best compression
algorithm is "+bestCodec+ " we have gained"+
(100-(bestCompressedDataSize*100/origSize)) +"% !
("+origSize+'/'+bestCompressedDataSize+')');
+ shouldFreeData = true; // must be freed regardless of
whether the original data was to be freed
+ if(freeData) {
+ block.getData().free();
+ if(persistent)
block.getData().removeFrom(container);
+ }
+ block.nullData();
+ } else {
+ data = block.getData();
+ }
+
int blockSize;
int oneBlockCompressedSize;
- boolean dontCompress = ctx.dontCompress;
- long origSize = data.size();
+ boolean isCHK = false;
+ if(persistent) container.activate(block.desiredURI, 5);
String type = block.desiredURI.getKeyType();
if(type.equals("SSK") || type.equals("KSK") ||
type.equals("USK")) {
blockSize = SSKBlock.DATA_LENGTH;
@@ -124,182 +226,228 @@
} else if(type.equals("CHK")) {
blockSize = CHKBlock.DATA_LENGTH;
oneBlockCompressedSize = CHKBlock.MAX_COMPRESSED_DATA_LENGTH;
+ isCHK = true;
} else {
throw new InsertException(InsertException.INVALID_URI, "Unknown key type: "+type, null);
}
- COMPRESSOR_TYPE bestCodec = null;
- Bucket bestCompressedData = null;
- long bestCompressedDataSize = origSize;
+ // Compressed data ; now insert it
+ // We do NOT need to switch threads here: the actual compression is done by InsertCompressor on the RealCompressor thread,
+ // which then switches either to the database thread or to a new executable to run this method.
+
+ if(parent == cb) {
+ if(persistent) {
+ container.activate(ctx, 1);
+ container.activate(ctx.eventProducer, 1);
+ }
+ ctx.eventProducer.produceEvent(new FinishedCompressionEvent(bestCodec == null ? -1 : bestCodec.metadataID, origSize, data.size()), container, context);
+ if(logMINOR) Logger.minor(this, "Compressed "+origSize+" to "+data.size()+" on "+this);
+ }
+
+ // Insert it...
+ short codecNumber = bestCodec == null ? -1 : bestCodec.metadataID;
+ long compressedDataSize = data.size();
+ boolean fitsInOneBlockAsIs = bestCodec == null ? compressedDataSize < blockSize : compressedDataSize < oneBlockCompressedSize;
+ boolean fitsInOneCHK = bestCodec == null ? compressedDataSize < CHKBlock.DATA_LENGTH : compressedDataSize < CHKBlock.MAX_COMPRESSED_DATA_LENGTH;
- boolean tryCompress = (origSize > blockSize) && (!ctx.dontCompress) && (!dontCompress);
- if(tryCompress) {
- if(logMINOR) Logger.minor(this, "Attempt to compress the data");
- // Try to compress the data.
- // Try each algorithm, starting with the fastest and weakest.
- // Stop when run out of algorithms, or the compressed data fits in a single block.
- for(COMPRESSOR_TYPE comp : COMPRESSOR_TYPE.values()) {
- boolean shouldFreeOnFinally = true;
- Bucket result = null;
+ if((fitsInOneBlockAsIs || fitsInOneCHK) && origSize > Integer.MAX_VALUE)
+ throw new InsertException(InsertException.INTERNAL_ERROR, "2GB+ should not encode to one block!", null);
+
+ boolean noMetadata = ((block.clientMetadata == null) || block.clientMetadata.isTrivial()) && targetFilename == null;
+ if(noMetadata && archiveType == null) {
+ if(fitsInOneBlockAsIs) {
+ if(persistent && (data instanceof NotPersistentBucket))
+ data = fixNotPersistent(data, context);
+ // Just insert it
+ ClientPutState bi =
+ createInserter(parent, data, codecNumber, ctx, cb, metadata, (int)block.getData().size(), -1, getCHKOnly, true, container, context, shouldFreeData);
+ if(logMINOR)
+ Logger.minor(this, "Inserting without metadata: "+bi+" for "+this);
+ cb.onTransition(this, bi, container);
+ if(earlyEncode && bi instanceof SingleBlockInserter && isCHK)
+ ((SingleBlockInserter)bi).getBlock(container, context, true);
+ bi.schedule(container, context);
+ cb.onBlockSetFinished(this, container, context);
+ started = true;
+ if(persistent) {
+ if(!parentWasActive)
+ container.deactivate(parent, 1);
+ block.nullData();
+ block.removeFrom(container);
+ block = null;
+ removeFrom(container, context);
+ }
+ return;
+ }
+ }
+ if (fitsInOneCHK) {
+ // Insert single block, then insert pointer to it
+ if(persistent && (data instanceof NotPersistentBucket)) {
+ data = fixNotPersistent(data, context);
+ }
+ if(reportMetadataOnly) {
+ SingleBlockInserter dataPutter = new SingleBlockInserter(parent, data, codecNumber, persistent ? FreenetURI.EMPTY_CHK_URI.clone() : FreenetURI.EMPTY_CHK_URI, ctx, cb, metadata, (int)origSize, -1, getCHKOnly, true, true, token, container, context, persistent, shouldFreeData);
+ if(logMINOR)
+ Logger.minor(this, "Inserting with metadata: "+dataPutter+" for "+this);
+ Metadata meta = makeMetadata(archiveType, dataPutter.getURI(container, context));
+ cb.onMetadata(meta, this, container, context);
+ cb.onTransition(this, dataPutter, container);
+ dataPutter.schedule(container, context);
+ cb.onBlockSetFinished(this, container, context);
+ } else {
+ MultiPutCompletionCallback mcb =
+ new MultiPutCompletionCallback(cb, parent, token);
+ SingleBlockInserter dataPutter = new SingleBlockInserter(parent, data, codecNumber, persistent ? FreenetURI.EMPTY_CHK_URI.clone() : FreenetURI.EMPTY_CHK_URI, ctx, mcb, metadata, (int)origSize, -1, getCHKOnly, true, false, token, container, context, persistent, shouldFreeData);
+ if(logMINOR)
+ Logger.minor(this, "Inserting data: "+dataPutter+" for "+this);
+ Metadata meta = makeMetadata(archiveType, dataPutter.getURI(container, context));
+ Bucket metadataBucket;
try {
- if(logMINOR)
- Logger.minor(this, "Attempt to
compress using " + comp);
- // Only produce if we are compressing
*the original data*
- if(parent == cb)
-
ctx.eventProducer.produceEvent(new StartedCompressionEvent(comp));
- result = comp.compress(origData, new
BucketChainBucketFactory(ctx.persistentBucketFactory, CHKBlock.DATA_LENGTH),
origSize, bestCompressedDataSize);
- long resultSize = result.size();
- if(resultSize < oneBlockCompressedSize)
{
- bestCodec = comp;
- if(bestCompressedData != null)
-
bestCompressedData.free();
- bestCompressedData = result;
- bestCompressedDataSize =
resultSize;
- shouldFreeOnFinally = false;
- break;
- }
- if(resultSize < bestCompressedDataSize)
{
- if(bestCompressedData != null)
-
bestCompressedData.free();
- bestCompressedData = result;
- bestCompressedDataSize =
resultSize;
- bestCodec = comp;
- shouldFreeOnFinally = false;
- }
- } catch(CompressionOutputSizeException e) {
- continue; // try next compressor
type
- } catch(IOException e) {
+ metadataBucket =
BucketTools.makeImmutableBucket(context.getBucketFactory(persistent),
meta.writeToByteArray());
+ } catch (IOException e) {
+ Logger.error(this, "Caught "+e, e);
throw new
InsertException(InsertException.BUCKET_ERROR, e, null);
- } finally {
- if(shouldFreeOnFinally && (result !=
null))
- result.free();
+ } catch (MetadataUnresolvedException e) {
+ // Impossible, we're not inserting a
manifest.
+ Logger.error(this, "Caught "+e, e);
+ throw new
InsertException(InsertException.INTERNAL_ERROR, "Got
MetadataUnresolvedException in SingleFileInserter: "+e.toString(), null);
}
+ ClientPutState metaPutter =
createInserter(parent, metadataBucket, (short) -1, ctx, mcb, true,
(int)origSize, -1, getCHKOnly, true, container, context, true);
+ if(logMINOR)
+ Logger.minor(this, "Inserting metadata:
"+metaPutter+" for "+this);
+ mcb.addURIGenerator(metaPutter, container);
+ mcb.add(dataPutter, container);
+ cb.onTransition(this, mcb, container);
+ if(earlyEncode && metaPutter instanceof
SingleBlockInserter && isCHK)
+
((SingleBlockInserter)metaPutter).getBlock(container, context, true);
+ Logger.minor(this, ""+mcb+" : data
"+dataPutter+" meta "+metaPutter);
+ mcb.arm(container, context);
+ dataPutter.schedule(container, context);
+ if(metaPutter instanceof SingleBlockInserter)
+
((SingleBlockInserter)metaPutter).encode(container, context, true);
+ metaPutter.schedule(container, context);
+ cb.onBlockSetFinished(this, container, context);
}
+ started = true;
+ if(persistent) {
+ if(!parentWasActive)
+ container.deactivate(parent, 1);
+ block.nullData();
+ block.removeFrom(container);
+ block = null;
+ removeFrom(container, context);
+ }
+ return;
}
- boolean shouldFreeData = false;
- if(bestCompressedData != null) {
- if(logMINOR) Logger.minor(this, "The best compression
algorithm is "+bestCodec+ " we have gained "+
(100-(bestCompressedDataSize*100/origSize)) +"% !
("+origSize+'/'+bestCompressedDataSize+')');
- data = bestCompressedData;
- shouldFreeData = true;
- compressorUsed = bestCodec;
- }
-
- if(parent == cb) {
- if(tryCompress)
- ctx.eventProducer.produceEvent(new
FinishedCompressionEvent(bestCodec == null ? -1 : bestCodec.metadataID,
origSize, data.size()));
- if(logMINOR) Logger.minor(this, "Compressed
"+origSize+" to "+data.size()+" on "+this);
- }
- // Compressed data ; now insert it
- // We do it off thread so that RealCompressor can release the
semaphore
- final COMPRESSOR_TYPE fbestCodec = bestCodec;
- final Bucket fdata = data;
- final int foneBlockCompressedSize = oneBlockCompressedSize;
- final int fblockSize = blockSize;
- final long forigSize = origSize;
- final boolean fshouldFreeData = shouldFreeData;
- ctx.executor.execute(new PrioRunnable() {
-
- public int getPriority() {
- return NativeThread.NORM_PRIORITY;
+ // Otherwise the file is too big to fit into one block
+ // We therefore must make a splitfile
+ // Job of SplitHandler: when the splitinserter has the metadata,
+ // insert it. Then when the splitinserter has finished, and the
+ // metadata insert has finished too, tell the master callback.
+ if(reportMetadataOnly) {
+ SplitFileInserter sfi = new SplitFileInserter(parent,
cb, data, bestCodec, origSize, block.clientMetadata, ctx, getCHKOnly, metadata,
token, archiveType, shouldFreeData, persistent, container, context);
+ if(logMINOR)
+ Logger.minor(this, "Inserting as splitfile:
"+sfi+" for "+this);
+ cb.onTransition(this, sfi, container);
+ sfi.start(container, context);
+ if(earlyEncode) sfi.forceEncode(container, context);
+ if(persistent) {
+ container.store(sfi);
+ container.deactivate(sfi, 1);
}
-
- public void run() {
- insert(fbestCodec, fdata,
foneBlockCompressedSize, fblockSize, forigSize, fshouldFreeData);
+ block.nullData();
+ block.nullMetadata();
+ if(persistent) removeFrom(container, context);
+ } else {
+ SplitHandler sh = new SplitHandler();
+ SplitFileInserter sfi = new SplitFileInserter(parent,
sh, data, bestCodec, origSize, block.clientMetadata, ctx, getCHKOnly, metadata,
token, archiveType, shouldFreeData, persistent, container, context);
+ sh.sfi = sfi;
+ if(logMINOR)
+ Logger.minor(this, "Inserting as splitfile:
"+sfi+" for "+sh+" for "+this);
+ if(persistent)
+ container.store(sh);
+ cb.onTransition(this, sh, container);
+ sfi.start(container, context);
+ if(earlyEncode) sfi.forceEncode(container, context);
+ if(persistent) {
+ container.store(sfi);
+ container.deactivate(sfi, 1);
}
- }, "Insert thread for "+this);
+ started = true;
+ if(persistent)
+ container.store(this);
+ }
+ if(persistent) {
+ if(!parentWasActive)
+ container.deactivate(parent, 1);
+ }
}
- private void insert(COMPRESSOR_TYPE bestCodec, Bucket data, int
oneBlockCompressedSize, int blockSize, long origSize, boolean shouldFreeData) {
+ private Bucket fixNotPersistent(Bucket data, ClientContext context)
throws InsertException {
+ boolean skip = false;
+ if(data instanceof SegmentedBucketChainBucket) {
+ SegmentedBucketChainBucket seg =
(SegmentedBucketChainBucket) data;
+ Bucket[] buckets = seg.getBuckets();
+ if(buckets.length == 1) {
+ seg.clear();
+ data = buckets[0];
+ skip = true;
+ if(logMINOR) Logger.minor(this, "Using bucket 0
of SegmentedBucketChainBucket");
+ }
+ }
try {
- // Insert it...
- short codecNumber = bestCodec == null ? -1 :
bestCodec.metadataID;
- long compressedDataSize = data.size();
- boolean fitsInOneBlockAsIs = bestCodec == null ?
compressedDataSize < blockSize : compressedDataSize < oneBlockCompressedSize;
- boolean fitsInOneCHK = bestCodec == null ?
compressedDataSize < CHKBlock.DATA_LENGTH : compressedDataSize <
CHKBlock.MAX_COMPRESSED_DATA_LENGTH;
-
- if((fitsInOneBlockAsIs || fitsInOneCHK) &&
block.getData().size() > Integer.MAX_VALUE)
- throw new
InsertException(InsertException.INTERNAL_ERROR, "2GB+ should not encode to one
block!", null);
-
- boolean noMetadata = ((block.clientMetadata == null) ||
block.clientMetadata.isTrivial()) && targetFilename == null;
- if(noMetadata && archiveType == null)
- if(fitsInOneBlockAsIs) {
- // Just insert it
- ClientPutState bi =
- createInserter(parent, data,
codecNumber, block.desiredURI, ctx, cb, metadata, (int) block.getData().size(),
-1, getCHKOnly, true, true, freeData);
- cb.onTransition(this, bi);
- bi.schedule();
- cb.onBlockSetFinished(this);
- return;
- }
- if(fitsInOneCHK) {
- // Insert single block, then insert pointer to
it
- if(reportMetadataOnly) {
- SingleBlockInserter dataPutter = new
SingleBlockInserter(parent, data, codecNumber, FreenetURI.EMPTY_CHK_URI, ctx,
cb, metadata, (int) origSize, -1, getCHKOnly, true, true, token, freeData);
- Metadata meta =
makeMetadata(archiveType, dataPutter.getURI());
- cb.onMetadata(meta, this);
- cb.onTransition(this, dataPutter);
- dataPutter.schedule();
- cb.onBlockSetFinished(this);
- } else {
- MultiPutCompletionCallback mcb =
- new
MultiPutCompletionCallback(cb, parent, token);
- SingleBlockInserter dataPutter = new
SingleBlockInserter(parent, data, codecNumber, FreenetURI.EMPTY_CHK_URI, ctx,
mcb, metadata, (int) origSize, -1, getCHKOnly, true, false, token, freeData);
- Metadata meta =
makeMetadata(archiveType, dataPutter.getURI());
- Bucket metadataBucket;
- try {
- metadataBucket =
BucketTools.makeImmutableBucket(ctx.bf, meta.writeToByteArray());
- } catch(IOException e) {
- Logger.error(this, "Caught " +
e, e);
- throw new
InsertException(InsertException.BUCKET_ERROR, e, null);
- } catch(MetadataUnresolvedException e) {
- // Impossible, we're not
inserting a manifest.
- Logger.error(this, "Caught " +
e, e);
- throw new
InsertException(InsertException.INTERNAL_ERROR, "Got
MetadataUnresolvedException in SingleFileInserter: " + e.toString(), null);
- }
- ClientPutState metaPutter =
createInserter(parent, metadataBucket, (short) -1, block.desiredURI, ctx, mcb,
true, (int) origSize, -1, getCHKOnly, true, false, true);
- mcb.addURIGenerator(metaPutter);
- mcb.add(dataPutter);
- cb.onTransition(this, mcb);
- Logger.minor(this, "" + mcb + " : data
" + dataPutter + " meta " + metaPutter);
- mcb.arm();
- dataPutter.schedule();
- if(metaPutter instanceof
SingleBlockInserter)
- ((SingleBlockInserter)
metaPutter).encode();
- metaPutter.schedule();
- cb.onBlockSetFinished(this);
- }
- return;
+ if(!skip) {
+ if(logMINOR) Logger.minor(this, "Copying data from
"+data+" length "+data.size());
+ Bucket newData =
context.persistentBucketFactory.makeBucket(data.size());
+ BucketTools.copy(data, newData);
+ data.free();
+ data = newData;
}
- // Otherwise the file is too big to fit into one block
- // We therefore must make a splitfile
- // Job of SplitHandler: when the splitinserter has the
metadata,
- // insert it. Then when the splitinserter has finished,
and the
- // metadata insert has finished too, tell the master
callback.
- if(reportMetadataOnly) {
- SplitFileInserter sfi = new
SplitFileInserter(parent, cb, data, bestCodec, origSize, block.clientMetadata,
ctx, getCHKOnly, metadata, token, archiveType, shouldFreeData);
- cb.onTransition(this, sfi);
- sfi.start();
- if(earlyEncode)
- sfi.forceEncode();
- } else {
- SplitHandler sh = new SplitHandler();
- SplitFileInserter sfi = new
SplitFileInserter(parent, sh, data, bestCodec, origSize, block.clientMetadata,
ctx, getCHKOnly, metadata, token, archiveType, shouldFreeData);
- sh.sfi = sfi;
- cb.onTransition(this, sh);
- sfi.start();
- if(earlyEncode)
- sfi.forceEncode();
- }
- } catch(InsertException e) {
- onFailure(e, this);
+ } catch (IOException e) {
+ Logger.error(this, "Caught "+e+" while copying
non-persistent data", e);
+ throw new InsertException(InsertException.BUCKET_ERROR,
e, null);
}
+ // Note that SegmentedBCB *does* support splitting, so we don't
need to do anything to the data
+ // if it doesn't fit in a single block.
+ return data;
}
+
+ private void tryCompress(ObjectContainer container, ClientContext
context) throws InsertException {
+ // First, determine how small it needs to be
+ Bucket origData = block.getData();
+ Bucket data = origData;
+ int blockSize;
+ int oneBlockCompressedSize;
+ boolean dontCompress = ctx.dontCompress;
+
+ long origSize = data.size();
+ if(persistent)
+ container.activate(block.desiredURI, 5);
+ String type = block.desiredURI.getKeyType();
+ if(type.equals("SSK") || type.equals("KSK") ||
type.equals("USK")) {
+ blockSize = SSKBlock.DATA_LENGTH;
+ oneBlockCompressedSize =
SSKBlock.MAX_COMPRESSED_DATA_LENGTH;
+ } else if(type.equals("CHK")) {
+ blockSize = CHKBlock.DATA_LENGTH;
+ oneBlockCompressedSize =
CHKBlock.MAX_COMPRESSED_DATA_LENGTH;
+ } else {
+ throw new InsertException(InsertException.INVALID_URI,
"Unknown key type: "+type, null);
+ }
+
+ boolean tryCompress = (origSize > blockSize) &&
(!ctx.dontCompress) && (!dontCompress);
+ if(tryCompress) {
+ InsertCompressor.start(container, context, this,
origData, oneBlockCompressedSize, context.getBucketFactory(persistent),
persistent);
+ } else {
+ CompressionOutput output = new CompressionOutput(data,
null);
+ onCompressed(output, container, context);
+ }
+ }
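
[Aside for readers of the diff: tryCompress() above only queues an InsertCompressor when the data cannot already fit in one block of the target key type. A minimal sketch of that threshold decision follows; the class and method names are hypothetical, while the key-type constants are the real ones referenced in the patch.]

	import freenet.keys.CHKBlock;
	import freenet.keys.SSKBlock;

	class OneBlockThreshold {
		// Largest uncompressed payload that still fits in a single block.
		static int blockSizeFor(String keyType) {
			if(keyType.equals("SSK") || keyType.equals("KSK") || keyType.equals("USK"))
				return SSKBlock.DATA_LENGTH;
			if(keyType.equals("CHK"))
				return CHKBlock.DATA_LENGTH;
			throw new IllegalArgumentException("Unknown key type: "+keyType);
		}

		// Compression is attempted only when the data does not already fit.
		static boolean shouldTryCompress(long origSize, String keyType, boolean dontCompress) {
			return origSize > blockSizeFor(keyType) && !dontCompress;
		}
	}
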
private Metadata makeMetadata(ARCHIVE_TYPE archiveType, FreenetURI uri)
{
Metadata meta = null;
if(archiveType != null)
meta = new Metadata(Metadata.ARCHIVE_MANIFEST,
archiveType, null, uri, block.clientMetadata);
- else // redirect
+ else // redirect
meta = new Metadata(Metadata.SIMPLE_REDIRECT,
archiveType, null, uri, block.clientMetadata);
if(targetFilename != null) {
HashMap hm = new HashMap();
@@ -309,25 +457,27 @@
return meta;
}
- private ClientPutState createInserter(BaseClientPutter parent, Bucket
data, short compressionCodec, FreenetURI uri,
+ private ClientPutState createInserter(BaseClientPutter parent, Bucket
data, short compressionCodec,
InsertContext ctx, PutCompletionCallback cb, boolean
isMetadata, int sourceLength, int token, boolean getCHKOnly,
- boolean addToParent, boolean encodeCHK, boolean
freeData) throws InsertException {
+ boolean addToParent, ObjectContainer container,
ClientContext context, boolean freeData) throws InsertException {
+ FreenetURI uri = block.desiredURI;
uri.checkInsertURI(); // will throw an exception if needed
if(uri.getKeyType().equals("USK")) {
try {
return new USKInserter(parent, data,
compressionCodec, uri, ctx, cb, isMetadata, sourceLength, token,
- getCHKOnly, addToParent, this.token,
freeData);
+ getCHKOnly, addToParent, this.token,
container, context, freeData, persistent);
} catch (MalformedURLException e) {
throw new
InsertException(InsertException.INVALID_URI, e, null);
}
} else {
SingleBlockInserter sbi =
new SingleBlockInserter(parent, data,
compressionCodec, uri, ctx, cb, isMetadata, sourceLength, token,
- getCHKOnly, addToParent, false,
this.token, freeData);
- if(encodeCHK)
- cb.onEncode(sbi.getBlock().getClientKey(),
this);
+ getCHKOnly, addToParent, false,
this.token, container, context, persistent, freeData);
+ // pass uri to SBI
+ block.nullURI();
+ if(persistent) container.store(block);
return sbi;
}
@@ -337,8 +487,11 @@
* When we get the metadata, start inserting it to our target key.
* When we have inserted both the metadata and the splitfile,
* call the master callback.
+ *
+ * This class has to be public so that db4o can access
objectOnActivation
+ * through reflection.
*/
- class SplitHandler implements PutCompletionCallback, ClientPutState {
+ public class SplitHandler implements PutCompletionCallback,
ClientPutState {
ClientPutState sfi;
ClientPutState metadataPutter;
@@ -349,6 +502,16 @@
boolean metaInsertSetBlocks;
boolean metaInsertStarted;
boolean metaFetchable;
+ final boolean persistent;
+
+ // A persistent hashCode is helpful in debugging, and also
means we can put
+ // these objects into sets etc when we need to.
+
+ private final int hashCode;
+
+ public int hashCode() {
+ return hashCode;
+ }
/**
* Create a SplitHandler from a stored progress SimpleFieldSet.
@@ -358,8 +521,15 @@
* @throws InsertException Thrown if some other error prevents
the insert
* from starting.
*/
- void start(SimpleFieldSet fs, boolean forceMetadata) throws
ResumeException, InsertException {
+ void start(SimpleFieldSet fs, boolean forceMetadata,
ObjectContainer container, ClientContext context) throws ResumeException,
InsertException {
+ boolean parentWasActive = true;
+ if(persistent) {
+ parentWasActive =
container.ext().isActive(parent);
+ if(!parentWasActive)
+ container.activate(parent, 1);
+ }
+
boolean meta = metadata || forceMetadata;
// Don't include the booleans; wait for the callback.
@@ -368,7 +538,7 @@
if(sfiFS == null)
throw new ResumeException("No
SplitFileInserter");
ClientPutState newSFI, newMetaPutter = null;
- newSFI = new SplitFileInserter(parent, this,
forceMetadata ? null : block.clientMetadata, ctx, getCHKOnly, meta, token,
archiveType, sfiFS);
+ newSFI = new SplitFileInserter(parent, this,
forceMetadata ? null : block.clientMetadata, ctx, getCHKOnly, meta, token,
archiveType, sfiFS, container, context);
if(logMINOR) Logger.minor(this, "Starting "+newSFI+"
for "+this);
fs.removeSubset("SplitFileInserter");
SimpleFieldSet metaFS = fs.subset("MetadataPutter");
@@ -378,10 +548,10 @@
if(type.equals("SplitFileInserter")) {
// FIXME
insertAsArchiveManifest ?!?!?!
newMetaPutter =
- new
SplitFileInserter(parent, this, null, ctx, getCHKOnly, true, token,
archiveType, metaFS);
+ new
SplitFileInserter(parent, this, null, ctx, getCHKOnly, true, token,
archiveType, metaFS, container, context);
} else if(type.equals("SplitHandler")) {
newMetaPutter = new
SplitHandler();
-
((SplitHandler)newMetaPutter).start(metaFS, true);
+
((SplitHandler)newMetaPutter).start(metaFS, true, container, context);
}
} catch (ResumeException e) {
newMetaPutter = null;
@@ -395,27 +565,40 @@
sfi = newSFI;
metadataPutter = newMetaPutter;
}
+ if(persistent) {
+ container.store(this);
+ if(!parentWasActive)
+ container.deactivate(parent, 1);
+ }
}
public SplitHandler() {
// Default constructor
+ this.persistent = SingleFileInserter.this.persistent;
+ this.hashCode = super.hashCode();
}
- public synchronized void onTransition(ClientPutState oldState,
ClientPutState newState) {
+ public synchronized void onTransition(ClientPutState oldState,
ClientPutState newState, ObjectContainer container) {
+ if(persistent) { // FIXME debug-point
+ if(logMINOR) Logger.minor(this, "Transition:
"+oldState+" -> "+newState);
+ }
if(oldState == sfi)
sfi = newState;
if(oldState == metadataPutter)
metadataPutter = newState;
+ if(persistent)
+ container.store(this);
}
- public void onSuccess(ClientPutState state) {
- logMINOR = Logger.shouldLog(Logger.MINOR, this);
+ public void onSuccess(ClientPutState state, ObjectContainer
container, ClientContext context) {
+ if(persistent) {
+ container.activate(block, 2);
+ }
if(logMINOR) Logger.minor(this, "onSuccess("+state+")
for "+this);
boolean lateStart = false;
+ ClientPutState toRemove = null;
synchronized(this) {
if(finished){
- if(freeData)
- block.free();
return;
}
if(state == sfi) {
@@ -426,35 +609,82 @@
} else {
if(logMINOR) Logger.minor(this,
"Metadata already started for "+this+" : success="+metaInsertSuccess+"
started="+metaInsertStarted);
}
+ sfi = null;
+ toRemove = state;
} else if(state == metadataPutter) {
if(logMINOR) Logger.minor(this,
"Metadata insert succeeded for "+this+" : "+state);
metaInsertSuccess = true;
+ metadataPutter = null;
+ toRemove = state;
} else {
Logger.error(this, "Unknown: "+state+"
for "+this, new Exception("debug"));
}
if(splitInsertSuccess && metaInsertSuccess) {
if(logMINOR) Logger.minor(this, "Both
succeeded for "+this);
finished = true;
+ if(freeData) {
+ block.free(container);
+ if(persistent)
+ container.store(this);
+ } else {
+ block.nullData();
+ }
}
}
+ if(toRemove != null && persistent)
+ toRemove.removeFrom(container, context);
+ if(persistent)
+ container.store(this);
if(lateStart)
- startMetadata();
- else if(finished)
- cb.onSuccess(this);
+ startMetadata(container, context);
+ else if(finished) {
+ if(persistent)
+ container.activate(cb, 1);
+ cb.onSuccess(this, container, context);
+ if(persistent)
+ container.deactivate(cb, 1);
+ }
}
- public void onFailure(InsertException e, ClientPutState state) {
+ public void onFailure(InsertException e, ClientPutState state,
ObjectContainer container, ClientContext context) {
+ if(persistent) {
+ container.activate(block, 1);
+ }
+ boolean toFail = true;
+ boolean toRemove = false;
synchronized(this) {
+ if(state == sfi) {
+ toRemove = true;
+ sfi = null;
+ if(metadataPutter != null) {
+ if(persistent)
container.store(this);
+ }
+ } else if(state == metadataPutter) {
+ toRemove = true;
+ metadataPutter = null;
+ if(sfi != null) {
+ if(persistent)
container.store(this);
+ }
+ } else {
+ Logger.error(this, "onFailure() on
unknown state "+state+" on "+this, new Exception("debug"));
+ }
if(finished){
- if(freeData)
- block.free();
- return;
+ toFail = false; // Already failed
}
}
- fail(e);
+ if(toRemove && persistent)
+ state.removeFrom(container, context);
+ // fail() will cancel the other one, so we don't need
to.
+ // When it does, it will come back here, and we won't
call fail(), because fail() has already set finished = true.
+ if(toFail)
+ fail(e, container, context);
}
- public void onMetadata(Metadata meta, ClientPutState state) {
+ public void onMetadata(Metadata meta, ClientPutState state,
ObjectContainer container, ClientContext context) {
+ if(persistent) {
+ container.activate(cb, 1);
+ container.activate(block, 2);
+ }
InsertException e = null;
synchronized(this) {
if(finished) return;
@@ -468,7 +698,7 @@
Logger.error(this, "Got metadata for
metadata");
e = new
InsertException(InsertException.INTERNAL_ERROR, "Did not expect to get metadata
for metadata inserter", null);
} else if(state != sfi) {
- Logger.error(this, "Got metadata from
unknown state "+state);
+ Logger.error(this, "Got metadata from
unknown state "+state+" sfi="+sfi+" metadataPutter="+metadataPutter+" on
"+this+" persistent="+persistent, new Exception("debug"));
e = new
InsertException(InsertException.INTERNAL_ERROR, "Got metadata from unknown
state", null);
} else {
// Already started metadata putter ?
(in which case we've got the metadata twice)
@@ -476,22 +706,27 @@
}
}
if(reportMetadataOnly) {
- cb.onMetadata(meta, this);
+ if(persistent)
+ container.store(this);
+ cb.onMetadata(meta, this, container, context);
return;
}
if(e != null) {
- onFailure(e, state);
+ onFailure(e, state, container, context);
return;
}
byte[] metaBytes;
+ if(persistent)
+ // Load keys
+ container.activate(meta, 100);
try {
metaBytes = meta.writeToByteArray();
} catch (MetadataUnresolvedException e1) {
Logger.error(this, "Impossible: "+e1, e1);
InsertException ex = new
InsertException(InsertException.INTERNAL_ERROR, "MetadataUnresolvedException in
SingleFileInserter.SplitHandler: "+e1, null);
ex.initCause(e1);
- fail(ex);
+ fail(ex, container, context);
return;
}
@@ -510,7 +745,7 @@
Logger.error(this, "Impossible
(2): "+e1, e1);
InsertException ex = new
InsertException(InsertException.INTERNAL_ERROR, "MetadataUnresolvedException in
SingleFileInserter.SplitHandler(2): "+e1, null);
ex.initCause(e1);
- fail(ex);
+ fail(ex, container, context);
return;
}
}
@@ -518,91 +753,129 @@
Bucket metadataBucket;
try {
- metadataBucket =
BucketTools.makeImmutableBucket(ctx.bf, metaBytes);
+ metadataBucket =
BucketTools.makeImmutableBucket(context.getBucketFactory(persistent),
metaBytes);
} catch (IOException e1) {
InsertException ex = new
InsertException(InsertException.BUCKET_ERROR, e1, null);
- fail(ex);
+ fail(ex, container, context);
return;
}
InsertBlock newBlock = new InsertBlock(metadataBucket,
null, block.desiredURI);
- try {
synchronized(this) {
metadataPutter = new
SingleFileInserter(parent, this, newBlock, true, ctx, false, getCHKOnly, false,
token, archiveType, true, metaPutterTargetFilename, earlyEncode);
// If EarlyEncode, then start the
metadata insert ASAP, to get the key.
// Otherwise, wait until the data is
fetchable (to improve persistence).
+ if(logMINOR)
+ Logger.minor(this, "Created
metadata putter for "+this+" : "+metadataPutter+" bucket "+metadataBucket+"
size "+metadataBucket.size());
+ if(persistent)
+ container.store(this);
if(!(earlyEncode ||
splitInsertSuccess)) return;
}
if(logMINOR) Logger.minor(this, "Putting
metadata on "+metadataPutter+" from "+sfi+"
("+((SplitFileInserter)sfi).getLength()+ ')');
- } catch (InsertException e1) {
- cb.onFailure(e1, this);
- return;
- }
- startMetadata();
+ startMetadata(container, context);
}
- private void fail(InsertException e) {
+ private void fail(InsertException e, ObjectContainer container,
ClientContext context) {
if(logMINOR) Logger.minor(this, "Failing: "+e, e);
ClientPutState oldSFI = null;
ClientPutState oldMetadataPutter = null;
synchronized(this) {
if(finished){
- if(freeData)
- block.free();
return;
}
finished = true;
oldSFI = sfi;
oldMetadataPutter = metadataPutter;
}
+ if(persistent) {
+ container.store(this);
+ if(oldSFI != null)
+ container.activate(oldSFI, 1);
+ if(oldMetadataPutter != null)
+ container.activate(oldMetadataPutter,
1);
+ }
if(oldSFI != null)
- oldSFI.cancel();
+ oldSFI.cancel(container, context);
if(oldMetadataPutter != null)
- oldMetadataPutter.cancel();
- finished = true;
- cb.onFailure(e, this);
+ oldMetadataPutter.cancel(container, context);
+ if(persistent) {
+ container.activate(block, 2);
+ container.activate(cb, 1);
+ }
+ synchronized(this) {
+ if(freeData)
+ block.free(container);
+ else
+ block.nullData();
+ }
+ cb.onFailure(e, this, container, context);
}
public BaseClientPutter getParent() {
return parent;
}
- public void onEncode(BaseClientKey key, ClientPutState state) {
+ public void onEncode(BaseClientKey key, ClientPutState state,
ObjectContainer container, ClientContext context) {
+ if(persistent) // FIXME debug-point
+ if(logMINOR) Logger.minor(this, "onEncode() for
"+this+" : "+state+" : "+key);
synchronized(this) {
- if(state != metadataPutter) return;
+ if(state != metadataPutter) {
+ if(logMINOR) Logger.minor(this,
"ignored onEncode() for "+this+" : "+state);
+ return;
+ }
}
- cb.onEncode(key, this);
+ if(persistent) container.activate(cb, 1);
+ cb.onEncode(key, this, container, context);
}
- public void cancel() {
+ public void cancel(ObjectContainer container, ClientContext
context) {
+ if(logMINOR) Logger.minor(this, "Cancelling "+this);
ClientPutState oldSFI = null;
ClientPutState oldMetadataPutter = null;
synchronized(this) {
oldSFI = sfi;
oldMetadataPutter = metadataPutter;
}
+ if(persistent) {
+ container.store(this);
+ if(oldSFI != null)
+ container.activate(oldSFI, 1);
+ if(oldMetadataPutter != null)
+ container.activate(oldMetadataPutter,
1);
+ }
if(oldSFI != null)
- oldSFI.cancel();
+ oldSFI.cancel(container, context);
if(oldMetadataPutter != null)
- oldMetadataPutter.cancel();
+ oldMetadataPutter.cancel(container, context);
- if(freeData)
- block.free();
+ if(freeData) {
+ if(persistent)
+ container.activate(block, 2);
+ block.free(container);
+ } else {
+ block.nullData();
+ }
}
- public void onBlockSetFinished(ClientPutState state) {
+ public void onBlockSetFinished(ClientPutState state,
ObjectContainer container, ClientContext context) {
synchronized(this) {
if(state == sfi)
splitInsertSetBlocks = true;
else if (state == metadataPutter)
metaInsertSetBlocks = true;
+ if(persistent)
+ container.store(this);
if(!(splitInsertSetBlocks &&
metaInsertSetBlocks))
return;
}
- cb.onBlockSetFinished(this);
+ if(persistent)
+ container.activate(cb, 1);
+ cb.onBlockSetFinished(this, container, context);
}
- public void schedule() throws InsertException {
- sfi.schedule();
+ public void schedule(ObjectContainer container, ClientContext
context) throws InsertException {
+ if(persistent)
+ container.activate(sfi, 1);
+ sfi.schedule(container, context);
}
public Object getToken() {
@@ -625,10 +898,11 @@
return fs;
}
- public void onFetchable(ClientPutState state) {
+ public void onFetchable(ClientPutState state, ObjectContainer
container) {
- logMINOR = Logger.shouldLog(Logger.MINOR, this);
-
+ if(persistent) // FIXME debug-point
+ if(logMINOR) Logger.minor(this, "onFetchable on
"+this);
+
if(logMINOR) Logger.minor(this, "onFetchable("+state+
')');
boolean meta;
@@ -642,26 +916,37 @@
if(logMINOR) Logger.minor(this,
"Metadata fetchable"+(metaFetchable?"":" already"));
if(metaFetchable) return;
metaFetchable = true;
+ if(persistent)
+ container.store(this);
} else {
if(state != sfi) {
Logger.error(this, "onFetchable
for unknown state "+state);
return;
}
+ if(persistent)
+ container.store(this);
if(logMINOR) Logger.minor(this, "Data
fetchable");
if(metaInsertStarted) return;
}
}
- if(meta)
- cb.onFetchable(this);
+ if(meta) {
+ if(persistent)
+ container.activate(cb, 1);
+ cb.onFetchable(this, container);
+ }
}
- private void startMetadata() {
+ private void startMetadata(ObjectContainer container,
ClientContext context) {
+ if(persistent) // FIXME debug-point
+ if(logMINOR) Logger.minor(this,
"startMetadata() on "+this);
try {
ClientPutState putter;
ClientPutState splitInserter;
synchronized(this) {
if(metaInsertStarted) return;
+ if(persistent && metadataPutter != null)
+
container.activate(metadataPutter, 1);
putter = metadataPutter;
if(putter == null) {
if(logMINOR) Logger.minor(this,
"Cannot start metadata yet: no metadataPutter");
@@ -669,34 +954,78 @@
metaInsertStarted = true;
splitInserter = sfi;
}
+ if(persistent)
+ container.store(this);
if(putter != null) {
if(logMINOR) Logger.minor(this,
"Starting metadata inserter: "+putter+" for "+this);
- putter.schedule();
+ putter.schedule(container, context);
if(logMINOR) Logger.minor(this,
"Started metadata inserter: "+putter+" for "+this);
} else {
// Get all the URIs ASAP so we can
start to insert the metadata.
-
((SplitFileInserter)splitInserter).forceEncode();
+ if(persistent)
+
container.activate(splitInserter, 1);
+
((SplitFileInserter)splitInserter).forceEncode(container, context);
}
} catch (InsertException e1) {
Logger.error(this, "Failing "+this+" : "+e1,
e1);
- fail(e1);
+ fail(e1, container, context);
return;
}
}
+ public void objectOnActivate(ObjectContainer container) {
+ // Chain to containing class, since we use its members
extensively.
+ container.activate(SingleFileInserter.this, 1);
+ }
+
+ public void removeFrom(ObjectContainer container, ClientContext
context) {
+ if(logMINOR) Logger.minor(this, "removeFrom() on
"+this);
+ container.delete(this);
+			// Remove the outer SingleFileInserter too: we always transition
from it to this SplitHandler, so it will never get its own removeFrom().
+ SingleFileInserter.this.removeFrom(container, context);
+ }
+
+ public boolean objectCanUpdate(ObjectContainer container) {
+ if(logMINOR)
+ Logger.minor(this, "objectCanUpdate() on
"+this, new Exception("debug"));
+ return true;
+ }
+
+ public boolean objectCanNew(ObjectContainer container) {
+ if(finished)
+ Logger.error(this, "objectCanNew but finished
on "+this, new Exception("error"));
+ else if(logMINOR)
+ Logger.minor(this, "objectCanNew() on "+this,
new Exception("debug"));
+ return true;
+ }
+
}
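
[Aside: stripped of persistence and logging, SplitHandler's onSuccess() above is a two-phase completion latch: each sub-insert reports in, and the master callback fires exactly once, only after both the splitfile data insert and the metadata insert have succeeded. A sketch of just that pattern, with hypothetical names:]

	class TwoPhaseCompletion {
		private boolean dataDone, metaDone, finished;

		// Returns true exactly once, when both phases have succeeded;
		// the caller then invokes the master callback.
		synchronized boolean onPhaseSuccess(boolean isDataPhase) {
			if(finished) return false;
			if(isDataPhase) dataDone = true; else metaDone = true;
			if(!(dataDone && metaDone)) return false;
			finished = true;
			return true;
		}
	}
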
public BaseClientPutter getParent() {
return parent;
}
- public void cancel() {
- if(freeData)
- block.free();
+ public void cancel(ObjectContainer container, ClientContext context) {
+ if(logMINOR) Logger.minor(this, "Cancel "+this);
+ synchronized(this) {
+ if(cancelled) return;
+ cancelled = true;
+ }
+ if(freeData) {
+ if(persistent)
+ container.activate(block, 1);
+ block.free(container);
+ }
+ if(persistent)
+ container.store(this);
+ if(persistent)
+ container.activate(cb, 1);
+ // Must call onFailure so get removeFrom()'ed
+ cb.onFailure(new InsertException(InsertException.CANCELLED),
this, container, context);
}
- public void schedule() throws InsertException {
- start(null);
+ public void schedule(ObjectContainer container, ClientContext context)
throws InsertException {
+ start(null, container, context);
}
public Object getToken() {
@@ -707,10 +1036,48 @@
return null;
}
- public void onFailure(InsertException e, ClientPutState c) {
- if(cb != null)
- cb.onFailure(e, c);
- else
- Logger.error(this, "The callback is null but we have
onFailure to call!");
+ public void onStartCompression(COMPRESSOR_TYPE ctype, ObjectContainer
container, ClientContext context) {
+ if(persistent) {
+ container.activate(ctx, 2);
+ }
+ if(parent == cb) {
+ if(ctx == null) throw new NullPointerException();
+ if(ctx.eventProducer == null) throw new
NullPointerException();
+ ctx.eventProducer.produceEvent(new
StartedCompressionEvent(ctype), container, context);
+ }
}
+
+ boolean cancelled() {
+ return cancelled;
+ }
+
+ boolean started() {
+ return started;
+ }
+
+ public void removeFrom(ObjectContainer container, ClientContext
context) {
+ if(logMINOR) Logger.minor(this, "removeFrom() on "+this, new
Exception("debug"));
+ // parent removes self
+ // token is passed in, creator of token is responsible for
removing it
+ if(block != null) {
+ container.activate(block, 1);
+ block.removeFrom(container);
+ }
+ // ctx is passed in, creator is responsible for removing it
+ // cb removes itself
+ container.delete(this);
+ }
+
+ public boolean objectCanUpdate(ObjectContainer container) {
+ if(logMINOR)
+ Logger.minor(this, "objectCanUpdate() on "+this, new
Exception("debug"));
+ return true;
+ }
+
+ public boolean objectCanNew(ObjectContainer container) {
+ if(logMINOR)
+ Logger.minor(this, "objectCanNew() on "+this, new
Exception("debug"));
+ return true;
+ }
+
}
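
[Aside, before the next file: every method in the class above that runs persistently brackets its work in db4o activation calls. A minimal sketch of that discipline, assuming only the db4o calls shown in this patch; SomeState and mutate() are illustrative placeholders:]

	void touchPersistedState(ObjectContainer container, SomeState state, boolean persistent) {
		boolean wasActive = true;
		if(persistent) {
			wasActive = container.ext().isActive(state);
			if(!wasActive) container.activate(state, 1); // depth 1: direct fields only
		}
		state.mutate();                 // safe: fields are now paged in
		if(persistent) {
			container.store(state);     // persist the mutation
			if(!wasActive) container.deactivate(state, 1); // restore paging state
		}
	}
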
Copied: trunk/freenet/src/freenet/client/async/SingleKeyListener.java (from rev
26320, branches/db4o/freenet/src/freenet/client/async/SingleKeyListener.java)
===================================================================
--- trunk/freenet/src/freenet/client/async/SingleKeyListener.java
(rev 0)
+++ trunk/freenet/src/freenet/client/async/SingleKeyListener.java
2009-04-01 20:34:09 UTC (rev 26322)
@@ -0,0 +1,95 @@
+package freenet.client.async;
+
+import com.db4o.ObjectContainer;
+
+import freenet.keys.Key;
+import freenet.keys.KeyBlock;
+import freenet.keys.NodeSSK;
+import freenet.node.SendableGet;
+
+public class SingleKeyListener implements KeyListener {
+
+ private final Key key;
+ private final BaseSingleFileFetcher fetcher;
+ private final boolean dontCache;
+ private boolean done;
+ private short prio;
+ private final boolean persistent;
+
+ public SingleKeyListener(Key key, BaseSingleFileFetcher fetcher,
boolean dontCache, short prio, boolean persistent) {
+ this.key = key;
+ this.fetcher = fetcher;
+ this.dontCache = dontCache;
+ this.prio = prio;
+ this.persistent = persistent;
+ }
+
+ public long countKeys() {
+ if(done) return 0;
+ else return 1;
+ }
+
+ public short definitelyWantKey(Key key, byte[] saltedKey,
ObjectContainer container,
+ ClientContext context) {
+ if(!key.equals(this.key)) return -1;
+ else return prio;
+ }
+
+ public boolean dontCache() {
+ return dontCache;
+ }
+
+ public HasKeyListener getHasKeyListener() {
+ return fetcher;
+ }
+
+ public short getPriorityClass(ObjectContainer container) {
+ return prio;
+ }
+
+ public SendableGet[] getRequestsForKey(Key key, byte[] saltedKey,
ObjectContainer container,
+ ClientContext context) {
+ if(!key.equals(this.key)) return null;
+ return new SendableGet[] { fetcher };
+ }
+
+ public boolean handleBlock(Key key, byte[] saltedKey, KeyBlock found,
+ ObjectContainer container, ClientContext context) {
+ if(!key.equals(this.key)) return false;
+ if(persistent)
+ container.activate(fetcher, 1);
+ fetcher.onGotKey(key, found, container, context);
+ if(persistent)
+ container.deactivate(fetcher, 1);
+ synchronized(this) {
+ done = true;
+ }
+ return true;
+ }
+
+ public Key[] listKeys(ObjectContainer container) {
+ return new Key[] { key };
+ }
+
+ public boolean persistent() {
+ return persistent;
+ }
+
+ public boolean probablyWantKey(Key key, byte[] saltedKey) {
+ if(done) return false;
+ return key.equals(this.key);
+ }
+
+ public synchronized void onRemove() {
+ done = true;
+ }
+
+ public boolean isEmpty() {
+ return done;
+ }
+
+ public boolean isSSK() {
+ return key instanceof NodeSSK;
+ }
+
+}
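
[Aside: a sketch of how the scheduler side might consume the contract above. probablyWantKey() is the cheap, false-positive-tolerant filter, definitelyWantKey() the exact check doubling as a priority lookup, and handleBlock() the consumer. The dispatch method itself is hypothetical; only the three KeyListener calls come from this patch.]

	void dispatchFoundBlock(KeyListener listener, Key key, byte[] saltedKey,
			KeyBlock found, ObjectContainer container, ClientContext context) {
		// Cheap, possibly-false-positive check first...
		if(!listener.probablyWantKey(key, saltedKey)) return;
		// ...then the exact check, which doubles as a priority lookup...
		if(listener.definitelyWantKey(key, saltedKey, container, context) < 0) return;
		// ...and finally hand the block over; returns true if consumed.
		listener.handleBlock(key, saltedKey, found, container, context);
	}
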
Modified: trunk/freenet/src/freenet/client/async/SplitFileFetcher.java
===================================================================
--- trunk/freenet/src/freenet/client/async/SplitFileFetcher.java
2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/client/async/SplitFileFetcher.java
2009-04-01 20:34:09 UTC (rev 26322)
@@ -3,10 +3,14 @@
* http://www.gnu.org/ for further details of the GPL. */
package freenet.client.async;
+import java.io.File;
import java.io.IOException;
import java.io.OutputStream;
-import java.util.LinkedList;
+import java.util.ArrayList;
+import java.util.List;
+import com.db4o.ObjectContainer;
+
import freenet.client.ArchiveContext;
import freenet.client.ClientMetadata;
import freenet.client.FetchContext;
@@ -16,7 +20,11 @@
import freenet.client.MetadataParseException;
import freenet.keys.CHKBlock;
import freenet.keys.ClientCHK;
+import freenet.keys.NodeCHK;
+import freenet.node.SendableGet;
+import freenet.support.BloomFilter;
import freenet.support.Fields;
+import freenet.support.LogThresholdCallback;
import freenet.support.Logger;
import freenet.support.OOMHandler;
import freenet.support.api.Bucket;
@@ -27,11 +35,25 @@
* Fetch a splitfile, decompress it if need be, and return it to the
GetCompletionCallback.
* Most of the work is done by the segments, and we do not need a thread.
*/
-public class SplitFileFetcher implements ClientGetState {
+public class SplitFileFetcher implements ClientGetState, HasKeyListener {
+ private static volatile boolean logMINOR;
+
+ static {
+ Logger.registerLogThresholdCallback(new LogThresholdCallback() {
+
+ @Override
+ public void shouldUpdate() {
+ logMINOR = Logger.shouldLog(Logger.MINOR, this);
+ }
+ });
+ }
+
final FetchContext fetchContext;
+ final FetchContext blockFetchContext;
+ final boolean deleteFetchContext;
final ArchiveContext archiveContext;
- final LinkedList decompressors;
+ final List decompressors;
final ClientMetadata clientMetadata;
final ClientRequester parent;
final GetCompletionCallback cb;
@@ -56,25 +78,91 @@
private final Bucket returnBucket;
private boolean finished;
private long token;
+ final boolean persistent;
+ private FetchException otherFailure;
+ // A persistent hashCode is helpful in debugging, and also means we can
put
+ // these objects into sets etc when we need to.
+
+ private final int hashCode;
+
+ public int hashCode() {
+ return hashCode;
+ }
+
+ // Bloom filter stuff
+ /** The main bloom filter, which includes every key in the segment, is
stored
+ * in this file. It is a counting filter and is updated when a key is
found. */
+ File mainBloomFile;
+ /** The per-segment bloom filters are kept in this (slightly larger)
file,
+ * appended one after the next. */
+ File altBloomFile;
+ /** Size of the main Bloom filter in bytes. */
+ final int mainBloomFilterSizeBytes;
+	/** Default mainBloomElementsPerKey. The false positive rate is approx
+ * 0.6185^[this number], so 19 gives us 0.01% false positives, which
should
+ * be acceptable even if there are thousands of splitfiles on the
queue. */
+ static final int DEFAULT_MAIN_BLOOM_ELEMENTS_PER_KEY = 19;
+ /** Number of hashes for the main filter. */
+ final int mainBloomK;
+ /** What proportion of false positives is acceptable for the per-segment
+ * Bloom filters? This is divided by the number of segments, so it is
(roughly)
+ * an overall probability of any false positive given that we reach the
+ * per-segment filters. IMHO 1 in 100 is adequate. */
+ static final double ACCEPTABLE_BLOOM_FALSE_POSITIVES_ALL_SEGMENTS =
0.01;
+ /** Size of per-segment bloom filter in bytes. This is calculated from
the
+ * above constant and the number of segments, and rounded up. */
+ final int perSegmentBloomFilterSizeBytes;
+ /** Number of hashes for the per-segment bloom filters. */
+ final int perSegmentK;
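
[Aside, checking the constants above: with an optimal hash count, a Bloom filter's false positive rate is roughly 0.6185^(bits per key), so 19 bits per key gives about 1.1e-4, matching the quoted ~0.01%. The per-segment sizing in the constructor below inverts the same rule; a hypothetical helper to illustrate, not part of the patch:]

	// falsePositiveRate ~= 0.6185^(bits/key), hence
	// bits/key = ln(rate) / ln(0.6185); e.g. rate 0.01 needs 10 bits per key.
	static int bloomBitsPerKey(double falsePositiveRate) {
		return (int) Math.ceil(Math.log(falsePositiveRate) / Math.log(0.6185));
	}
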
+ private int keyCount;
+ /** Salt used in the secondary Bloom filters if the primary matches.
+ * The primary Bloom filters use the already-salted saltedKey. */
+ private final byte[] localSalt;
+ /** Reference set on the first call to makeKeyListener().
+ * NOTE: db4o DOES NOT clear transient variables on deactivation.
+	 * So as long as this is paged in (i.e. something, typically the
+	 * KeyListener, still holds a reference to it), it will remain valid
+	 * once it is set by the first call during resuming. */
+ private transient SplitFileFetcherKeyListener tempListener;
+
public SplitFileFetcher(Metadata metadata, GetCompletionCallback rcb,
ClientRequester parent2,
- FetchContext newCtx, LinkedList decompressors,
ClientMetadata clientMetadata,
- ArchiveContext actx, int recursionLevel, Bucket
returnBucket, long token2) throws FetchException, MetadataParseException {
+ FetchContext newCtx, boolean deleteFetchContext, List
decompressors2, ClientMetadata clientMetadata,
+ ArchiveContext actx, int recursionLevel, Bucket
returnBucket, long token2, ObjectContainer container, ClientContext context)
throws FetchException, MetadataParseException {
+ this.persistent = parent2.persistent();
+ this.deleteFetchContext = deleteFetchContext;
+ if(logMINOR)
+ Logger.minor(this, "Persistence = "+persistent+" from
"+parent2, new Exception("debug"));
+ int hash = super.hashCode();
+ if(hash == 0) hash = 1;
+ this.hashCode = hash;
this.finished = false;
this.returnBucket = returnBucket;
this.fetchContext = newCtx;
+ if(newCtx == null)
+ throw new NullPointerException();
this.archiveContext = actx;
- this.decompressors = decompressors;
- this.clientMetadata = clientMetadata;
+ this.decompressors = persistent ? new ArrayList(decompressors2)
: decompressors2;
+ if(decompressors.size() > 1) {
+ Logger.error(this, "Multiple decompressors:
"+decompressors.size()+" - this is almost certainly a bug", new
Exception("debug"));
+ }
+ this.clientMetadata = clientMetadata == null ? new
ClientMetadata() : (ClientMetadata) clientMetadata.clone(); // copy it as in
SingleFileFetcher
this.cb = rcb;
this.recursionLevel = recursionLevel + 1;
this.parent = parent2;
+ localSalt = new byte[32];
+ context.random.nextBytes(localSalt);
if(parent2.isCancelled())
throw new FetchException(FetchException.CANCELLED);
overrideLength = metadata.dataLength();
this.splitfileType = metadata.getSplitfileType();
ClientCHK[] splitfileDataBlocks =
metadata.getSplitfileDataKeys();
ClientCHK[] splitfileCheckBlocks =
metadata.getSplitfileCheckKeys();
+ if(persistent) {
+ // Clear them here so they don't get deleted and we
don't need to clone them.
+ metadata.clearSplitfileKeys();
+ container.store(metadata);
+ }
for(int i=0;i<splitfileDataBlocks.length;i++)
if(splitfileDataBlocks[i] == null) throw new
MetadataParseException("Null: data block "+i+" of "+splitfileDataBlocks.length);
for(int i=0;i<splitfileCheckBlocks.length;i++)
@@ -86,20 +174,34 @@
finalLength = overrideLength;
}
long eventualLength = Math.max(overrideLength,
metadata.uncompressedDataLength());
- cb.onExpectedSize(eventualLength);
+ boolean wasActive = true;
+ if(persistent) {
+ wasActive = container.ext().isActive(cb);
+ if(!wasActive)
+ container.activate(cb, 1);
+ }
+ cb.onExpectedSize(eventualLength, container);
String mimeType = metadata.getMIMEType();
if(mimeType != null)
- cb.onExpectedMIME(mimeType);
+ cb.onExpectedMIME(mimeType, container);
if(metadata.uncompressedDataLength() > 0)
- cb.onFinalizedMetadata();
+ cb.onFinalizedMetadata(container);
+ if(!wasActive)
+ container.deactivate(cb, 1);
if(eventualLength > 0 && newCtx.maxOutputLength > 0 &&
eventualLength > newCtx.maxOutputLength)
throw new FetchException(FetchException.TOO_BIG,
eventualLength, true, clientMetadata.getMIMEType());
+ this.token = token2;
+
if(splitfileType == Metadata.SPLITFILE_NONREDUNDANT) {
// Don't need to do much - just fetch everything and
piece it together.
blocksPerSegment = -1;
checkBlocksPerSegment = -1;
segmentCount = 1;
+ if(splitfileCheckBlocks.length > 0) {
+ Logger.error(this, "Splitfile type is
SPLITFILE_NONREDUNDANT yet "+splitfileCheckBlocks.length+" check blocks found!!
: "+this);
+ throw new
FetchException(FetchException.INVALID_METADATA, "Splitfile type is
non-redundant yet have "+splitfileCheckBlocks.length+" check blocks");
+ }
} else if(splitfileType == Metadata.SPLITFILE_ONION_STANDARD) {
byte[] params = metadata.splitfileParams();
if((params == null) || (params.length < 8))
@@ -127,11 +229,63 @@
// Will be segmented.
} else throw new MetadataParseException("Unknown splitfile
format: "+splitfileType);
this.maxTempLength = fetchContext.maxTempLength;
- if(Logger.shouldLog(Logger.MINOR, this))
+ if(logMINOR)
Logger.minor(this, "Algorithm: "+splitfileType+",
blocks per segment: "+blocksPerSegment+
", check blocks per segment:
"+checkBlocksPerSegment+", segments: "+segmentCount+
", data blocks:
"+splitfileDataBlocks.length+", check blocks: "+splitfileCheckBlocks.length);
segments = new SplitFileFetcherSegment[segmentCount]; //
initially null on all entries
+
+ // Setup bloom parameters.
+ if(persistent) {
+ // FIXME: Should this be encrypted? It's protected to
some degree by the salt...
+ // Since it isn't encrypted, it's likely to be very
sparse; we should name
+ // it appropriately...
+ try {
+ mainBloomFile =
context.persistentFG.makeRandomFile();
+ altBloomFile =
context.persistentFG.makeRandomFile();
+ } catch (IOException e) {
+ throw new
FetchException(FetchException.BUCKET_ERROR, "Unable to create Bloom filter
files", e);
+ }
+ } else {
+ // Not persistent, keep purely in RAM.
+ mainBloomFile = null;
+ altBloomFile = null;
+ }
+ int mainElementsPerKey = DEFAULT_MAIN_BLOOM_ELEMENTS_PER_KEY;
+ int origSize = splitfileDataBlocks.length +
splitfileCheckBlocks.length;
+ mainBloomK = (int) (mainElementsPerKey * 0.7);
+ long elementsLong = origSize * mainElementsPerKey;
+ // REDFLAG: SIZE LIMIT: 3.36TB limit!
+ if(elementsLong > Integer.MAX_VALUE)
+ throw new FetchException(FetchException.TOO_BIG,
"Cannot fetch splitfiles with more than
"+(Integer.MAX_VALUE/mainElementsPerKey)+" keys! (approx 3.3TB)");
+ int mainSizeBits = (int)elementsLong; // counting filter
+ if((mainSizeBits & 7) != 0)
+ mainSizeBits += (8 - (mainSizeBits & 7));
+ mainBloomFilterSizeBytes = mainSizeBits / 8 * 2; // counting
filter
+ double acceptableFalsePositives =
ACCEPTABLE_BLOOM_FALSE_POSITIVES_ALL_SEGMENTS / segments.length;
+ int perSegmentBitsPerKey = (int)
Math.ceil(Math.log(acceptableFalsePositives) / Math.log(0.6185));
+ int segBlocks = blocksPerSegment + checkBlocksPerSegment;
+ if(segBlocks > origSize)
+ segBlocks = origSize;
+ int perSegmentSize = perSegmentBitsPerKey * segBlocks;
+ if((perSegmentSize & 7) != 0)
+ perSegmentSize += (8 - (perSegmentSize & 7));
+ perSegmentBloomFilterSizeBytes = perSegmentSize / 8;
+ perSegmentK = BloomFilter.optimialK(perSegmentSize, segBlocks);
+ keyCount = origSize;
+ // Now create it.
+ if(logMINOR)
+ Logger.minor(this, "Creating block filter for "+this+":
keys="+(splitfileDataBlocks.length+splitfileCheckBlocks.length)+" main bloom
size "+mainBloomFilterSizeBytes+" bytes, K="+mainBloomK+",
filename="+mainBloomFile+" alt bloom filter: filename="+altBloomFile+"
segments: "+segments.length+" each is "+perSegmentBloomFilterSizeBytes+" bytes
k="+perSegmentK);
+ try {
+ tempListener = new SplitFileFetcherKeyListener(this,
keyCount, mainBloomFile, altBloomFile, mainBloomFilterSizeBytes, mainBloomK,
!fetchContext.cacheLocalRequests, localSalt, segments.length,
perSegmentBloomFilterSizeBytes, perSegmentK, persistent, true);
+ } catch (IOException e) {
+ throw new FetchException(FetchException.BUCKET_ERROR,
"Unable to write Bloom filters for splitfile");
+ }
+
+ if(persistent)
+ container.store(this);
+
+ blockFetchContext = new FetchContext(fetchContext,
FetchContext.SPLITFILE_DEFAULT_BLOCK_MASK, true, null);
if(segmentCount == 1) {
// splitfile* will be overwritten, this is bad
// so copy them
@@ -141,7 +295,20 @@
if(splitfileCheckBlocks.length > 0)
System.arraycopy(splitfileCheckBlocks, 0,
newSplitfileCheckBlocks, 0, splitfileCheckBlocks.length);
segments[0] = new
SplitFileFetcherSegment(splitfileType, newSplitfileDataBlocks,
newSplitfileCheckBlocks,
- this, archiveContext, fetchContext,
maxTempLength, recursionLevel, true);
+ this, archiveContext,
blockFetchContext, maxTempLength, recursionLevel, parent, 0, true);
+ for(int i=0;i<newSplitfileDataBlocks.length;i++) {
+ if(logMINOR) Logger.minor(this, "Added data
block "+i+" : "+newSplitfileDataBlocks[i].getNodeKey());
+
tempListener.addKey(newSplitfileDataBlocks[i].getNodeKey(), 0, context);
+ }
+ for(int i=0;i<newSplitfileCheckBlocks.length;i++) {
+ if(logMINOR) Logger.minor(this, "Added check
block "+i+" : "+newSplitfileCheckBlocks[i].getNodeKey());
+
tempListener.addKey(newSplitfileCheckBlocks[i].getNodeKey(), 0, context);
+ }
+ if(persistent) {
+ container.store(segments[0]);
+ segments[0].deactivateKeys(container);
+ container.deactivate(segments[0], 1);
+ }
} else {
int dataBlocksPtr = 0;
int checkBlocksPtr = 0;
@@ -155,33 +322,59 @@
System.arraycopy(splitfileDataBlocks,
dataBlocksPtr, dataBlocks, 0, copyDataBlocks);
if(copyCheckBlocks > 0)
System.arraycopy(splitfileCheckBlocks,
checkBlocksPtr, checkBlocks, 0, copyCheckBlocks);
+ segments[i] = new
SplitFileFetcherSegment(splitfileType, dataBlocks, checkBlocks, this,
archiveContext,
+ blockFetchContext,
maxTempLength, recursionLevel+1, parent, i, i == segments.length-1);
+ for(int j=0;j<dataBlocks.length;j++)
+
tempListener.addKey(dataBlocks[j].getNodeKey(), i, context);
+ for(int j=0;j<checkBlocks.length;j++)
+
tempListener.addKey(checkBlocks[j].getNodeKey(), i, context);
+ if(persistent) {
+ container.store(segments[i]);
+ segments[i].deactivateKeys(container);
+ container.deactivate(segments[i], 1);
+ for(int
x=dataBlocksPtr;x<dataBlocksPtr+copyDataBlocks;x++)
+ splitfileDataBlocks[x] = null;
+ for(int
x=checkBlocksPtr;x<checkBlocksPtr+copyCheckBlocks;x++)
+ splitfileCheckBlocks[x] = null;
+ }
dataBlocksPtr += copyDataBlocks;
checkBlocksPtr += copyCheckBlocks;
- segments[i] = new
SplitFileFetcherSegment(splitfileType, dataBlocks, checkBlocks, this,
archiveContext,
- fetchContext, maxTempLength,
recursionLevel+1, i == segments.length-1);
}
if(dataBlocksPtr != splitfileDataBlocks.length)
throw new
FetchException(FetchException.INVALID_METADATA, "Unable to allocate all data
blocks to segments - buggy or malicious inserter");
if(checkBlocksPtr != splitfileCheckBlocks.length)
throw new
FetchException(FetchException.INVALID_METADATA, "Unable to allocate all check
blocks to segments - buggy or malicious inserter");
}
- this.token = token2;
- parent.addBlocks(splitfileDataBlocks.length +
splitfileCheckBlocks.length);
- parent.addMustSucceedBlocks(splitfileDataBlocks.length);
+ parent.addBlocks(splitfileDataBlocks.length +
splitfileCheckBlocks.length, container);
+ parent.addMustSucceedBlocks(splitfileDataBlocks.length,
container);
+ parent.notifyClients(container, context);
+
+ try {
+ tempListener.writeFilters();
+ } catch (IOException e) {
+ throw new FetchException(FetchException.BUCKET_ERROR,
"Unable to write Bloom filters for splitfile");
+ }
}
/** Return the final status of the fetch. Throws an exception, or
returns a
* Bucket containing the fetched data.
* @throws FetchException If the fetch failed for some reason.
*/
- private Bucket finalStatus() throws FetchException {
+ private Bucket finalStatus(ObjectContainer container, ClientContext
context) throws FetchException {
long finalLength = 0;
for(int i=0;i<segments.length;i++) {
SplitFileFetcherSegment s = segments[i];
- if(!s.isFinished()) throw new
IllegalStateException("Not all finished");
- s.throwError();
+ if(persistent)
+ container.activate(s, 1);
+ if(!s.succeeded()) {
+ throw new IllegalStateException("Not all
finished");
+ }
+ s.throwError(container);
// If still here, it succeeded
- finalLength += s.decodedLength();
+ long sz = s.decodedLength(container);
+ finalLength += sz;
+ if(logMINOR)
+ Logger.minor(this, "Segment "+i+" decoded
length "+sz+" total length now "+finalLength+" for "+s.dataBuckets.length+"
blocks which should be "+(s.dataBuckets.length * NodeCHK.BLOCK_SIZE));
// Healing is done by Segment
}
if(finalLength > overrideLength) {
@@ -193,17 +386,22 @@
long bytesWritten = 0;
OutputStream os = null;
Bucket output;
+ if(persistent) {
+ container.activate(decompressors, 5);
+ if(returnBucket != null)
+ container.activate(returnBucket, 5);
+ }
try {
- if((returnBucket != null) && decompressors.isEmpty())
+ if((returnBucket != null) && decompressors.isEmpty()) {
output = returnBucket;
- else
- output =
fetchContext.bucketFactory.makeBucket(finalLength);
+ } else
+ output =
context.getBucketFactory(parent.persistent()).makeBucket(finalLength);
os = output.getOutputStream();
for(int i=0;i<segments.length;i++) {
SplitFileFetcherSegment s = segments[i];
long max = (finalLength < 0 ? 0 : (finalLength
- bytesWritten));
bytesWritten += s.writeDecodedDataTo(os, max);
- s.freeDecodedData();
+ s.freeDecodedData(container);
}
} catch (IOException e) {
throw new FetchException(FetchException.BUCKET_ERROR,
e);
@@ -217,20 +415,27 @@
}
}
}
+ if(finalLength != output.size()) {
+ Logger.error(this, "Final length is supposed to be
"+finalLength+" but only written "+output.size());
+ }
return output;
}
- public void segmentFinished(SplitFileFetcherSegment segment) {
- boolean logMINOR = Logger.shouldLog(Logger.MINOR, this);
+ public void segmentFinished(SplitFileFetcherSegment segment,
ObjectContainer container, ClientContext context) {
+ if(persistent)
+ container.activate(this, 1);
if(logMINOR) Logger.minor(this, "Finished segment: "+segment);
boolean finish = false;
synchronized(this) {
boolean allDone = true;
- for(int i=0;i<segments.length;i++)
- if(!segments[i].isFinished()) {
+ for(int i=0;i<segments.length;i++) {
+ if(persistent)
+ container.activate(segments[i], 1);
+ if(!segments[i].succeeded()) {
if(logMINOR) Logger.minor(this,
"Segment "+segments[i]+" is not finished");
allDone = false;
}
+ }
if(allDone) {
if(allSegmentsFinished) {
Logger.error(this, "Was already
finished! (segmentFinished("+segment+ ')', new Exception("debug"));
@@ -238,75 +443,281 @@
allSegmentsFinished = true;
finish = true;
}
- }
+ } else {
+ for(int i=0;i<segments.length;i++) {
+ if(segments[i] == segment) continue;
+ if(persistent)
+
container.deactivate(segments[i], 1);
+ }
+ }
notifyAll();
}
- if(finish) finish();
+ if(persistent) container.store(this);
+ if(finish) finish(container, context);
}
- private void finish() {
+ private void finish(ObjectContainer container, ClientContext context) {
+ if(persistent) {
+ container.activate(cb, 1);
+ }
+ context.getChkFetchScheduler().removePendingKeys(this, true);
+ boolean cbWasActive = true;
try {
synchronized(this) {
+ if(otherFailure != null) {
+ throw otherFailure;
+ }
if(finished) {
Logger.error(this, "Was already
finished");
return;
}
finished = true;
}
- Bucket data = finalStatus();
+ if(persistent)
+ container.store(this);
+ Bucket data = finalStatus(container, context);
// Decompress
+ if(persistent) {
+ container.activate(decompressors, 5);
+ container.activate(returnBucket, 5);
+ cbWasActive = container.ext().isActive(cb);
+ if(!cbWasActive)
+ container.activate(cb, 1);
+ container.activate(fetchContext, 1);
+ if(fetchContext == null) {
+ Logger.error(this, "Fetch context is
null");
+
if(!container.ext().isActive(fetchContext)) {
+ Logger.error(this, "Fetch
context is null and splitfile is not activated", new Exception("error"));
+ container.activate(this, 1);
+
container.activate(decompressors, 5);
+
container.activate(returnBucket, 5);
+
container.activate(fetchContext, 1);
+ } else {
+ Logger.error(this, "Fetch
context is null and splitfile IS activated", new Exception("error"));
+ }
+ }
+ container.activate(fetchContext, 1);
+ }
+ int count = 0;
while(!decompressors.isEmpty()) {
- Compressor c = (Compressor)
decompressors.removeLast();
+ Compressor c = (Compressor)
decompressors.remove(decompressors.size()-1);
+ if(logMINOR)
+ Logger.minor(this, "Decompressing with
"+c);
long maxLen =
Math.max(fetchContext.maxTempLength, fetchContext.maxOutputLength);
Bucket orig = data;
try {
Bucket out = returnBucket;
if(!decompressors.isEmpty()) out = null;
- data = c.decompress(data,
fetchContext.bucketFactory, maxLen, maxLen * 4, out);
+ data = c.decompress(data,
context.getBucketFactory(parent.persistent()), maxLen, maxLen * 4, out);
} catch (IOException e) {
- cb.onFailure(new
FetchException(FetchException.BUCKET_ERROR, e), this);
+ if(e.getMessage().equals("Not in GZIP
format") && count == 1) {
+ Logger.error(this, "Attempting
to decompress twice, failed, returning first round data: "+this);
+ break;
+ }
+ cb.onFailure(new
FetchException(FetchException.BUCKET_ERROR, e), this, container, context);
return;
} catch (CompressionOutputSizeException e) {
- cb.onFailure(new
FetchException(FetchException.TOO_BIG, e.estimatedSize, false /* FIXME */,
clientMetadata.getMIMEType()), this);
+ if(logMINOR)
+ Logger.minor(this, "Too big:
maxSize = "+fetchContext.maxOutputLength+" maxTempSize =
"+fetchContext.maxTempLength);
+ cb.onFailure(new
FetchException(FetchException.TOO_BIG, e.estimatedSize, false /* FIXME */,
clientMetadata.getMIMEType()), this, container, context);
return;
} finally {
- if(orig != data) orig.free();
+ if(orig != data) {
+ orig.free();
+ if(persistent)
orig.removeFrom(container);
+ }
}
+ count++;
}
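
[Aside: the loop above pops from the tail because decompressors are recorded while descending the metadata, so layers must be undone last-first. A self-contained sketch with hypothetical names, using the decompress() signature shown above:]

	Bucket unwrap(List decompressors, Bucket fetched, BucketFactory bf, long maxLen)
			throws IOException, CompressionOutputSizeException {
		Bucket data = fetched;
		while(!decompressors.isEmpty()) {
			// Tail-first: the last compressor applied is the first undone.
			Compressor c = (Compressor) decompressors.remove(decompressors.size() - 1);
			data = c.decompress(data, bf, maxLen, maxLen * 4, null);
		}
		return data;
	}
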
- cb.onSuccess(new FetchResult(clientMetadata, data),
this);
+ cb.onSuccess(new FetchResult(clientMetadata, data),
this, container, context);
} catch (FetchException e) {
- cb.onFailure(e, this);
+ cb.onFailure(e, this, container, context);
} catch (OutOfMemoryError e) {
OOMHandler.handleOOM(e);
System.err.println("Failing above attempted fetch...");
- cb.onFailure(new FetchException(FetchException.INTERNAL_ERROR, e), this);
+ cb.onFailure(new FetchException(FetchException.INTERNAL_ERROR, e), this, container, context);
} catch (Throwable t) {
- cb.onFailure(new FetchException(FetchException.INTERNAL_ERROR, t), this);
+ Logger.error(this, "Caught "+t, t);
+ cb.onFailure(new FetchException(FetchException.INTERNAL_ERROR, t), this, container, context);
}
+ if(!cbWasActive)
+ container.deactivate(cb, 1);
}
- public void schedule() {
- if(Logger.shouldLog(Logger.MINOR, this)) Logger.minor(this, "Scheduling "+this);
+ public void schedule(ObjectContainer container, ClientContext context) throws KeyListenerConstructionException {
+ if(persistent)
+ container.activate(this, 1);
+ if(logMINOR) Logger.minor(this, "Scheduling "+this);
+ SendableGet[] getters = new SendableGet[segments.length];
for(int i=0;i<segments.length;i++) {
- segments[i].schedule();
+ if(logMINOR)
+ Logger.minor(this, "Scheduling segment "+i+" :
"+segments[i]);
+ if(persistent)
+ container.activate(segments[i], 1);
+ getters[i] = segments[i].schedule(container, context);
+ if(persistent)
+ container.deactivate(segments[i], 1);
}
+ BlockSet blocks = fetchContext.blocks;
+ context.getChkFetchScheduler().register(this, getters, persistent, true, container, blocks, false);
}
- public void cancel() {
- for(int i=0;i<segments.length;i++)
- segments[i].cancel();
+ public void cancel(ObjectContainer container, ClientContext context) {
+ if(persistent)
+ container.activate(this, 1);
+ for(int i=0;i<segments.length;i++) {
+ if(logMINOR)
+ Logger.minor(this, "Cancelling segment "+i);
+ if(persistent)
+ container.activate(segments[i], 1);
+ segments[i].cancel(container, context);
+ }
}
public long getToken() {
return token;
}
- public void scheduleOffThread() {
- fetchContext.slowSerialExecutor[parent.priorityClass].execute(new Runnable() {
- public void run() {
- schedule();
+ /**
+ * Make our SplitFileFetcherKeyListener. Returns the one we created in the
+ * constructor if possible, otherwise makes a new one. We must have already
+ * constructed one at some point, maybe before a restart.
+ * @throws KeyListenerConstructionException
+ */
+ public KeyListener makeKeyListener(ObjectContainer container, ClientContext context) throws KeyListenerConstructionException {
+ synchronized(this) {
+ if(finished) return null;
+ if(tempListener != null) {
+ // Recently constructed
+ return tempListener;
}
- }, "Splitfile scheduler thread for "+this);
+ File main;
+ File alt;
+ boolean cacheLocalRequests;
+ if(fetchContext == null) {
+ Logger.error(this, "fetchContext deleted
without splitfile being deleted!");
+ return null;
+ }
+ if(persistent) {
+ container.activate(mainBloomFile, 5);
+ container.activate(altBloomFile, 5);
+ main = new File(mainBloomFile.getPath());
+ alt = new File(altBloomFile.getPath());
+ container.deactivate(mainBloomFile, 1);
+ container.deactivate(altBloomFile, 1);
+ container.activate(fetchContext, 1);
+ cacheLocalRequests = fetchContext.cacheLocalRequests;
+ container.deactivate(fetchContext, 1);
+ } else {
+ main = null;
+ alt = null;
+ cacheLocalRequests = fetchContext.cacheLocalRequests;
+ }
+ try {
+ if(logMINOR)
+ Logger.minor(this, "Attempting to read
Bloom filter for "+this+" main file="+main+" alt file="+alt);
+ tempListener =
+ new SplitFileFetcherKeyListener(this,
keyCount, main, alt, mainBloomFilterSizeBytes, mainBloomK, !cacheLocalRequests,
localSalt, segments.length, perSegmentBloomFilterSizeBytes, perSegmentK,
persistent, false);
+ } catch (IOException e) {
+ Logger.error(this, "Unable to read Bloom filter
for "+this+" attempting to reconstruct...", e);
+ main.delete();
+ alt.delete();
+ try {
+ mainBloomFile = context.fg.makeRandomFile();
+ altBloomFile = context.fg.makeRandomFile();
+ if(persistent)
+ container.store(this);
+ } catch (IOException e1) {
+ throw new KeyListenerConstructionException(new FetchException(FetchException.BUCKET_ERROR, "Unable to create Bloom filter files in reconstruction", e1));
+ }
+
+ try {
+ tempListener =
+ new SplitFileFetcherKeyListener(this, keyCount, mainBloomFile, altBloomFile, mainBloomFilterSizeBytes, mainBloomK, !fetchContext.cacheLocalRequests, localSalt, segments.length, perSegmentBloomFilterSizeBytes, perSegmentK, persistent, true);
+ } catch (IOException e1) {
+ throw new KeyListenerConstructionException(new FetchException(FetchException.BUCKET_ERROR, "Unable to reconstruct Bloom filters: "+e1, e1));
+ }
+ }
+ return tempListener;
+ }
}
+
+ public synchronized boolean isCancelled(ObjectContainer container) {
+ return finished;
+ }
+
+ public SplitFileFetcherSegment getSegment(int i) {
+ return segments[i];
+ }
+
+ public void removeMyPendingKeys(SplitFileFetcherSegment segment, ObjectContainer container, ClientContext context) {
+ keyCount = tempListener.killSegment(segment, container, context);
+ }
+
+ void setKeyCount(int keyCount2, ObjectContainer container) {
+ this.keyCount = keyCount2;
+ if(persistent)
+ container.store(this);
+ }
+
+ public void onFailed(KeyListenerConstructionException e, ObjectContainer container, ClientContext context) {
+ otherFailure = e.getFetchException();
+ cancel(container, context);
+ }
+
+ public void removeFrom(ObjectContainer container, ClientContext context) {
+ if(logMINOR) Logger.minor(this, "removeFrom() on "+this, new Exception("debug"));
+ if(!container.ext().isStored(this)) {
+ Logger.error(this, "Already removed??? on "+this, new Exception("error"));
+ return;
+ }
+ container.activate(blockFetchContext, 1);
+ blockFetchContext.removeFrom(container);
+ if(deleteFetchContext)
+ fetchContext.removeFrom(container);
+ container.activate(clientMetadata, 1);
+ clientMetadata.removeFrom(container);
+ container.delete(decompressors);
+ for(int i=0;i<segments.length;i++) {
+ SplitFileFetcherSegment segment = segments[i];
+ segments[i] = null;
+ container.activate(segment, 1);
+ segment.fetcherFinished(container, context);
+ }
+ container.activate(mainBloomFile, 5);
+ container.activate(altBloomFile, 5);
+ if(mainBloomFile != null && !mainBloomFile.delete() && mainBloomFile.exists())
+ Logger.error(this, "Unable to delete main bloom file: "+mainBloomFile+" for "+this);
+ else if(mainBloomFile == null)
+ Logger.error(this, "mainBloomFile is null on "+this);
+ else
+ if(logMINOR) Logger.minor(this, "Deleted main bloom file "+mainBloomFile);
+ if(altBloomFile != null && !altBloomFile.delete() && altBloomFile.exists())
+ Logger.error(this, "Unable to delete alt bloom file: "+altBloomFile+" for "+this);
+ else if(altBloomFile == null)
+ Logger.error(this, "altBloomFile is null on "+this);
+ else
+ if(logMINOR) Logger.minor(this, "Deleted alt bloom file "+altBloomFile);
+ container.delete(mainBloomFile);
+ container.delete(altBloomFile);
+ container.delete(this);
+ }
+
+ public boolean objectCanUpdate(ObjectContainer container) {
+ if(hashCode == 0) {
+ Logger.error(this, "Trying to update with hash 0 =>
already deleted! active="+container.ext().isActive(this)+"
stored="+container.ext().isStored(this), new Exception("error"));
+ return false;
+ }
+ return true;
+ }
+ public boolean objectCanNew(ObjectContainer container) {
+ if(hashCode == 0) {
+ Logger.error(this, "Trying to write with hash 0 =>
already deleted! active="+container.ext().isActive(this)+"
stored="+container.ext().isStored(this), new Exception("error"));
+ return false;
+ }
+ return true;
+ }
+
+
}
Copied: trunk/freenet/src/freenet/client/async/SplitFileFetcherKeyListener.java (from rev 26320, branches/db4o/freenet/src/freenet/client/async/SplitFileFetcherKeyListener.java)
===================================================================
--- trunk/freenet/src/freenet/client/async/SplitFileFetcherKeyListener.java (rev 0)
+++ trunk/freenet/src/freenet/client/async/SplitFileFetcherKeyListener.java 2009-04-01 20:34:09 UTC (rev 26322)
@@ -0,0 +1,383 @@
+package freenet.client.async;
+
+import java.io.DataInputStream;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.IOException;
+import java.io.RandomAccessFile;
+import java.nio.ByteBuffer;
+import java.security.MessageDigest;
+import java.util.ArrayList;
+
+import com.db4o.ObjectContainer;
+
+import freenet.crypt.SHA256;
+import freenet.keys.Key;
+import freenet.keys.KeyBlock;
+import freenet.node.PrioRunnable;
+import freenet.node.SendableGet;
+import freenet.support.BinaryBloomFilter;
+import freenet.support.CountingBloomFilter;
+import freenet.support.Logger;
+import freenet.support.io.NativeThread;
+
+/**
+ * KeyListener implementation for SplitFileFetcher.
+ * Details:
+ * - We have a bloom filter. This is kept in RAM, but stored in a file. It is a
+ * counting filter which is created with the splitfile; when a block is
+ * completed, it is removed from the filter, and we schedule a write after a
+ * certain period of time (we ensure that the write doesn't happen before that).
+ * Hence even on a fast node, we won't have to write the filter so frequently
+ * as to be a problem. We could use mmap'ed filters, but that might also be a
+ * problem with fd's.
+ * - When a block is actually found, on the database thread, we load the per-
+ * segment bloom filters from the SplitFileFetcher, and thus determine which
+ * segment it belongs to. These are non-counting and static.
+ * @author Matthew Toseland <toad at amphibian.dyndns.org> (0xE43DA450)
+ *
+ * LOCKING: Synchronize when changing something, and writing to disk.
+ * Don't need to synchronize on read in most cases, at least for sane
+ * BloomFilter implementations (that is, counting with counting width less than
+ * and divisible into 8).
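+ *
+ * Rough sketch of the two-level lookup described above, in terms of this
+ * class's own fields (illustration only, not part of the committed code):
+ * <pre>
+ * if(!filter.checkFilter(saltedKey)) return; // global-salt main filter says no
+ * byte[] local = localSaltKey(key);          // segment filters use the local salt
+ * for(int i=0;i&lt;segmentFilters.length;i++)
+ *     if(segmentFilters[i].checkFilter(local))
+ *         fetcher.getSegment(i).onGotKey(key, block, container, context);
+ * </pre>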
+ */
+public class SplitFileFetcherKeyListener implements KeyListener {
+
+ private final SplitFileFetcher fetcher;
+ private final boolean persistent;
+ private int keyCount;
+ private final byte[] filterBuffer;
+ private final CountingBloomFilter filter;
+ /** All the segment's bloom filters, stuck together into a single blob
+ * so can be read/written en bloc */
+ private final byte[] segmentsFilterBuffer;
+ private final BinaryBloomFilter[] segmentFilters;
+ /** We store the Bloom filter to this file, but we don't map it, since we
+ * can't generally afford the fd's. */
+ private final File mainBloomFile;
+ /** Stores Bloom filters for every segment. */
+ private final File altBloomFile;
+ /** Wait for this period for new data to come in before writing the filter.
+ * The filter is only ever subtracted from, so if we crash we just have a
+ * few more false positives. On a fast node with slow disk, writing on every
+ * completed block could become a major bottleneck. */
+ private static final int WRITE_DELAY = 60*1000;
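+ // For illustration: with WRITE_DELAY at 60s, a node completing even hundreds
+ // of blocks a minute pays for at most one filter write per minute, and a
+ // crash in that window merely leaves those keys behind as extra false
+ // positives, per the comment above.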
+ private final boolean dontCache;
+ private short prio;
+ /** Used only if we reach the per-segment bloom filters. The overall bloom
+ * filters use the global salt. */
+ private final byte[] localSalt;
+ private boolean killed;
+
+ /**
+ * Caller must create bloomFile, but it may be empty.
+ * @param newFilter If true, the bloom file is empty, and the bloom filter
+ * should be created from scratch.
+ * @throws IOException
+ */
+ public SplitFileFetcherKeyListener(SplitFileFetcher parent, int keyCount, File bloomFile, File altBloomFile, int mainBloomSizeBytes, int mainBloomK, boolean dontCache, byte[] localSalt, int segments, int segmentFilterSizeBytes, int segmentBloomK, boolean persistent, boolean newFilter) throws IOException {
+ fetcher = parent;
+ this.persistent = persistent;
+ this.keyCount = keyCount;
+ this.mainBloomFile = bloomFile;
+ this.altBloomFile = altBloomFile;
+ this.dontCache = dontCache;
+ assert(localSalt.length == 32);
+ if(persistent) {
+ this.localSalt = new byte[32];
+ System.arraycopy(localSalt, 0, this.localSalt, 0, 32);
+ } else {
+ this.localSalt = localSalt;
+ }
+ segmentsFilterBuffer = new byte[segmentFilterSizeBytes * segments];
+ ByteBuffer baseBuffer = ByteBuffer.wrap(segmentsFilterBuffer);
+ segmentFilters = new BinaryBloomFilter[segments];
+ int start = 0;
+ int end = segmentFilterSizeBytes;
+ for(int i=0;i<segments;i++) {
+ baseBuffer.position(start);
+ baseBuffer.limit(end);
+ ByteBuffer slice = baseBuffer.slice();
+ segmentFilters[i] = new BinaryBloomFilter(slice, segmentFilterSizeBytes * 8, segmentBloomK);
+ start += segmentFilterSizeBytes;
+ end += segmentFilterSizeBytes;
+ }
+
+ filterBuffer = new byte[mainBloomSizeBytes];
+ if(newFilter) {
+ filter = new CountingBloomFilter(mainBloomSizeBytes * 8 / 2, mainBloomK, filterBuffer);
+ filter.setWarnOnRemoveFromEmpty();
+ } else {
+ // Read from file.
+ FileInputStream fis = new FileInputStream(bloomFile);
+ DataInputStream dis = new DataInputStream(fis);
+ dis.readFully(filterBuffer);
+ dis.close();
+ filter = new CountingBloomFilter(mainBloomSizeBytes * 8 / 2, mainBloomK, filterBuffer);
+ filter.setWarnOnRemoveFromEmpty();
+ fis = new FileInputStream(altBloomFile);
+ dis = new DataInputStream(fis);
+ dis.readFully(segmentsFilterBuffer);
+ dis.close();
+ }
+ if(Logger.shouldLog(Logger.MINOR, this))
+ Logger.minor(this, "Created "+this+" for "+fetcher);
+ }
+
+ public long countKeys() {
+ return keyCount;
+ }
+
+ /**
+ * SplitFileFetcher adds keys in whatever blocks are convenient.
+ * @param keys
+ */
+ void addKey(Key key, int segNo, ClientContext context) {
+ byte[] saltedKey = context.getChkFetchScheduler().saltKey(key);
+ filter.addKey(saltedKey);
+ segmentFilters[segNo].addKey(localSaltKey(key));
+ if(!segmentFilters[segNo].checkFilter(localSaltKey(key)))
+ Logger.error(this, "Key added but not in filter:
"+key+" on "+this);
+ }
+
+ private byte[] localSaltKey(Key key) {
+ MessageDigest md = SHA256.getMessageDigest();
+ md.update(key.getRoutingKey());
+ md.update(localSalt);
+ byte[] ret = md.digest();
+ SHA256.returnMessageDigest(md);
+ return ret;
+ }
+
+ public boolean probablyWantKey(Key key, byte[] saltedKey) {
+ if(filter == null) Logger.error(this, "Probably want key: filter = null for "+this+ " fetcher = "+fetcher);
+ return filter.checkFilter(saltedKey);
+ }
+
+ public short definitelyWantKey(Key key, byte[] saltedKey, ObjectContainer container,
+ ClientContext context) {
+ // Caller has already called probablyWantKey(), so don't do it again.
+ byte[] salted = localSaltKey(key);
+ for(int i=0;i<segmentFilters.length;i++) {
+ if(segmentFilters[i].checkFilter(salted)) {
+ if(persistent) {
+ if(container.ext().isActive(fetcher))
+ Logger.error(this, "ALREADY ACTIVE in definitelyWantKey(): "+fetcher);
+ container.activate(fetcher, 1);
+ }
+ SplitFileFetcherSegment segment = fetcher.getSegment(i);
+ if(persistent)
+ container.deactivate(fetcher, 1);
+ if(persistent) {
+ if(container.ext().isActive(segment))
+ Logger.error(this, "ALREADY ACTIVE in definitelyWantKey(): "+segment);
+ container.activate(segment, 1);
+ }
+ boolean found = segment.getBlockNumber(key, container) >= 0;
+ if(!found)
+ Logger.error(this, "Found block in primary and segment bloom filters but segment doesn't want it: "+segment+" on "+this);
+ if(persistent)
+ container.deactivate(segment, 1);
+ if(found) return prio;
+ }
+ }
+ return -1;
+ }
+
+ public boolean handleBlock(Key key, byte[] saltedKey, KeyBlock block,
+ ObjectContainer container, ClientContext context) {
+ // Caller has already called probablyWantKey(), so don't do it again.
+ boolean found = false;
+ byte[] salted = localSaltKey(key);
+ boolean logMINOR = Logger.shouldLog(Logger.MINOR, this);
+ if(logMINOR)
+ Logger.minor(this, "handleBlock("+key+") on "+this+" for "+fetcher);
+ for(int i=0;i<segmentFilters.length;i++) {
+ boolean match;
+ synchronized(this) {
+ match = segmentFilters[i].checkFilter(salted);
+ }
+ if(match) {
+ if(persistent) {
+ if(!container.ext().isStored(fetcher)) {
+ Logger.error(this, "Fetcher not in database! for "+this);
+ return false;
+ }
+ if(container.ext().isActive(fetcher))
+ Logger.error(this, "ALREADY ACTIVATED: "+fetcher);
+ container.activate(fetcher, 1);
+ }
+ SplitFileFetcherSegment segment = fetcher.getSegment(i);
+ if(persistent) {
+ if(container.ext().isActive(segment))
+ Logger.error(this, "ALREADY ACTIVATED: "+segment);
+ container.activate(segment, 1);
+ }
+ if(logMINOR)
+ Logger.minor(this, "Key "+key+" may be in segment "+segment);
+ if(segment.onGotKey(key, block, container, context)) {
+ synchronized(this) {
+ if(filter.checkFilter(saltedKey)) {
+ filter.removeKey(saltedKey);
+ keyCount--;
+ } else {
+ Logger.error(this, "Not removing key from splitfile filter because already removed!: "+key+" for "+this, new Exception("debug"));
+ }
+ }
+ // Update the persistent keyCount.
+ fetcher.setKeyCount(keyCount, container);
+ found = true;
+ }
+ if(persistent)
+ container.deactivate(segment, 1);
+ if(persistent)
+ container.deactivate(fetcher, 1);
+ }
+ }
+ return found;
+ }
+
+ public boolean dontCache() {
+ return dontCache;
+ }
+
+ public HasKeyListener getHasKeyListener() {
+ return fetcher;
+ }
+
+ public short getPriorityClass(ObjectContainer container) {
+ return prio;
+ }
+
+ public SendableGet[] getRequestsForKey(Key key, byte[] saltedKey,
+ ObjectContainer container, ClientContext context) {
+ ArrayList<SendableGet> ret = new ArrayList<SendableGet>();
+ // Caller has already called probablyWantKey(), so don't do it again.
+ byte[] salted = localSaltKey(key);
+ for(int i=0;i<segmentFilters.length;i++) {
+ if(segmentFilters[i].checkFilter(salted)) {
+ if(persistent) {
+ if(container.ext().isActive(fetcher))
+ Logger.error(this, "ALREADY ACTIVATED in getRequestsForKey: "+fetcher);
+ container.activate(fetcher, 1);
+ }
+ SplitFileFetcherSegment segment = fetcher.getSegment(i);
+ if(persistent)
+ container.deactivate(fetcher, 1);
+ if(persistent) {
+ if(container.ext().isActive(segment))
+ Logger.error(this, "ALREADY ACTIVATED in getRequestsForKey: "+segment);
+ container.activate(segment, 1);
+ }
+ int blockNum = segment.getBlockNumber(key, container);
+ if(blockNum >= 0) {
+ ret.add(segment.getSubSegmentFor(blockNum, container));
+ }
+ if(persistent)
+ container.deactivate(segment, 1);
+ }
+ }
+ return ret.toArray(new SendableGet[ret.size()]);
+ }
+
+ public void onRemove() {
+ synchronized(this) {
+ killed = true;
+ }
+ if(persistent) {
+ mainBloomFile.delete();
+ altBloomFile.delete();
+ }
+ }
+
+ public boolean persistent() {
+ return persistent;
+ }
+
+ public void writeFilters() throws IOException {
+ if(!persistent) return;
+ synchronized(this) {
+ if(killed) return;
+ }
+ RandomAccessFile raf = new RandomAccessFile(mainBloomFile, "rw");
+ raf.write(filterBuffer);
+ raf.close();
+ raf = new RandomAccessFile(altBloomFile, "rw");
+ raf.write(segmentsFilterBuffer);
+ raf.close();
+ }
+
+ public synchronized int killSegment(SplitFileFetcherSegment segment, ObjectContainer container, ClientContext context) {
+ int segNo = segment.segNum;
+ segmentFilters[segNo].unsetAll();
+ Key[] removeKeys = segment.listKeys(container);
+ if(Logger.shouldLog(Logger.MINOR, this))
+ Logger.minor(this, "Removing segment from bloom filter: "+segment+" keys: "+removeKeys.length);
+ for(int i=0;i<removeKeys.length;i++) {
+ if(Logger.shouldLog(Logger.MINOR, this))
+ Logger.minor(this, "Removing key from bloom filter: "+removeKeys[i]);
+ byte[] salted = context.getChkFetchScheduler().saltKey(removeKeys[i]);
+ if(filter.checkFilter(salted)) {
+ filter.removeKey(salted);
+ } else
+ // Huh??
+ Logger.error(this, "Removing key "+removeKeys[i]+" for "+this+" from "+segment+" : NOT IN BLOOM FILTER!", new Exception("debug"));
+ }
+ scheduleWriteFilters(context);
+ return keyCount -= removeKeys.length;
+ }
+
+ private boolean writingBloomFilter;
+
+ /** Arrange to write the filters, at some point after this transaction is
+ * committed. */
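+ // Note (illustrative, not in the committed code): writingBloomFilter below
+ // is a simple coalescing guard: however many killSegment() calls arrive
+ // within one WRITE_DELAY window, at most one timed job is queued, and the
+ // job must clear the flag in its finally block or no further writes would
+ // ever be scheduled.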
+ private void scheduleWriteFilters(ClientContext context) {
+ synchronized(this) {
+ // Worst case, we end up blocking the database thread while a write completes off thread.
+ // Common case, the write executes on a separate thread.
+ // Don't run the write at too low a priority or we may get priority inversion.
+ if(writingBloomFilter) return;
+ writingBloomFilter = true;
+ try {
+ context.ticker.queueTimedJob(new PrioRunnable() {
+
+ public void run() {
+ synchronized(SplitFileFetcherKeyListener.this) {
+ try {
+ writeFilters();
+ } catch (IOException e) {
+ Logger.error(this, "Failed to write bloom filters, we will have more false positives on already-found blocks which aren't in the store: "+e, e);
+ } finally {
+ writingBloomFilter = false;
+ }
+ }
+ }
+
+ public int getPriority() {
+ // Don't run the write at too low a priority or we may get priority inversion.
+ return NativeThread.HIGH_PRIORITY;
+ }
+
+ }, WRITE_DELAY);
+ } catch (Throwable t) {
+ writingBloomFilter = false;
+ }
+ }
+ }
+
+ public boolean isEmpty() {
+ // FIXME: We rely on SplitFileFetcher unregistering itself.
+ // Maybe we should keep track of how many segments have been cleared?
+ // We'd have to be sure that they weren't cleared twice...?
+ return killed;
+ }
+
+ public boolean isSSK() {
+ return false;
+ }
+
+ public void objectOnDeactivate(ObjectContainer container) {
+ Logger.error(this, "Deactivating a SplitFileFetcherKeyListener:
"+this, new Exception("error"));
+ }
+
+}
Modified: trunk/freenet/src/freenet/client/async/SplitFileFetcherSegment.java
===================================================================
--- trunk/freenet/src/freenet/client/async/SplitFileFetcherSegment.java 2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/client/async/SplitFileFetcherSegment.java 2009-04-01 20:34:09 UTC (rev 26322)
@@ -5,27 +5,39 @@
import java.io.IOException;
import java.io.OutputStream;
+import java.util.HashSet;
import java.util.Vector;
+import com.db4o.ObjectContainer;
+
import freenet.client.ArchiveContext;
+import freenet.client.FECCallback;
import freenet.client.FECCodec;
import freenet.client.FECJob;
+import freenet.client.FECQueue;
import freenet.client.FailureCodeTracker;
import freenet.client.FetchContext;
import freenet.client.FetchException;
import freenet.client.Metadata;
import freenet.client.MetadataParseException;
import freenet.client.SplitfileBlock;
-import freenet.client.FECCodec.StandardOnionFECCodecEncoderCallback;
import freenet.keys.CHKBlock;
import freenet.keys.CHKEncodeException;
+import freenet.keys.CHKVerifyException;
import freenet.keys.ClientCHK;
import freenet.keys.ClientCHKBlock;
+import freenet.keys.ClientKey;
import freenet.keys.ClientKeyBlock;
import freenet.keys.Key;
+import freenet.keys.KeyBlock;
+import freenet.keys.KeyDecodeException;
import freenet.keys.NodeCHK;
+import freenet.keys.TooBigException;
import freenet.node.RequestScheduler;
+import freenet.node.SendableGet;
+import freenet.support.LogThresholdCallback;
import freenet.support.Logger;
+import freenet.support.RandomGrabArray;
import freenet.support.api.Bucket;
import freenet.support.io.BucketTools;
@@ -33,9 +45,20 @@
* A single segment within a SplitFileFetcher.
* This in turn controls a large number of SplitFileFetcherSubSegment's, which are registered on the ClientRequestScheduler.
*/
-public class SplitFileFetcherSegment implements StandardOnionFECCodecEncoderCallback {
+public class SplitFileFetcherSegment implements FECCallback {
private static volatile boolean logMINOR;
+
+ static {
+ Logger.registerLogThresholdCallback(new LogThresholdCallback() {
+
+ @Override
+ public void shouldUpdate() {
+ logMINOR = Logger.shouldLog(Logger.MINOR, this);
+ }
+ });
+ }
+
final short splitfileType;
final ClientCHK[] dataKeys;
final ClientCHK[] checkKeys;
@@ -45,11 +68,11 @@
final long[] checkCooldownTimes;
final int[] dataRetries;
final int[] checkRetries;
- final Vector subSegments;
+ final Vector<SplitFileFetcherSubSegment> subSegments;
final int minFetched;
final SplitFileFetcher parentFetcher;
+ final ClientRequester parent;
final ArchiveContext archiveContext;
- final FetchContext fetchContext;
final long maxBlockLength;
/** Has the segment finished processing? Irreversible. */
private volatile boolean finished;
@@ -81,16 +104,35 @@
final FailureCodeTracker errors;
private boolean finishing;
private boolean scheduled;
+ private final boolean persistent;
+ final int segNum;
- private FECCodec codec;
+ // A persistent hashCode is helpful in debugging, and also means we can put
+ // these objects into sets etc when we need to.
- public SplitFileFetcherSegment(short splitfileType, ClientCHK[] splitfileDataKeys, ClientCHK[] splitfileCheckKeys, SplitFileFetcher fetcher, ArchiveContext archiveContext, FetchContext fetchContext, long maxTempLength, int recursionLevel, boolean ignoreLastDataBlock) throws MetadataParseException, FetchException {
- logMINOR = Logger.shouldLog(Logger.MINOR, this);
+ private final int hashCode;
+
+ // After the fetcher has finished with the segment, *and* we have encoded and started healing inserts,
+ // we can removeFrom(). Note that encodes are queued to the database.
+ private boolean fetcherFinished = false;
+ private boolean encoderFinished = false;
+
+ public int hashCode() {
+ return hashCode;
+ }
+
+ private transient FECCodec codec;
+
+ public SplitFileFetcherSegment(short splitfileType, ClientCHK[] splitfileDataKeys, ClientCHK[] splitfileCheckKeys, SplitFileFetcher fetcher, ArchiveContext archiveContext, FetchContext blockFetchContext, long maxTempLength, int recursionLevel, ClientRequester requester, int segNum, boolean ignoreLastDataBlock) throws MetadataParseException, FetchException {
+ this.segNum = segNum;
+ this.hashCode = super.hashCode();
+ this.persistent = fetcher.persistent;
this.parentFetcher = fetcher;
this.ignoreLastDataBlock = ignoreLastDataBlock;
this.errors = new FailureCodeTracker(false);
this.archiveContext = archiveContext;
this.splitfileType = splitfileType;
+ this.parent = requester;
dataKeys = splitfileDataKeys;
checkKeys = splitfileCheckKeys;
if(splitfileType == Metadata.SPLITFILE_NONREDUNDANT) {
@@ -111,10 +153,9 @@
checkRetries = new int[checkKeys.length];
dataCooldownTimes = new long[dataKeys.length];
checkCooldownTimes = new long[checkKeys.length];
- subSegments = new Vector();
- this.fetchContext = fetchContext;
+ subSegments = new Vector<SplitFileFetcherSubSegment>();
maxBlockLength = maxTempLength;
- blockFetchContext = new FetchContext(fetchContext, FetchContext.SPLITFILE_DEFAULT_BLOCK_MASK, true);
+ this.blockFetchContext = blockFetchContext;
this.recursionLevel = 0;
if(logMINOR) Logger.minor(this, "Created "+this+" for
"+parentFetcher+" : "+dataRetries.length+" data blocks "+checkRetries.length+"
check blocks");
for(int i=0;i<dataKeys.length;i++)
@@ -123,22 +164,45 @@
if(checkKeys[i] == null) throw new NullPointerException("Null: check block "+i);
}
- public synchronized boolean isFinished() {
- return finished || parentFetcher.parent.isCancelled();
+ public synchronized boolean isFinished(ObjectContainer container) {
+ if(finished) return true;
+ // Deactivating parent is a *bad* side-effect, so avoid it.
+ boolean deactivateParent = false;
+ if(persistent) {
+ deactivateParent = !container.ext().isActive(parent);
+ if(deactivateParent) container.activate(parent, 1);
+ }
+ boolean ret = parent.isCancelled();
+ if(deactivateParent)
+ container.deactivate(parent, 1);
+ return ret;
}
+
+ public synchronized boolean succeeded() {
+ return finished;
+ }
- public synchronized boolean isFinishing() {
- return isFinished() || finishing;
+ public synchronized boolean isFinishing(ObjectContainer container) {
+ return isFinished(container) || finishing;
}
- /** Throw a FetchException, if we have one. Else do nothing. */
- public synchronized void throwError() throws FetchException {
- if(failureException != null)
- throw failureException;
+ /** Throw a FetchException, if we have one. Else do nothing.
+ * @param container */
+ public synchronized void throwError(ObjectContainer container) throws FetchException {
+ if(failureException != null) {
+ if(persistent) container.activate(failureException, 5);
+ if(persistent)
+ throw failureException.clone(); // We will remove, caller is responsible for clone
+ else
+ throw failureException;
+ }
}
- /** Decoded length? */
- public long decodedLength() {
+ /** Decoded length?
+ * @param container */
+ public long decodedLength(ObjectContainer container) {
+ if(persistent)
+ container.activate(decodedData, 1);
return decodedData.size();
}
@@ -166,120 +230,317 @@
return fatallyFailedBlocks;
}
- public void onSuccess(Bucket data, int blockNo, SplitFileFetcherSubSegment seg, ClientKeyBlock block) {
- if(data == null) throw new NullPointerException();
+ private synchronized short onSuccessInner(Bucket data, int blockNo, ClientKeyBlock block, ObjectContainer container, ClientContext context, SplitFileFetcherSubSegment sub) {
+ boolean dontNotify;
+ boolean allFailed = false;
boolean decodeNow = false;
- logMINOR = Logger.shouldLog(Logger.MINOR, this);
- if(logMINOR) Logger.minor(this, "Fetched block "+blockNo+" on
"+seg+" data="+dataBuckets.length+" check="+checkBuckets.length);
- if(parentFetcher.parent instanceof ClientGetter)
-
((ClientGetter)parentFetcher.parent).addKeyToBinaryBlob(block);
- // No need to unregister key, because it will be cleared in
tripPendingKey().
- boolean dontNotify;
boolean wasDataBlock = false;
- boolean allFailed = false;
- synchronized(this) {
- if(blockNo < dataKeys.length) {
- wasDataBlock = true;
- if(dataKeys[blockNo] == null) {
- if(!startedDecode) Logger.error(this, "Block already finished: "+blockNo);
- data.free();
- return;
+ if(finished) {
+ // Happens sometimes, don't complain about it...
+ // What this means is simply that there were a bunch of requests
+ // running, one of them completed, the whole segment went into
+ // decode, and now the extra requests are surplus to requirements.
+ // It's a slight overhead, but the alternative is worse.
+ if(logMINOR)
+ Logger.minor(this, "onSuccess() when already finished for "+this);
+ data.free();
+ return -1;
+ }
+ if(startedDecode) {
+ // Much the same.
+ if(logMINOR)
+ Logger.minor(this, "onSuccess() when started
decode for "+this);
+ data.free();
+ return -1;
+ }
+ if(blockNo < dataKeys.length) {
+ wasDataBlock = true;
+ if(dataKeys[blockNo] == null) {
+ if(!startedDecode) {
+ // This can happen.
+ // We queue a persistent download, we queue a transient.
+ // The transient goes through DatastoreChecker first,
+ // and feeds the block to us. We don't finish, because
+ // we need more blocks. Then the persistent goes through
+ // the DatastoreChecker, and calls us again with the same
+ // block.
+ if(logMINOR)
+ Logger.minor(this, "Block already finished: "+blockNo);
}
- dataRetries[blockNo] = 0; // Prevent healing of successfully fetched block.
- dataKeys[blockNo] = null;
- dataBuckets[blockNo].setData(data);
- } else if(blockNo < checkKeys.length + dataKeys.length) {
- blockNo -= dataKeys.length;
- if(checkKeys[blockNo] == null) {
- if(!startedDecode) Logger.error(this, "Check block already finished: "+blockNo);
- data.free();
- return;
+ data.free();
+ return -1;
+ }
+ dataRetries[blockNo] = 0; // Prevent healing of successfully fetched block.
+ if(persistent) {
+ container.activate(dataKeys[blockNo], 5);
+ dataKeys[blockNo].removeFrom(container);
+ }
+ dataKeys[blockNo] = null;
+ if(persistent)
+ container.activate(dataBuckets[blockNo], 1);
+ dataBuckets[blockNo].setData(data);
+ if(persistent) {
+ data.storeTo(container);
+ container.store(dataBuckets[blockNo]);
+ container.store(this); // We could return -1, so we need to store(this) here
+ }
+ } else if(blockNo < checkKeys.length + dataKeys.length) {
+ int checkNo = blockNo - dataKeys.length;
+ if(checkKeys[checkNo] == null) {
+ if(!startedDecode) {
+ if(logMINOR)
+ Logger.minor(this, "Check block
already finished: "+checkNo);
}
- checkRetries[blockNo] = 0; // Prevent healing of successfully fetched block.
- checkKeys[blockNo] = null;
- checkBuckets[blockNo].setData(data);
- } else
- Logger.error(this, "Unrecognized block number:
"+blockNo, new Exception("error"));
- if(startedDecode) {
- return;
+ data.free();
+ return -1;
+ }
+ checkRetries[checkNo] = 0; // Prevent healing of successfully fetched block.
+ if(persistent) {
+ container.activate(checkKeys[checkNo], 5);
+ checkKeys[checkNo].removeFrom(container);
+ }
+ checkKeys[checkNo] = null;
+ if(persistent)
+ container.activate(checkBuckets[checkNo], 1);
+ checkBuckets[checkNo].setData(data);
+ if(persistent) {
+ data.storeTo(container);
+ container.store(checkBuckets[checkNo]);
+ container.store(this); // We could return -1, so we need to store(this) here
+ }
+ } else
+ Logger.error(this, "Unrecognized block number:
"+blockNo, new Exception("error"));
+ if(startedDecode) {
+ return -1;
+ } else {
+ boolean tooSmall = data.size() < CHKBlock.DATA_LENGTH;
+ // Don't count the last data block, since we can't use it in FEC decoding.
+ if(tooSmall && ((!ignoreLastDataBlock) || (blockNo != dataKeys.length - 1))) {
+ fail(new FetchException(FetchException.INVALID_METADATA, "Block too small in splitfile: block "+blockNo+" of "+dataKeys.length+" data keys, "+checkKeys.length+" check keys"), container, context, true);
+ return -1;
+ }
+ if(!(ignoreLastDataBlock && blockNo == dataKeys.length - 1 && tooSmall))
+ fetchedBlocks++;
+ else
+ // This block is not going to be fetched, and because of the insertion format.
+ // Thus it is a fatal failure. We need to track it, because it is quite possible
+ // to fetch the last block, not complete because it's the last block, and hang.
+ fatallyFailedBlocks++;
+ // However, if we manage to get EVERY data block (common on a small splitfile),
+ // we don't need to FEC decode.
+ if(wasDataBlock)
+ fetchedDataBlocks++;
+ if(logMINOR) Logger.minor(this, "Fetched "+fetchedBlocks+" blocks in onSuccess("+blockNo+")");
+ boolean haveDataBlocks = fetchedDataBlocks == dataKeys.length;
+ decodeNow = (!startedDecode) && (fetchedBlocks >= minFetched || haveDataBlocks);
+ if(decodeNow) {
+ startedDecode = true;
+ finishing = true;
} else {
- boolean tooSmall = data.size() < CHKBlock.DATA_LENGTH;
- // Don't count the last data block, since we can't use it in FEC decoding.
- if(tooSmall && ((!ignoreLastDataBlock) || (blockNo != dataKeys.length - 1))) {
- fail(new FetchException(FetchException.INVALID_METADATA, "Block too small in splitfile: block "+blockNo+" of "+dataKeys.length+" data keys, "+checkKeys.length+" check keys"));
- return;
- }
- if(!(ignoreLastDataBlock && blockNo == dataKeys.length - 1 && tooSmall))
- fetchedBlocks++;
- else
- // This block is not going to be fetched, and because of the insertion format.
- // Thus it is a fatal failure. We need to track it, because it is quite possible
- // to fetch the last block, not complete because it's the last block, and hang.
- fatallyFailedBlocks++;
- // However, if we manage to get EVERY data block (common on a small splitfile),
- // we don't need to FEC decode.
- if(wasDataBlock)
- fetchedDataBlocks++;
- if(logMINOR) Logger.minor(this, "Fetched "+fetchedBlocks+" blocks in onSuccess("+blockNo+")");
- boolean haveDataBlocks = fetchedDataBlocks == dataKeys.length;
- decodeNow = (fetchedBlocks >= minFetched || haveDataBlocks);
- if(decodeNow) {
- startedDecode = true;
- finishing = true;
- } else {
- // Avoid hanging when we have one n-1 check blocks, we succeed on the last data block,
- // we don't have the other data blocks, and we have nothing else fetching.
- allFailed = failedBlocks + fatallyFailedBlocks > (dataKeys.length + checkKeys.length - minFetched);
- }
+ // Avoid hanging when we have n-1 check blocks, we succeed on the last data block,
+ // we don't have the other data blocks, and we have nothing else fetching.
+ allFailed = failedBlocks + fatallyFailedBlocks > (dataKeys.length + checkKeys.length - minFetched);
}
- dontNotify = !scheduled;
}
- parentFetcher.parent.completedBlock(dontNotify);
- seg.possiblyRemoveFromParent();
+ dontNotify = !scheduled;
+ short res = 0;
+ if(dontNotify) res |= ON_SUCCESS_DONT_NOTIFY;
+ if(allFailed) res |= ON_SUCCESS_ALL_FAILED;
+ if(decodeNow) res |= ON_SUCCESS_DECODE_NOW;
+ return res;
+ }
+
+ private static final short ON_SUCCESS_DONT_NOTIFY = 1;
+ private static final short ON_SUCCESS_ALL_FAILED = 2;
+ private static final short ON_SUCCESS_DECODE_NOW = 4;
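+ // For illustration (added note): onSuccessInner() packs its outcome into
+ // these bits, so e.g. a result of 5 means DONT_NOTIFY|DECODE_NOW, while a
+ // result of -1 means the block was consumed and there is nothing further
+ // for finishOnSuccess() to do.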
+
+ public void onSuccess(Bucket data, int blockNo, ClientKeyBlock block, ObjectContainer container, ClientContext context, SplitFileFetcherSubSegment sub) {
+ if(persistent)
+ container.activate(this, 1);
+ if(data == null) throw new NullPointerException();
+ if(logMINOR) Logger.minor(this, "Fetched block "+blockNo+" in
"+this+" data="+dataBuckets.length+" check="+checkBuckets.length);
+ if(parent instanceof ClientGetter)
+ ((ClientGetter)parent).addKeyToBinaryBlob(block,
container, context);
+ // No need to unregister key, because it will be cleared in
tripPendingKey().
+ short result = onSuccessInner(data, blockNo, block, container,
context, sub);
+ if(result == (short)-1) return;
+ finishOnSuccess(result, container, context);
+ }
+
+ private void finishOnSuccess(short result, ObjectContainer container, ClientContext context) {
+ boolean dontNotify = (result & ON_SUCCESS_DONT_NOTIFY) == ON_SUCCESS_DONT_NOTIFY;
+ boolean allFailed = (result & ON_SUCCESS_ALL_FAILED) == ON_SUCCESS_ALL_FAILED;
+ boolean decodeNow = (result & ON_SUCCESS_DECODE_NOW) == ON_SUCCESS_DECODE_NOW;
+ if(logMINOR) Logger.minor(this, "finishOnSuccess: result = "+result+" dontNotify="+dontNotify+" allFailed="+allFailed+" decodeNow="+decodeNow);
+ if(persistent) {
+ container.store(this);
+ container.activate(parent, 1);
+ }
+ parent.completedBlock(dontNotify, container, context);
if(decodeNow) {
- removeSubSegments();
- decode();
+ if(persistent)
+ container.activate(parentFetcher, 1);
+ parentFetcher.removeMyPendingKeys(this, container, context);
+ if(persistent)
+ container.deactivate(parentFetcher, 1);
+ removeSubSegments(container, context, false);
+ decode(container, context);
} else if(allFailed) {
- fail(new FetchException(FetchException.SPLITFILE_ERROR, errors));
+ fail(new FetchException(FetchException.SPLITFILE_ERROR, errors), container, context, true);
}
+ if(persistent) {
+ container.deactivate(parent, 1);
+ }
}
- public void decode() {
+ public void decode(ObjectContainer container, ClientContext context) {
+ if(persistent)
+ container.activate(this, 1);
// Now decode
if(logMINOR) Logger.minor(this, "Decoding
"+SplitFileFetcherSegment.this);
- codec = FECCodec.getCodec(splitfileType, dataKeys.length,
checkKeys.length, blockFetchContext.executor);
+ if(persistent)
+ container.store(this);
+ // Activate buckets
+ if(persistent) {
+ for(int i=0;i<dataBuckets.length;i++)
+ container.activate(dataBuckets[i], 1);
+ }
+ if(persistent) {
+ for(int i=0;i<checkBuckets.length;i++)
+ container.activate(checkBuckets[i], 1);
+ }
+ int data = 0;
+ for(int i=0;i<dataBuckets.length;i++) {
+ if(dataBuckets[i].getData() != null) {
+ data++;
+ }
+ }
+ if(data == dataBuckets.length) {
+ if(logMINOR)
+ Logger.minor(this, "Already decoded");
+ if(persistent) {
+ for(int i=0;i<dataBuckets.length;i++) {
+ container.activate(dataBuckets[i].getData(), 1);
+ }
+ }
+ onDecodedSegment(container, context, null, null, null, dataBuckets, checkBuckets);
+ return;
+ }
+
if(splitfileType != Metadata.SPLITFILE_NONREDUNDANT) {
- codec.addToQueue(new FECJob(codec, dataBuckets, checkBuckets, CHKBlock.DATA_LENGTH, fetchContext.bucketFactory, this, true));
+ FECQueue queue = context.fecQueue;
+ // Double-check...
+ int count = 0;
+ for(int i=0;i<dataBuckets.length;i++) {
+ if(dataBuckets[i].getData() != null)
+ count++;
+ }
+ for(int i=0;i<checkBuckets.length;i++) {
+ if(checkBuckets[i].getData() != null)
+ count++;
+ }
+ if(count < dataBuckets.length) {
+ Logger.error(this, "Attempting to decode but
only "+count+" of "+dataBuckets.length+" blocks available!", new
Exception("error"));
+ }
+ if(persistent)
+ container.activate(parent, 1);
+ Bucket lastBlock = dataBuckets[dataBuckets.length-1].data;
+ if(lastBlock != null) {
+ if(persistent)
+ container.activate(lastBlock, 1);
+ if(ignoreLastDataBlock && lastBlock.size() < CHKBlock.DATA_LENGTH) {
+ lastBlock.free();
+ if(persistent)
+ lastBlock.removeFrom(container);
+ dataBuckets[dataBuckets.length-1].data = null;
+ } else if(lastBlock.size() != CHKBlock.DATA_LENGTH) {
+ // All new inserts will have the last block padded. If it was an old insert, ignoreLastDataBlock
+ // would be set. Another way we can get here is if the last data block of a segment other than
+ // the last segment is too short.
+ fail(new FetchException(FetchException.INVALID_METADATA, "Last data block is not the standard size"), container, context, true);
+ }
+ }
+ if(codec == null)
+ codec = FECCodec.getCodec(splitfileType, dataKeys.length, checkKeys.length);
+ FECJob job = new FECJob(codec, queue, dataBuckets, checkBuckets, CHKBlock.DATA_LENGTH, context.getBucketFactory(persistent), this, true, parent.getPriorityClass(), persistent);
+ codec.addToQueue(job, queue, container);
+ if(logMINOR)
+ Logger.minor(this, "Queued FEC job: "+job);
+ if(persistent)
+ container.deactivate(parent, 1);
// Now have all the data blocks (not necessarily all the check blocks)
+ } else {
+ Logger.error(this, "SPLITFILE_NONREDUNDANT !!");
+ onDecodedSegment(container, context, null, null, null, null, null);
}
}
- public void onDecodedSegment() {
+ public void onDecodedSegment(ObjectContainer container, ClientContext context, FECJob job, Bucket[] dataBuckets2, Bucket[] checkBuckets2, SplitfileBlock[] dataBlockStatus, SplitfileBlock[] checkBlockStatus) {
+ if(persistent) {
+ container.activate(parentFetcher, 1);
+ container.activate(parent, 1);
+ container.activate(context, 1);
+ }
+ if(codec == null)
+ codec = FECCodec.getCodec(splitfileType, dataKeys.length, checkKeys.length);
+ // Because we use SplitfileBlock, we DON'T have to copy here.
+ // See FECCallback comments for explanation.
try {
- if(isCollectingBinaryBlob()) {
+ if(persistent) {
for(int i=0;i<dataBuckets.length;i++) {
- Bucket data = dataBuckets[i].getData();
+ // The FECCodec won't set them.
+ // But they should be active.
+ if(dataBlockStatus[i] != dataBuckets[i]) {
+ long theirID = container.ext().getID(dataBlockStatus[i]);
+ long ourID = container.ext().getID(dataBuckets[i]);
+ if(theirID == ourID) {
+ Logger.error(this, "DB4O BUG DETECTED IN DECODED SEGMENT!: our block: "+dataBuckets[i]+" block from decode "+dataBlockStatus[i]+" both have ID "+ourID+" = "+theirID);
+ dataBuckets[i] = (MinimalSplitfileBlock) dataBlockStatus[i];
+ }
+ }
+ if(logMINOR)
+ Logger.minor(this, "Data block "+i+" is "+dataBuckets[i]);
+ if(!container.ext().isStored(dataBuckets[i]))
+ Logger.error(this, "Data block "+i+" is not stored!");
+ else if(!container.ext().isActive(dataBuckets[i]))
+ Logger.error(this, "Data block "+i+" is inactive! : "+dataBuckets[i]);
+ if(dataBuckets[i] == null)
+ Logger.error(this, "Data block "+i+" is null!");
+ else if(dataBuckets[i].data == null)
+ Logger.error(this, "Data block "+i+" has null data!");
+ else
+ dataBuckets[i].data.storeTo(container);
+ container.store(dataBuckets[i]);
+ }
+ }
+ if(isCollectingBinaryBlob(parent)) {
+ for(int i=0;i<dataBuckets.length;i++) {
+ Bucket data = dataBlockStatus[i].getData();
if(data == null)
throw new NullPointerException("Data bucket "+i+" of "+dataBuckets.length+" is null");
try {
- maybeAddToBinaryBlob(data, i, false);
+ maybeAddToBinaryBlob(data, i, false, container, context);
} catch (FetchException e) {
- fail(e);
+ fail(e, container, context, false);
return;
}
}
}
- decodedData = fetchContext.bucketFactory.makeBucket(maxBlockLength * dataBuckets.length);
- if(logMINOR) Logger.minor(this, "Copying data from data blocks");
+ decodedData = context.getBucketFactory(persistent).makeBucket(maxBlockLength * dataBuckets.length);
+ if(logMINOR) Logger.minor(this, "Copying data from "+dataBuckets.length+" data blocks");
OutputStream os = decodedData.getOutputStream();
long osSize = 0;
for(int i=0;i<dataBuckets.length;i++) {
+ if(logMINOR) Logger.minor(this, "Copying data
from block "+i);
SplitfileBlock status = dataBuckets[i];
+ if(status == null) throw new
NullPointerException();
Bucket data = status.getData();
if(data == null)
throw new NullPointerException("Data
bucket "+i+" of "+dataBuckets.length+" is null");
+ if(persistent) container.activate(data, 1);
long copied = BucketTools.copyTo(data, os, Long.MAX_VALUE);
osSize += copied;
if(i != dataBuckets.length-1 && copied != 32768)
@@ -291,18 +552,34 @@
// Must set finished BEFORE calling parentFetcher.
// Otherwise a race is possible that might result in it not seeing our finishing.
finished = true;
- if(codec == null || !isCollectingBinaryBlob())
- parentFetcher.segmentFinished(SplitFileFetcherSegment.this);
+ if(persistent) container.store(this);
+ if(splitfileType == Metadata.SPLITFILE_NONREDUNDANT || !isCollectingBinaryBlob(parent))
+ parentFetcher.segmentFinished(SplitFileFetcherSegment.this, container, context);
+ // Leave active before queueing
} catch (IOException e) {
Logger.normal(this, "Caught bucket error?: "+e, e);
synchronized(this) {
finished = true;
failureException = new FetchException(FetchException.BUCKET_ERROR);
}
- parentFetcher.segmentFinished(SplitFileFetcherSegment.this);
+ if(persistent) container.store(this);
+ parentFetcher.segmentFinished(SplitFileFetcherSegment.this, container, context);
+ if(persistent)
+ encoderFinished(container, context);
return;
}
+ if(splitfileType == Metadata.SPLITFILE_NONREDUNDANT) {
+ if(persistent) {
+ container.deactivate(parentFetcher, 1);
+ container.deactivate(parent, 1);
+ container.deactivate(context, 1);
+ }
+ if(persistent)
+ encoderFinished(container, context);
+ return;
+ }
+
// Now heal
/** Splitfile healing:
@@ -310,69 +587,175 @@
* reconstructed and reinserted.
*/
+ // FIXME don't heal if ignoreLastBlock.
+ Bucket lastBlock = dataBuckets[dataBuckets.length-1].data;
+ if(lastBlock != null) {
+ if(persistent)
+ container.activate(lastBlock, 1);
+ if(lastBlock.size() != CHKBlock.DATA_LENGTH) {
+ try {
+ dataBuckets[dataBuckets.length-1].data =
+ BucketTools.pad(lastBlock, CHKBlock.DATA_LENGTH, context.persistentBucketFactory, (int) lastBlock.size());
+ lastBlock.free();
+ if(persistent) {
+ lastBlock.removeFrom(container);
+ dataBuckets[dataBuckets.length-1].storeTo(container);
+ }
+ } catch (IOException e) {
+ fail(new FetchException(FetchException.BUCKET_ERROR, e), container, context, true);
+ }
+ }
+ }
+
// Encode any check blocks we don't have
- if(codec != null) {
- codec.addToQueue(new FECJob(codec, dataBuckets, checkBuckets, CHKBlock.DATA_LENGTH, fetchContext.bucketFactory, this, false));
+ try {
+ codec.addToQueue(new FECJob(codec, context.fecQueue, dataBuckets, checkBuckets, 32768, context.getBucketFactory(persistent), this, false, parent.getPriorityClass(), persistent), context.fecQueue, container);
+ if(persistent) {
+ container.deactivate(parentFetcher, 1);
+ container.deactivate(parent, 1);
+ container.deactivate(context, 1);
}
+ } catch (Throwable t) {
+ Logger.error(this, "Caught "+t, t);
+ if(persistent)
+ encoderFinished(container, context);
+ }
}
- public void onEncodedSegment() {
+ public void onEncodedSegment(ObjectContainer container, ClientContext context, FECJob job, Bucket[] dataBuckets2, Bucket[] checkBuckets2, SplitfileBlock[] dataBlockStatus, SplitfileBlock[] checkBlockStatus) {
+ try {
+ if(persistent) {
+ container.activate(parent, 1);
+ }
+ if(logMINOR)
+ Logger.minor(this, "Encoded "+this);
+ // Because we use SplitfileBlock, we DON'T have to copy here.
+ // See FECCallback comments for explanation.
synchronized(this) {
// Now insert *ALL* blocks on which we had at least one failure, and didn't eventually succeed
for(int i=0;i<dataBuckets.length;i++) {
boolean heal = false;
+ if(dataBuckets[i] == null) {
+ Logger.error(this, "Data bucket "+i+"
is null in onEncodedSegment on "+this);
+ continue;
+ }
+ if(dataBuckets[i] != dataBlockStatus[i]) {
+ Logger.error(this, "Data block "+i+" :
ours is "+dataBuckets[i]+" codec's is "+dataBlockStatus[i]);
+ if(persistent) {
+
if(container.ext().getID(dataBuckets[i]) ==
container.ext().getID(dataBlockStatus[i]))
+ Logger.error(this,
"DB4O BUG DETECTED: SAME UID FOR TWO OBJECTS:
"+dataBuckets[i]+"="+container.ext().getID(dataBuckets[i])+" and
"+dataBlockStatus[i]+"="+container.ext().getID(dataBlockStatus[i])+" ...
attempting workaround ...");
+ Logger.error(this, "Ours is
"+(container.ext().isStored(dataBuckets[i])?"stored
":"")+(container.ext().isActive(dataBuckets[i])?"active ":"")+" UUID
"+container.ext().getID(dataBuckets[i]));
+ Logger.error(this, "Theirs is
"+(container.ext().isStored(dataBlockStatus[i])?"stored
":"")+(container.ext().isActive(dataBlockStatus[i])?"active ":"")+" UUID
"+container.ext().getID(dataBlockStatus[i]));
+ }
+ dataBuckets[i] =
(MinimalSplitfileBlock) dataBlockStatus[i];
+ }
Bucket data = dataBuckets[i].getData();
+ if(data == null) {
+ Logger.error(this, "Data bucket "+i+"
has null contents in onEncodedSegment on "+this+" for block "+dataBuckets[i]);
+
if(!container.ext().isStored(dataBuckets[i]))
+ Logger.error(this, "Splitfile
block appears not to be stored");
+ else
if(!container.ext().isActive(dataBuckets[i]))
+ Logger.error(this, "Splitfile
block appears not to be active");
+ continue;
+ }
+
if(dataRetries[i] > 0)
heal = true;
if(heal) {
- queueHeal(data);
+ queueHeal(data, container, context);
+ dataBuckets[i].data = null; // So that it doesn't remove the data
} else {
dataBuckets[i].data.free();
- dataBuckets[i].data = null;
}
+ if(persistent)
+ dataBuckets[i].removeFrom(container);
dataBuckets[i] = null;
+ if(persistent && dataKeys[i] != null)
+ dataKeys[i].removeFrom(container);
dataKeys[i] = null;
}
for(int i=0;i<checkBuckets.length;i++) {
boolean heal = false;
+ // Check buckets will already be active because the FEC codec
+ // has been using them.
+ if(checkBuckets[i] == null) {
+ Logger.error(this, "Check bucket "+i+"
is null in onEncodedSegment on "+this);
+ continue;
+ }
+ if(checkBuckets[i] != checkBlockStatus[i]) {
+ Logger.error(this, "Check block "+i+" :
ours is "+checkBuckets[i]+" codec's is "+checkBlockStatus[i]);
+ if(persistent) {
+
if(container.ext().getID(checkBuckets[i]) ==
container.ext().getID(checkBlockStatus[i]))
+ Logger.error(this,
"DB4O BUG DETECTED: SAME UID FOR TWO OBJECTS:
"+checkBuckets[i]+"="+container.ext().getID(checkBuckets[i])+" and
"+checkBlockStatus[i]+"="+container.ext().getID(checkBlockStatus[i])+" ...
attempting workaround ...");
+ Logger.error(this, "Ours is
"+(container.ext().isStored(checkBuckets[i])?"stored
":"")+(container.ext().isActive(checkBuckets[i])?"active ":"")+" UUID
"+container.ext().getID(checkBuckets[i]));
+ Logger.error(this, "Theirs is
"+(container.ext().isStored(checkBlockStatus[i])?"stored
":"")+(container.ext().isActive(checkBlockStatus[i])?"active ":"")+" UUID
"+container.ext().getID(checkBlockStatus[i]));
+ }
+ checkBuckets[i] =
(MinimalSplitfileBlock) checkBlockStatus[i];
+ }
Bucket data = checkBuckets[i].getData();
+ if(data == null) {
+ Logger.error(this, "Check bucket "+i+"
has null contents in onEncodedSegment on "+this+" for block "+checkBuckets[i]);
+
if(!container.ext().isStored(dataBuckets[i]))
+ Logger.error(this, "Splitfile
block appears not to be stored");
+ else
if(!container.ext().isActive(dataBuckets[i]))
+ Logger.error(this, "Splitfile
block appears not to be active");
+ continue;
+ }
try {
- maybeAddToBinaryBlob(data, i, true);
+ maybeAddToBinaryBlob(data, i, true, container, context);
} catch (FetchException e) {
- fail(e);
+ fail(e, container, context, false);
return;
}
if(checkRetries[i] > 0)
heal = true;
if(heal) {
- queueHeal(data);
+ queueHeal(data, container, context);
+ checkBuckets[i].data = null;
} else {
- checkBuckets[i].data.free();
+ data.free();
}
+ if(persistent)
+ checkBuckets[i].removeFrom(container);
checkBuckets[i] = null;
+ if(persistent && checkKeys[i] != null)
+ checkKeys[i].removeFrom(container);
checkKeys[i] = null;
}
+ if(persistent && !fetcherFinished) {
+ container.store(this);
+ }
}
// Defer the completion until we have generated healing blocks if we are collecting binary blobs.
- if(isCollectingBinaryBlob())
- parentFetcher.segmentFinished(SplitFileFetcherSegment.this);
+ if(isCollectingBinaryBlob(parent)) {
+ if(persistent)
+ container.activate(parentFetcher, 1);
+ parentFetcher.segmentFinished(SplitFileFetcherSegment.this, container, context);
+ if(persistent)
+ container.deactivate(parentFetcher, 1);
+ }
+ } finally {
+ if(persistent)
+ encoderFinished(container, context);
+ }
}
- boolean isCollectingBinaryBlob() {
- if(parentFetcher.parent instanceof ClientGetter) {
- ClientGetter getter = (ClientGetter) (parentFetcher.parent);
+ boolean isCollectingBinaryBlob(ClientRequester parent) {
+ if(parent instanceof ClientGetter) {
+ ClientGetter getter = (ClientGetter) (parent);
return getter.collectingBinaryBlob();
} else return false;
}
- private void maybeAddToBinaryBlob(Bucket data, int i, boolean check) throws FetchException {
- if(parentFetcher.parent instanceof ClientGetter) {
- ClientGetter getter = (ClientGetter) (parentFetcher.parent);
+ private void maybeAddToBinaryBlob(Bucket data, int i, boolean check, ObjectContainer container, ClientContext context) throws FetchException {
+ if(parent instanceof ClientGetter) {
+ ClientGetter getter = (ClientGetter) (parent);
if(getter.collectingBinaryBlob()) {
try {
ClientCHKBlock block = ClientCHKBlock.encode(data, false, true, (short)-1, data.size());
- getter.addKeyToBinaryBlob(block);
+ getter.addKeyToBinaryBlob(block, container, context);
} catch (CHKEncodeException e) {
Logger.error(this, "Failed to encode
(collecting binary blob) "+(check?"check":"data")+" block "+i+": "+e, e);
throw new
FetchException(FetchException.INTERNAL_ERROR, "Failed to encode for binary
blob: "+e);
@@ -383,129 +766,249 @@
}
}
- private void queueHeal(Bucket data) {
+ /**
+ * Queue the data for a healing insert. The data will be freed when the healing insert completes,
+ * or immediately if a healing insert isn't queued. If we are persistent, copies the data.
+ * @param data
+ * @param container
+ * @param context
+ */
+ private void queueHeal(Bucket data, ObjectContainer container, ClientContext context) {
+ if(persistent) {
+ try {
+ Bucket copy = context.tempBucketFactory.makeBucket(data.size());
+ BucketTools.copy(data, copy);
+ data.free();
+ if(persistent)
+ data.removeFrom(container);
+ data = copy;
+ } catch (IOException e) {
+ Logger.normal(this, "Failed to copy data for
healing: "+e, e);
+ data.free();
+ if(persistent)
+ data.removeFrom(container);
+ return;
+ }
+ }
if(logMINOR) Logger.minor(this, "Queueing healing insert for
"+data+" on "+this);
- fetchContext.healingQueue.queue(data);
+ context.healingQueue.queue(data, context);
}
- /** This is after any retries and therefore is either out-of-retries or fatal */
- public void onFatalFailure(FetchException e, int blockNo, SplitFileFetcherSubSegment seg) {
- logMINOR = Logger.shouldLog(Logger.MINOR, this);
+ /** This is after any retries and therefore is either out-of-retries or fatal
+ * @param container */
+ public void onFatalFailure(FetchException e, int blockNo, SplitFileFetcherSubSegment seg, ObjectContainer container, ClientContext context) {
+ if(persistent)
+ container.activate(this, 1);
if(logMINOR) Logger.minor(this, "Permanently failed block:
"+blockNo+" on "+this+" : "+e, e);
boolean allFailed;
// Since we can't keep the key, we need to unregister for it at
this point to avoid a memory leak
- NodeCHK key = getBlockNodeKey(blockNo);
- if(key != null) seg.unregisterKey(key);
synchronized(this) {
- if(isFinishing()) return; // this failure is now irrelevant, and cleanup will occur on the decoder thread
+ if(isFinishing(container)) return; // this failure is now irrelevant, and cleanup will occur on the decoder thread
if(blockNo < dataKeys.length) {
if(dataKeys[blockNo] == null) {
Logger.error(this, "Block already
finished: "+blockNo);
return;
}
+ if(persistent) {
+ container.activate(dataKeys[blockNo], 1);
+ dataKeys[blockNo].removeFrom(container);
+ }
dataKeys[blockNo] = null;
} else if(blockNo < checkKeys.length + dataKeys.length)
{
if(checkKeys[blockNo-dataKeys.length] == null) {
Logger.error(this, "Check block already
finished: "+blockNo);
return;
}
+ if(persistent) {
+ container.activate(checkKeys[blockNo-dataKeys.length], 1);
+ checkKeys[blockNo-dataKeys.length].removeFrom(container);
+ }
checkKeys[blockNo-dataKeys.length] = null;
} else
Logger.error(this, "Unrecognized block number:
"+blockNo, new Exception("error"));
// :(
+ boolean deactivateParent = false; // can get called from weird places, don't deactivate parent if not necessary
+ if(persistent) {
+ deactivateParent = !container.ext().isActive(parent);
+ if(deactivateParent) container.activate(parent, 1);
+ }
if(e.isFatal()) {
fatallyFailedBlocks++;
- parentFetcher.parent.fatallyFailedBlock();
+ parent.fatallyFailedBlock(container, context);
} else {
failedBlocks++;
- parentFetcher.parent.failedBlock();
+ parent.failedBlock(container, context);
}
+ if(deactivateParent)
+ container.deactivate(parent, 1);
// Once it is no longer possible to have a successful
fetch, fail...
allFailed = failedBlocks + fatallyFailedBlocks >
(dataKeys.length + checkKeys.length - minFetched);
}
+ if(persistent)
+ container.store(this);
if(allFailed)
- fail(new FetchException(FetchException.SPLITFILE_ERROR,
errors));
- else
- seg.possiblyRemoveFromParent();
+ fail(new FetchException(FetchException.SPLITFILE_ERROR,
errors), container, context, false);
+ else if(seg != null)
+ seg.possiblyRemoveFromParent(container, context);
}
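
Editorial note: the give-up test at the end of onFatalFailure() is simple
erasure-coding arithmetic: a segment needs any minFetched of its data+check
blocks, so it is unrecoverable once more than (data + check - minFetched)
blocks have permanently failed. A one-method sketch of that condition
(standalone, illustrative names):

    // Sketch of the allFailed condition: with n = dataBlocks + checkBlocks
    // total blocks and minFetched needed for FEC decode, the segment is
    // dead as soon as more than n - minFetched blocks can never succeed.
    class SegmentFailureSketch {
        static boolean allFailed(int failedBlocks, int fatallyFailedBlocks,
                                 int dataBlocks, int checkBlocks, int minFetched) {
            return failedBlocks + fatallyFailedBlocks
                    > dataBlocks + checkBlocks - minFetched;
        }

        public static void main(String[] args) {
            // e.g. 128 data + 128 check, need any 128: survives 128 dead blocks
            System.out.println(allFailed(120, 8, 128, 128, 128)); // false
            System.out.println(allFailed(121, 8, 128, 128, 128)); // true
        }
    }
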
- /** A request has failed non-fatally, so the block may be retried */
- public void onNonFatalFailure(FetchException e, int blockNo,
SplitFileFetcherSubSegment seg, RequestScheduler sched) {
+ /** A request has failed non-fatally, so the block may be retried
+ * @param container */
+ public void onNonFatalFailure(FetchException e, int blockNo,
SplitFileFetcherSubSegment seg, ObjectContainer container, ClientContext
context) {
+ if(persistent) {
+ container.activate(blockFetchContext, 1);
+ }
+ int maxTries = blockFetchContext.maxNonSplitfileRetries;
+ RequestScheduler sched = context.getFetchScheduler(false);
+ seg.removeBlockNum(blockNo, container, false);
+ SplitFileFetcherSubSegment sub = onNonFatalFailure(e, blockNo,
seg, container, context, sched, maxTries);
+ if(sub != null) {
+ sub.reschedule(container, context);
+ if(persistent && sub != seg) container.deactivate(sub,
1);
+ }
+ }
+
+ public void onNonFatalFailure(FetchException[] failures, int[]
blockNos, SplitFileFetcherSubSegment seg, ObjectContainer container,
ClientContext context) {
+ if(persistent) {
+ container.activate(blockFetchContext, 1);
+ }
+ int maxTries = blockFetchContext.maxNonSplitfileRetries;
+ RequestScheduler sched = context.getFetchScheduler(false);
+ HashSet<SplitFileFetcherSubSegment> toSchedule = null;
+ seg.removeBlockNums(blockNos, container);
+ for(int i=0;i<failures.length;i++) {
+ SplitFileFetcherSubSegment sub =
+ onNonFatalFailure(failures[i], blockNos[i],
seg, container, context, sched, maxTries);
+ if(sub != null) {
+ if(toSchedule == null)
+ toSchedule = new
HashSet<SplitFileFetcherSubSegment>();
+ toSchedule.add(sub);
+ }
+ }
+ if(toSchedule != null && !toSchedule.isEmpty()) {
+ for(SplitFileFetcherSubSegment sub : toSchedule) {
+ sub.reschedule(container, context);
+ if(persistent && sub != seg)
container.deactivate(sub, 1);
+ }
+ }
+ }
+
+ /**
+	 * Caller must set(this) iff this returns non-null.
+ */
+ private SplitFileFetcherSubSegment onNonFatalFailure(FetchException e,
int blockNo, SplitFileFetcherSubSegment seg, ObjectContainer container,
ClientContext context, RequestScheduler sched, int maxTries) {
+ if(logMINOR) Logger.minor(this, "Calling onNonFatalFailure for
block "+blockNo+" on "+this+" from "+seg);
int tries;
- int maxTries = blockFetchContext.maxNonSplitfileRetries;
boolean failed = false;
boolean cooldown = false;
ClientCHK key;
SplitFileFetcherSubSegment sub = null;
synchronized(this) {
- if(isFinished()) return;
+ if(isFinished(container)) return null;
if(blockNo < dataKeys.length) {
key = dataKeys[blockNo];
+ if(persistent)
+ container.activate(key, 5);
tries = ++dataRetries[blockNo];
if(tries > maxTries && maxTries >= 0) failed =
true;
else {
- sub = getSubSegment(tries);
- if(tries %
ClientRequestScheduler.COOLDOWN_RETRIES == 0) {
+ sub = getSubSegment(tries, container,
false, seg);
+ if(tries %
RequestScheduler.COOLDOWN_RETRIES == 0) {
long now =
System.currentTimeMillis();
if(dataCooldownTimes[blockNo] >
now)
Logger.error(this,
"Already on the cooldown queue! for "+this+" data block no "+blockNo, new
Exception("error"));
else
-						dataCooldownTimes[blockNo] = sched.queueCooldown(key, sub);
+						dataCooldownTimes[blockNo] = sched.queueCooldown(key, sub, container);
cooldown = true;
}
}
} else {
int checkNo = blockNo - dataKeys.length;
key = checkKeys[checkNo];
+ if(persistent)
+ container.activate(key, 5);
tries = ++checkRetries[checkNo];
if(tries > maxTries && maxTries >= 0) failed =
true;
else {
- sub = getSubSegment(tries);
- if(tries %
ClientRequestScheduler.COOLDOWN_RETRIES == 0) {
+ sub = getSubSegment(tries, container,
false, seg);
+ if(tries %
RequestScheduler.COOLDOWN_RETRIES == 0) {
long now =
System.currentTimeMillis();
if(checkCooldownTimes[checkNo]
> now)
Logger.error(this,
"Already on the cooldown queue! for "+this+" check block no "+blockNo, new
Exception("error"));
else
-						checkCooldownTimes[checkNo] = sched.queueCooldown(key, sub);
+						checkCooldownTimes[checkNo] = sched.queueCooldown(key, sub, container);
cooldown = true;
}
}
}
}
+ if(tries != seg.retryCount+1) {
+ Logger.error(this, "Failed on segment "+seg+" but tries
for block "+blockNo+" (after increment) is "+tries);
+ }
if(failed) {
- onFatalFailure(e, blockNo, seg);
+ onFatalFailure(e, blockNo, seg, container, context);
if(logMINOR)
Logger.minor(this, "Not retrying block
"+blockNo+" on "+this+" : tries="+tries+"/"+maxTries);
- return;
+ return null;
}
+ boolean mustSchedule = false;
if(cooldown) {
- // Register to the next sub-segment before removing
from the old one.
- sub.getScheduler().addPendingKey(key, sub);
- seg.unregisterKey(key.getNodeKey());
+ // Registered to cooldown queue
+ if(logMINOR)
+ Logger.minor(this, "Added to cooldown queue:
"+key+" for "+this+" was on segment "+seg+" now registered to "+sub);
} else {
// If we are here we are going to retry
- // Unregister from the old sub-segment before
registering on the new.
- seg.unregisterKey(key.getNodeKey());
+ mustSchedule = sub.add(blockNo, true, container,
context, false);
if(logMINOR)
Logger.minor(this, "Retrying block "+blockNo+"
on "+this+" : tries="+tries+"/"+maxTries+" : "+sub);
- sub.add(blockNo, false);
}
+ if(persistent) {
+ container.store(this);
+ container.deactivate(key, 5);
+ }
+ if(mustSchedule)
+ return sub;
+ else
+ return null;
}
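
Editorial note: the retry bookkeeping above follows one rule: a non-fatal
failure normally re-adds the block to the subsegment for its new retry
count, but every RequestScheduler.COOLDOWN_RETRIES-th attempt parks the key
on a cooldown queue until a wakeup time. A standalone sketch of that
decision (the constant values are illustrative, not necessarily Freenet's):

    // Sketch of the retry/cooldown decision in onNonFatalFailure(): give up
    // past maxTries, park on the cooldown queue every COOLDOWN_RETRIES-th
    // attempt, otherwise retry immediately at the new retry count.
    class CooldownDecisionSketch {
        static final int COOLDOWN_RETRIES = 3;                   // illustrative
        static final long COOLDOWN_PERIOD_MS = 30 * 60 * 1000L;  // illustrative

        enum Action { FATAL, COOLDOWN, RETRY }

        long cooldownWakeup = -1;

        Action onNonFatalFailure(int tries, int maxTries, long now) {
            if (maxTries >= 0 && tries > maxTries)
                return Action.FATAL;                 // out of retries
            if (tries % COOLDOWN_RETRIES == 0) {
                cooldownWakeup = now + COOLDOWN_PERIOD_MS;
                return Action.COOLDOWN;              // requeued after wakeup
            }
            return Action.RETRY;                     // re-add to subsegment
        }
    }
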
- private SplitFileFetcherSubSegment getSubSegment(int retryCount) {
+ private SplitFileFetcherSubSegment getSubSegment(int retryCount,
ObjectContainer container, boolean noCreate, SplitFileFetcherSubSegment
dontDeactivate) {
SplitFileFetcherSubSegment sub;
+ if(persistent)
+ container.activate(subSegments, 1);
+ SplitFileFetcherSubSegment ret = null;
+ int dupes = 0;
synchronized(this) {
for(int i=0;i<subSegments.size();i++) {
sub = (SplitFileFetcherSubSegment)
subSegments.get(i);
- if(sub.retryCount == retryCount) return sub;
+ if(persistent) container.activate(sub, 1);
+ if(sub.retryCount == retryCount) {
+ if(ret != null) {
+ Logger.error(this, "Duplicate
subsegment (count="+dupes+"): "+ret+" and "+sub+" for retry count
"+retryCount+" on "+this);
+ dupes++;
+ } else
+ ret = sub;
+ }
+ if(persistent && sub != ret && sub !=
dontDeactivate) container.deactivate(sub, 1);
}
- sub = new SplitFileFetcherSubSegment(this, retryCount);
+ if(ret != null) return ret;
+ if(noCreate) return null;
+ boolean deactivateParent = false;
+ if(persistent) {
+ deactivateParent =
!container.ext().isActive(parent);
+ if(deactivateParent) container.activate(parent,
1);
+ }
+ sub = new SplitFileFetcherSubSegment(this, parent,
retryCount);
+ if(deactivateParent)
+ container.deactivate(parent, 1);
subSegments.add(sub);
}
+ if(persistent)
+ container.ext().store(subSegments, 1);
return sub;
}
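
Editorial note: getSubSegment() keeps one subsegment per retry count,
creating it on demand; duplicates should never exist, so they are logged
rather than silently merged. A plain-collections sketch of the same scan,
without the db4o activation plumbing (names illustrative):

    import java.util.ArrayList;
    import java.util.List;

    // Sketch of getSubSegment(): subsegments bucket blocks by retry count;
    // the scan returns the match, complains about duplicates, and only
    // creates a new bucket when noCreate is false.
    class SubSegmentIndexSketch {
        static final class Sub {
            final int retryCount;
            Sub(int retryCount) { this.retryCount = retryCount; }
        }

        private final List<Sub> subSegments = new ArrayList<Sub>();

        synchronized Sub getSubSegment(int retryCount, boolean noCreate) {
            Sub ret = null;
            for (Sub sub : subSegments) {
                if (sub.retryCount != retryCount) continue;
                if (ret != null)
                    System.err.println("Duplicate subsegment for retry count "
                            + retryCount);
                else
                    ret = sub;
            }
            if (ret != null || noCreate) return ret;
            Sub sub = new Sub(retryCount);
            subSegments.add(sub);
            return sub;
        }
    }
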
- private void fail(FetchException e) {
+ void fail(FetchException e, ObjectContainer container, ClientContext
context, boolean dontDeactivateParent) {
synchronized(this) {
if(finished) return;
finished = true;
@@ -516,47 +1019,76 @@
}
for(int i=0;i<dataBuckets.length;i++) {
MinimalSplitfileBlock b = dataBuckets[i];
+ if(persistent)
+ container.activate(b, 2);
if(b != null) {
Bucket d = b.getData();
if(d != null) d.free();
}
+ if(persistent)
+ b.removeFrom(container);
dataBuckets[i] = null;
+ if(persistent && dataKeys[i] != null)
+ dataKeys[i].removeFrom(container);
+ dataKeys[i] = null;
}
for(int i=0;i<checkBuckets.length;i++) {
MinimalSplitfileBlock b = checkBuckets[i];
+ if(persistent)
+ container.activate(b, 2);
if(b != null) {
Bucket d = b.getData();
if(d != null) d.free();
}
+ if(persistent)
+ b.removeFrom(container);
checkBuckets[i] = null;
+ if(persistent && checkKeys[i] != null)
+ checkKeys[i].removeFrom(container);
+ checkKeys[i] = null;
}
}
- removeSubSegments();
- parentFetcher.segmentFinished(this);
+ removeSubSegments(container, context, false);
+ if(persistent) {
+ container.store(this);
+ container.activate(parentFetcher, 1);
+ }
+ parentFetcher.removeMyPendingKeys(this, container, context);
+ parentFetcher.segmentFinished(this, container, context);
+ if(persistent && !dontDeactivateParent)
+ container.deactivate(parentFetcher, 1);
}
- public void schedule() {
+ public SplitFileFetcherSubSegment schedule(ObjectContainer container,
ClientContext context) {
+ if(persistent) {
+ container.activate(this, 1);
+ }
try {
- SplitFileFetcherSubSegment seg = getSubSegment(0);
- for(int
i=0;i<dataRetries.length+checkRetries.length;i++) {
- seg.add(i, true);
- }
+ SplitFileFetcherSubSegment seg = getSubSegment(0,
container, false, null);
+ if(persistent)
+ container.activate(seg, 1);
+ seg.addAll(dataRetries.length+checkRetries.length,
true, container, context, false);
+
+ if(logMINOR)
+ Logger.minor(this, "scheduling "+seg+" :
"+seg.blockNums);
- seg.schedule();
synchronized(this) {
scheduled = true;
}
- parentFetcher.parent.notifyClients();
- if(logMINOR)
- Logger.minor(this, "scheduling "+seg+" :
"+seg.blockNums);
+ if(persistent)
+ container.store(this);
+ if(persistent)
+ container.deactivate(seg, 1);
+ return seg;
} catch (Throwable t) {
Logger.error(this, "Caught "+t+" scheduling "+this, t);
- fail(new FetchException(FetchException.INTERNAL_ERROR,
t));
+ fail(new FetchException(FetchException.INTERNAL_ERROR,
t), container, context, true);
+ return null;
}
}
- public void cancel() {
- fail(new FetchException(FetchException.CANCELLED));
+ public void cancel(ObjectContainer container, ClientContext context) {
+ fail(new FetchException(FetchException.CANCELLED), container,
context, true);
}
public void onBlockSetFinished(ClientGetState state) {
@@ -567,17 +1099,21 @@
// Ignore
}
- public synchronized ClientCHK getBlockKey(int blockNum) {
+ public synchronized ClientCHK getBlockKey(int blockNum, ObjectContainer
container) {
+ ClientCHK ret;
if(blockNum < 0) return null;
else if(blockNum < dataKeys.length)
- return dataKeys[blockNum];
+ ret = dataKeys[blockNum];
else if(blockNum < dataKeys.length + checkKeys.length)
- return checkKeys[blockNum - dataKeys.length];
+ ret = checkKeys[blockNum - dataKeys.length];
else return null;
+ if(persistent)
+ container.activate(ret, 5);
+ return ret;
}
- public NodeCHK getBlockNodeKey(int blockNum) {
- ClientCHK key = getBlockKey(blockNum);
+ public NodeCHK getBlockNodeKey(int blockNum, ObjectContainer container)
{
+ ClientCHK key = getBlockKey(blockNum, container);
if(key != null) return key.getNodeCHK();
else return null;
}
@@ -590,7 +1126,7 @@
* case we get duplicated structures in memory.
* @return True if we removed the subsegment.
*/
- public synchronized boolean maybeRemoveSeg(SplitFileFetcherSubSegment
segment) {
+ public synchronized boolean maybeRemoveSeg(SplitFileFetcherSubSegment
segment, ObjectContainer container) {
int retryCount = segment.retryCount;
boolean dontRemove = true;
for(int i=0;i<dataRetries.length;i++)
@@ -603,28 +1139,46 @@
dontRemove = false;
break;
}
- if(isFinishing()) dontRemove = false;
+ if(isFinishing(container)) dontRemove = false;
if(dontRemove) return false;
if(logMINOR)
Logger.minor(this, "Removing sub segment: "+segment+"
for retry count "+retryCount);
+ if(persistent) {
+ container.activate(subSegments, 1);
+ }
for(int i=0;i<subSegments.size();i++) {
if(segment.equals(subSegments.get(i))) {
subSegments.remove(i);
i--;
}
}
+ if(persistent)
+ container.store(subSegments);
return true;
}
- private void removeSubSegments() {
+ private void removeSubSegments(ObjectContainer container, ClientContext
context, boolean finishing) {
+ if(persistent)
+ container.activate(subSegments, 1);
SplitFileFetcherSubSegment[] deadSegs;
synchronized(this) {
deadSegs = (SplitFileFetcherSubSegment[])
subSegments.toArray(new SplitFileFetcherSubSegment[subSegments.size()]);
subSegments.clear();
}
+ if(persistent && deadSegs.length > 0)
+ container.store(this);
for(int i=0;i<deadSegs.length;i++) {
- deadSegs[i].kill();
+ if(persistent)
+ container.activate(deadSegs[i], 1);
+ deadSegs[i].kill(container, context, true);
+			context.getChkFetchScheduler().removeFromStarterQueue(deadSegs[i], container, true);
+ if(persistent)
+ container.deactivate(deadSegs[i], 1);
}
+ if(persistent && !finishing) {
+ container.store(this);
+ container.store(subSegments);
+ }
}
public synchronized long getCooldownWakeup(int blockNum) {
@@ -634,46 +1188,63 @@
return checkCooldownTimes[blockNum - dataKeys.length];
}
- public void requeueAfterCooldown(Key key, long time) {
- Vector v = null;
+ /**
+ * @return True if the key was wanted, false otherwise.
+ */
+ public boolean requeueAfterCooldown(Key key, long time, ObjectContainer
container, ClientContext context, SplitFileFetcherSubSegment dontDeactivate) {
+ if(persistent)
+ container.activate(this, 1);
+ Vector<SplitFileFetcherSubSegment> v = null;
boolean notFound = true;
synchronized(this) {
- if(isFinishing()) return;
+ if(isFinishing(container)) return false;
int maxTries = blockFetchContext.maxNonSplitfileRetries;
for(int i=0;i<dataKeys.length;i++) {
if(dataKeys[i] == null) continue;
- if(dataKeys[i].getNodeKey().equals(key)) {
+ ClientKey k = dataKeys[i];
+ if(persistent)
+ container.activate(k, 5);
+ if(k.getNodeKey().equals(key)) {
if(dataCooldownTimes[i] > time) {
if(logMINOR)
- Logger.minor(this, "Not
retrying after cooldown for data block "+i+"as deadline has not passed yet on
"+this);
- return;
+ Logger.minor(this, "Not
retrying after cooldown for data block "+i+" as deadline has not passed yet on
"+this+" remaining time: "+(dataCooldownTimes[i]-time)+"ms");
+ return false;
}
int tries = dataRetries[i];
- SplitFileFetcherSubSegment sub =
getSubSegment(tries);
+ SplitFileFetcherSubSegment sub =
getSubSegment(tries, container, false, dontDeactivate);
if(logMINOR)
Logger.minor(this, "Retrying after
cooldown on "+this+": data block "+i+" on "+this+" :
tries="+tries+"/"+maxTries+" : "+sub);
- if(v == null) v = new Vector();
- sub.add(i, true);
+ if(v == null) v = new
Vector<SplitFileFetcherSubSegment>();
+ sub.add(i, true, container, context, true);
if(!v.contains(sub)) v.add(sub);
notFound = false;
+ } else {
+ if(persistent)
+ container.deactivate(k, 5);
}
}
for(int i=0;i<checkKeys.length;i++) {
if(checkKeys[i] == null) continue;
- if(checkKeys[i].getNodeKey().equals(key)) {
+ ClientKey k = checkKeys[i];
+ if(persistent)
+ container.activate(k, 5);
+ if(k.getNodeKey().equals(key)) {
if(checkCooldownTimes[i] > time) {
if(logMINOR)
- Logger.minor(this, "Not
retrying after cooldown for data block "+i+" as deadline has not passed yet on
"+this);
- return;
+ Logger.minor(this, "Not
retrying after cooldown for check block "+i+" as deadline has not passed yet on
"+this+" remaining time: "+(checkCooldownTimes[i]-time)+"ms");
+ return false;
}
int tries = checkRetries[i];
- SplitFileFetcherSubSegment sub =
getSubSegment(tries);
+ SplitFileFetcherSubSegment sub =
getSubSegment(tries, container, false, dontDeactivate);
if(logMINOR)
Logger.minor(this, "Retrying after
cooldown on "+this+": check block "+i+" on "+this+" :
tries="+tries+"/"+maxTries+" : "+sub);
- if(v == null) v = new Vector();
- sub.add(i+dataKeys.length, true);
+ if(v == null) v = new
Vector<SplitFileFetcherSubSegment>();
+ sub.add(i+dataKeys.length, true, container,
context, true);
if(!v.contains(sub)) v.add(sub);
notFound = false;
+ } else {
+ if(persistent)
+ container.deactivate(k, 5);
}
}
}
@@ -682,37 +1253,91 @@
}
if(v != null) {
for(int i=0;i<v.size();i++) {
- ((SplitFileFetcherSubSegment)
v.get(i)).schedule();
+ SplitFileFetcherSubSegment sub =
(SplitFileFetcherSubSegment) v.get(i);
+ if(persistent && sub != dontDeactivate)
+ container.activate(sub, 1);
+ RandomGrabArray rga = sub.getParentGrabArray();
+ if(rga == null) {
+ sub.reschedule(container, context);
+ } else {
+// if(logMINOR) {
+ if(persistent)
+ container.activate(rga,
1);
+ if(!rga.contains(sub,
container)) {
+ Logger.error(this,
"Sub-segment has RGA but isn't registered to it!!: "+sub+" for "+rga);
+						sub.reschedule(container, context);
+					}
+					if(persistent)
+						container.deactivate(rga, 1);
+// }
+ }
+ if(persistent && sub != dontDeactivate)
+ container.deactivate(sub, 1);
}
}
+ return true;
}
- public synchronized long getCooldownWakeupByKey(Key key) {
+ public synchronized long getCooldownWakeupByKey(Key key,
ObjectContainer container) {
for(int i=0;i<dataKeys.length;i++) {
if(dataKeys[i] == null) continue;
- if(dataKeys[i].getNodeKey().equals(key)) {
+ ClientKey k = dataKeys[i];
+ if(persistent)
+ container.activate(k, 5);
+ if(k.getNodeKey().equals(key)) {
return dataCooldownTimes[i];
+ } else {
+ if(persistent)
+ container.deactivate(k, 5);
}
}
for(int i=0;i<checkKeys.length;i++) {
if(checkKeys[i] == null) continue;
+ ClientKey k = checkKeys[i];
+ if(persistent)
+ container.activate(k, 5);
if(checkKeys[i].getNodeKey().equals(key)) {
return checkCooldownTimes[i];
+ } else {
+ if(persistent)
+ container.deactivate(k, 5);
}
}
return -1;
}
- public synchronized int getBlockNumber(Key key) {
- for(int i=0;i<dataKeys.length;i++)
- if(dataKeys[i] != null &&
dataKeys[i].getNodeKey().equals(key)) return i;
- for(int i=0;i<checkKeys.length;i++)
- if(checkKeys[i] != null &&
checkKeys[i].getNodeKey().equals(key)) return dataKeys.length+i;
+ public synchronized int getBlockNumber(Key key, ObjectContainer
container) {
+ for(int i=0;i<dataKeys.length;i++) {
+ ClientCHK k = dataKeys[i];
+ if(k == null) continue;
+ if(persistent)
+ container.activate(k, 5);
+ if(k.getRoutingKey() == null)
+ throw new NullPointerException("Routing key is
null yet key exists for data block "+i+" of "+this+(persistent?("
stored="+container.ext().isStored(k)+" active="+container.ext().isActive(k)) :
""));
+ if(k.getNodeKey().equals(key)) return i;
+ else {
+ if(persistent)
+ container.deactivate(k, 5);
+ }
+ }
+ for(int i=0;i<checkKeys.length;i++) {
+ ClientCHK k = checkKeys[i];
+ if(k == null) continue;
+ if(persistent)
+ container.activate(k, 5);
+ if(k.getRoutingKey() == null)
+ throw new NullPointerException("Routing key is
null yet key exists for check block "+i+" of "+this);
+ if(k.getNodeKey().equals(key)) return dataKeys.length+i;
+ else {
+ if(persistent)
+ container.deactivate(k, 5);
+ }
+ }
return -1;
}
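
Editorial note: getBlockNumber() and its callers all rely on one flat
numbering convention: indices 0..dataKeys.length-1 are data blocks and the
check blocks follow immediately after. A two-method sketch of that
convention (hypothetical helper names):

    // Sketch of the flat block numbering used throughout the segment:
    // data blocks come first, check blocks are offset by dataLen.
    class BlockNumberingSketch {
        static boolean isDataBlock(int blockNum, int dataLen) {
            return blockNum < dataLen;
        }

        static int checkIndex(int blockNum, int dataLen) {
            // Only meaningful when !isDataBlock(blockNum, dataLen).
            return blockNum - dataLen;
        }
    }
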
public synchronized Integer[] getKeyNumbersAtRetryLevel(int retryCount)
{
- Vector v = new Vector();
+ Vector<Integer> v = new Vector<Integer>();
for(int i=0;i<dataRetries.length;i++) {
if(dataKeys[i] == null) continue;
if(dataRetries[i] == retryCount)
@@ -723,7 +1348,7 @@
if(checkRetries[i] == retryCount)
v.add(Integer.valueOf(i+dataKeys.length));
}
- return (Integer[]) v.toArray(new Integer[v.size()]);
+ return v.toArray(new Integer[v.size()]);
}
public synchronized void resetCooldownTimes(Integer[] blockNums) {
@@ -736,8 +1361,291 @@
}
}
- public void freeDecodedData() {
+ public void onFailed(Throwable t, ObjectContainer container,
ClientContext context) {
+ synchronized(this) {
+ if(finished) {
+ Logger.error(this, "FEC decode or encode failed
but already finished: "+t, t);
+ return;
+ }
+ finished = true;
+ }
+ if(persistent)
+ container.activate(this, 1);
+ this.fail(new FetchException(FetchException.INTERNAL_ERROR,
"FEC failure: "+t, t), container, context, false);
+ }
+
+ public boolean haveBlock(int blockNo, ObjectContainer container) {
+ if(blockNo < dataBuckets.length) {
+ boolean wasActive = false;
+ if(dataBuckets[blockNo] == null) return false;
+ if(persistent) {
+ wasActive =
container.ext().isActive(dataBuckets[blockNo]);
+ if(!wasActive)
+					container.activate(dataBuckets[blockNo], 1);
+ }
+ boolean retval = dataBuckets[blockNo].hasData();
+ if(persistent && !wasActive)
+ container.deactivate(dataBuckets[blockNo], 1);
+ return retval;
+ } else {
+ boolean wasActive = false;
+ blockNo -= dataBuckets.length;
+ if(checkBuckets[blockNo] == null) return false;
+ if(persistent) {
+ wasActive =
container.ext().isActive(checkBuckets[blockNo]);
+ if(!wasActive)
+					container.activate(checkBuckets[blockNo], 1);
+ }
+ boolean retval = checkBuckets[blockNo].hasData();
+ if(persistent && !wasActive)
+ container.deactivate(checkBuckets[blockNo], 1);
+ return retval;
+ }
+ }
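
Editorial note: haveBlock() is a compact example of the activation
discipline this commit applies everywhere: activate a db4o-managed object
only if it was not already active, and deactivate it afterwards only in
that case, leaving the caller's activation state untouched. A generic
sketch of the idiom (ObjectContainer.activate/deactivate and
ext().isActive are db4o's real calls; the wrapper is our own):

    import com.db4o.ObjectContainer;

    // Sketch of the was-active pattern from haveBlock(): restore the
    // previous activation state so we never deactivate an object that
    // some caller higher up the stack is still using.
    final class ActivationSketch {
        interface Body {
            boolean run();
        }

        static boolean withActivated(ObjectContainer container, Object o, Body body) {
            boolean wasActive = container.ext().isActive(o);
            if (!wasActive)
                container.activate(o, 1);
            try {
                return body.run();
            } finally {
                if (!wasActive)
                    container.deactivate(o, 1);
            }
        }
    }
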
+
+ public boolean dontCache(ObjectContainer container) {
+ return !blockFetchContext.cacheLocalRequests;
+ }
+
+ public short getPriorityClass(ObjectContainer container) {
+ if(persistent)
+ container.activate(parent, 1);
+ return parent.priorityClass;
+ }
+
+ public SendableGet getRequest(Key key, ObjectContainer container) {
+ int blockNum = this.getBlockNumber(key, container);
+ if(blockNum < 0) return null;
+ int retryCount = getBlockRetryCount(blockNum);
+ return getSubSegment(retryCount, container, false, null);
+ }
+
+ public boolean isCancelled(ObjectContainer container) {
+ return isFinishing(container);
+ }
+
+ public Key[] listKeys(ObjectContainer container) {
+ Vector<Key> v = new Vector<Key>();
+ synchronized(this) {
+ for(int i=0;i<dataKeys.length;i++) {
+ if(dataKeys[i] != null) {
+ if(persistent)
+ container.activate(dataKeys[i],
5);
+ v.add(dataKeys[i].getNodeKey());
+ }
+ }
+ for(int i=0;i<checkKeys.length;i++) {
+ if(checkKeys[i] != null) {
+ if(persistent)
+						container.activate(checkKeys[i], 5);
+ v.add(checkKeys[i].getNodeKey());
+ }
+ }
+ }
+ return v.toArray(new Key[v.size()]);
+ }
+
+ /**
+ * @return True if we fetched a block.
+	 * Hold the lock for the whole duration of this method. If a transient
+	 * request runs two copies of onGotKey() in parallel, we want only one
+	 * of them to return true; otherwise the SplitFileFetcherKeyListener
+	 * (SFFKL) will remove the keys from the main bloom filter twice,
+	 * resulting in collateral damage to other overlapping keys, and then
+	 * "NOT IN BLOOM FILTER" errors, or worse, false negatives.
+ */
+ public boolean onGotKey(Key key, KeyBlock block, ObjectContainer
container, ClientContext context) {
+ ClientCHKBlock cb;
+ int blockNum;
+ Bucket data;
+ SplitFileFetcherSubSegment seg;
+ short onSuccessResult = (short) -1;
+ synchronized(this) {
+ if(finished || startedDecode || fetcherFinished) {
+ return false;
+ }
+ blockNum = this.getBlockNumber(key, container);
+ if(blockNum < 0) return false;
+ if(logMINOR)
+ Logger.minor(this, "Found key for block
"+blockNum+" on "+this+" in onGotKey() for "+key);
+ ClientCHK ckey = this.getBlockKey(blockNum, container);
+ int retryCount = getBlockRetryCount(blockNum);
+ seg = this.getSubSegment(retryCount, container, true,
null);
+ if(persistent)
+ container.activate(seg, 1);
+ if(seg != null) {
+ seg.removeBlockNum(blockNum, container, false);
+ seg.possiblyRemoveFromParent(container,
context);
+ }
+ for(int i=0;i<subSegments.size();i++) {
+ SplitFileFetcherSubSegment checkSeg =
subSegments.get(i);
+ if(checkSeg == seg) continue;
+ if(persistent)
+ container.activate(checkSeg, 1);
+ if(checkSeg.removeBlockNum(blockNum, container,
false))
+ Logger.error(this, "Block number
"+blockNum+" was registered to wrong subsegment "+checkSeg+" should be "+seg);
+ if(persistent)
+ container.deactivate(checkSeg, 1);
+ }
+ if(persistent)
+ container.deactivate(seg, 1);
+ try {
+ cb = new ClientCHKBlock((CHKBlock)block, ckey);
+ } catch (CHKVerifyException e) {
+ this.onFatalFailure(new
FetchException(FetchException.BLOCK_DECODE_ERROR, e), blockNum, null,
container, context);
+ return false;
+ }
+ data = extract(cb, blockNum, container, context);
+ if(data == null) {
+ if(logMINOR)
+ Logger.minor(this, "Extract failed");
+ return false;
+ }
+ // This can be done safely inside the lock.
+ if(parent instanceof ClientGetter)
+ ((ClientGetter)parent).addKeyToBinaryBlob(cb,
container, context);
+ if(!cb.isMetadata()) {
+ // We MUST remove the keys before we exit the
synchronized block,
+ // thus ensuring that the next call will return
FALSE, and the keys
+ // will only be removed from the Bloom filter
ONCE!
+ onSuccessResult = onSuccessInner(data,
blockNum, cb, container, context, seg);
+ }
+ }
+ if(!cb.isMetadata()) {
+ if(onSuccessResult != (short) -1)
+ finishOnSuccess(onSuccessResult, container,
context);
+ return true;
+ } else {
+ onFatalFailure(new
FetchException(FetchException.INVALID_METADATA, "Metadata where expected
data"), blockNum, null, container, context);
+ return true;
+ }
+ }
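
Editorial note: the concurrency argument in the javadoc above boils down to
a claim-once pattern: the test for "do we still want this key?" and the
state change that makes the next test fail are one atomic step under the
segment lock, so two parallel onGotKey() calls cannot both return true.
A plain-Java sketch of the same guarantee:

    import java.util.HashSet;
    import java.util.Set;

    // Sketch of the claim-once guarantee behind onGotKey(): test-and-remove
    // under one lock means exactly one caller per key can "win", so the
    // bloom filter is only ever decremented once per key.
    class ClaimOnceSketch {
        private final Set<String> wantedKeys = new HashSet<String>();

        synchronized void want(String key) {
            wantedKeys.add(key);
        }

        /** @return true for exactly one caller per wanted key. */
        synchronized boolean onGotKey(String key) {
            return wantedKeys.remove(key);
        }
    }
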
+
+ private int getBlockRetryCount(int blockNum) {
+ if(blockNum < dataRetries.length)
+ return dataRetries[blockNum];
+ blockNum -= dataRetries.length;
+ return checkRetries[blockNum];
+ }
+
+ /** Convert a ClientKeyBlock to a Bucket. If an error occurs, report it
via onFailure
+ * and return null.
+ */
+ protected Bucket extract(ClientKeyBlock block, int blockNum,
ObjectContainer container, ClientContext context) {
+ Bucket data;
+ try {
+ data =
block.decode(context.getBucketFactory(persistent),
(int)(Math.min(this.blockFetchContext.maxOutputLength, Integer.MAX_VALUE)),
false);
+ } catch (KeyDecodeException e1) {
+ if(logMINOR)
+ Logger.minor(this, "Decode failure: "+e1, e1);
+ this.onFatalFailure(new
FetchException(FetchException.BLOCK_DECODE_ERROR, e1.getMessage()), blockNum,
null, container, context);
+ return null;
+ } catch (TooBigException e) {
+ this.onFatalFailure(new
FetchException(FetchException.TOO_BIG, e.getMessage()), blockNum, null,
container, context);
+ return null;
+ } catch (IOException e) {
+ Logger.error(this, "Could not capture data - disk
full?: "+e, e);
+ this.onFatalFailure(new
FetchException(FetchException.BUCKET_ERROR, e), blockNum, null, container,
context);
+ return null;
+ }
+ if(logMINOR)
+ Logger.minor(this, data == null ? "Could not decode:
null" : ("Decoded "+data.size()+" bytes"));
+ return data;
+ }
+
+
+ public boolean persistent() {
+ return persistent;
+ }
+
+ public void deactivateKeys(ObjectContainer container) {
+ for(int i=0;i<dataKeys.length;i++)
+ container.deactivate(dataKeys[i], 1);
+ for(int i=0;i<checkKeys.length;i++)
+ container.deactivate(checkKeys[i], 1);
+ }
+
+ public SplitFileFetcherSubSegment getSubSegmentFor(int blockNum,
ObjectContainer container) {
+ return getSubSegment(getBlockRetryCount(blockNum), container,
false, null);
+ }
+
+ public void freeDecodedData(ObjectContainer container) {
+ if(persistent)
+ container.activate(decodedData, 1);
decodedData.free();
+ if(persistent)
+ decodedData.removeFrom(container);
decodedData = null;
+ if(persistent)
+ container.store(this);
}
+
+ public void removeFrom(ObjectContainer container, ClientContext
context) {
+ if(logMINOR) Logger.minor(this, "removing "+this);
+ if(decodedData != null)
+ freeDecodedData(container);
+ removeSubSegments(container, context, true);
+ container.delete(subSegments);
+ for(int i=0;i<dataKeys.length;i++) {
+ if(dataKeys[i] != null)
dataKeys[i].removeFrom(container);
+ dataKeys[i] = null;
+ }
+ for(int i=0;i<checkKeys.length;i++) {
+ if(checkKeys[i] != null)
checkKeys[i].removeFrom(container);
+ checkKeys[i] = null;
+ }
+ for(int i=0;i<dataBuckets.length;i++) {
+ MinimalSplitfileBlock block = dataBuckets[i];
+ if(block == null) continue;
+ if(block.data != null) {
+ Logger.error(this, "Data block "+i+" still
present in removeFrom()! on "+this);
+ block.data.free();
+ }
+ block.removeFrom(container);
+ }
+ for(int i=0;i<checkBuckets.length;i++) {
+ MinimalSplitfileBlock block = checkBuckets[i];
+ if(block == null) continue;
+ if(block.data != null) {
+ Logger.error(this, "Check block "+i+" still
present in removeFrom()! on "+this);
+ block.data.free();
+ }
+ block.removeFrom(container);
+ }
+ container.activate(errors, 1);
+ errors.removeFrom(container);
+ if(failureException != null) {
+ container.activate(failureException, 5);
+ failureException.removeFrom(container);
+ }
+ container.delete(this);
+ }
+
+ public void fetcherFinished(ObjectContainer container, ClientContext
context) {
+ synchronized(this) {
+ fetcherFinished = true;
+ if(!encoderFinished) {
+ if(!startedDecode) {
+ encoderFinished = true;
+ container.store(this);
+ } else {
+ container.store(this);
+ if(logMINOR) Logger.minor(this,
"Fetcher finished but encoder not finished on "+this);
+ return;
+ }
+ }
+ }
+ removeFrom(container, context);
+ }
+
+ private void encoderFinished(ObjectContainer container, ClientContext
context) {
+ synchronized(this) {
+ encoderFinished = true;
+ if(!fetcherFinished) {
+ container.store(this);
+ if(logMINOR) Logger.minor(this, "Encoder
finished but fetcher not finished on "+this);
+ return;
+ }
+ }
+ removeFrom(container, context);
+ }
}
Modified: trunk/freenet/src/freenet/client/async/SplitFileFetcherSubSegment.java
===================================================================
--- trunk/freenet/src/freenet/client/async/SplitFileFetcherSubSegment.java
2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/client/async/SplitFileFetcherSubSegment.java
2009-04-01 20:34:09 UTC (rev 26322)
@@ -1,8 +1,13 @@
package freenet.client.async;
import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
import java.util.Vector;
+import com.db4o.ObjectContainer;
+
import freenet.client.FetchContext;
import freenet.client.FetchException;
import freenet.keys.CHKBlock;
@@ -15,10 +20,14 @@
import freenet.keys.KeyBlock;
import freenet.keys.KeyDecodeException;
import freenet.keys.TooBigException;
+import freenet.node.BulkCallFailureItem;
import freenet.node.KeysFetchingLocally;
import freenet.node.LowLevelGetException;
+import freenet.node.RequestClient;
import freenet.node.RequestScheduler;
import freenet.node.SendableGet;
+import freenet.node.SendableRequestItem;
+import freenet.node.SupportsBulkCallFailure;
import freenet.support.Logger;
import freenet.support.api.Bucket;
@@ -31,7 +40,7 @@
* LOCKING: Synchronize on the parent segment. Nothing else makes sense w.r.t.
nested locking.
* Note that SendableRequest will occasionally lock on (this). That lock is
always taken last.
*/
-public class SplitFileFetcherSubSegment extends SendableGet {
+public class SplitFileFetcherSubSegment extends SendableGet implements
SupportsBulkCallFailure {
final int retryCount;
final SplitFileFetcherSegment segment;
@@ -41,22 +50,30 @@
* chooseKey() and allKeys() work / work fast. The retries tables in
the Segment are
* canonical.
*/
- final Vector blockNums;
+ final Vector<Integer> blockNums;
final FetchContext ctx;
private static boolean logMINOR;
private boolean cancelled;
- SplitFileFetcherSubSegment(SplitFileFetcherSegment segment, int
retryCount) {
- super(segment.parentFetcher.parent);
+ SplitFileFetcherSubSegment(SplitFileFetcherSegment segment,
ClientRequester parent, int retryCount) {
+ super(parent);
this.segment = segment;
this.retryCount = retryCount;
+ if(parent == null) throw new NullPointerException();
ctx = segment.blockFetchContext;
- blockNums = new Vector();
+ blockNums = new Vector<Integer>();
logMINOR = Logger.shouldLog(Logger.MINOR, this);
}
@Override
- public boolean dontCache() {
+ public boolean dontCache(ObjectContainer container) {
+ if(persistent) container.activate(ctx, 1);
+ if(ctx == null) {
+ if(segment != null)
+ Logger.error(this, "CTX=NULL BUT SEGMENT !=
NULL!");
+ else
+ Logger.error(this, "CTX=NULL AND SEGMENT = NULL
on "+this);
+ }
return !ctx.cacheLocalRequests;
}
@@ -66,24 +83,28 @@
}
@Override
- public Object chooseKey(KeysFetchingLocally keys) {
+ public SendableRequestItem chooseKey(KeysFetchingLocally keys,
ObjectContainer container, ClientContext context) {
if(cancelled) return null;
- return removeRandomBlockNum(keys);
+ return getRandomBlockNum(keys, context, container);
}
@Override
- public ClientKey getKey(Object token) {
+ public ClientKey getKey(Object token, ObjectContainer container) {
+ if(persistent) {
+ container.activate(this, 1);
+ container.activate(segment, 1);
+ }
synchronized(segment) {
if(cancelled) {
if(logMINOR)
Logger.minor(this, "Segment is
finishing when getting key "+token+" on "+this);
return null;
}
- ClientKey key =
segment.getBlockKey(((Integer)token).intValue());
+ ClientKey key =
segment.getBlockKey(((MySendableRequestItem)token).x, container);
if(key == null) {
- if(segment.isFinished()) {
+ if(segment.isFinished(container)) {
Logger.error(this, "Segment finished
but didn't tell us! "+this);
- } else if(segment.isFinishing()) {
+ } else if(segment.isFinishing(container)) {
Logger.error(this, "Segment finishing
but didn't tell us! "+this);
} else {
Logger.error(this, "Segment not
finishing yet still returns null for getKey()!: "+token+" for "+this, new
Exception("debug"));
@@ -98,22 +119,61 @@
* those on cooldown queues. This is important when unregistering.
*/
@Override
- public Object[] allKeys() {
+ public SendableRequestItem[] allKeys(ObjectContainer container,
ClientContext context) {
+ if(persistent) {
+ container.activate(this, 1);
+ container.activate(segment, 1);
+ }
// j16sdiz (22-DEC-2008):
		// ClientRequestScheduler.removePendingKeys() calls this to get
		// a list of requests to be removed
		// FIXME ClientRequestScheduler.removePendingKeys() is leaking;
		// what's missing here?
- return segment.getKeyNumbersAtRetryLevel(retryCount);
+ return
convertIntegerToMySendableRequestItems(segment.getKeyNumbersAtRetryLevel(retryCount));
}
/**
* Just those keys which are eligible to be started now.
*/
@Override
- public Object[] sendableKeys() {
- return blockNums.toArray();
+ public SendableRequestItem[] sendableKeys(ObjectContainer container,
ClientContext context) {
+ if(persistent) {
+ container.activate(this, 1);
+ container.activate(blockNums, 1);
+ }
+ cleanBlockNums(container);
+ return
convertIntegerToMySendableRequestItems((Integer[])blockNums.toArray());
}
- private Object removeRandomBlockNum(KeysFetchingLocally keys) {
+ private SendableRequestItem[]
convertIntegerToMySendableRequestItems(Integer[] nums) {
+ SendableRequestItem[] wrapped = new
SendableRequestItem[nums.length];
+ for(int i=0;i<nums.length;i++)
+ wrapped[i] = new MySendableRequestItem(nums[i]);
+ return wrapped;
+ }
+
+ private void cleanBlockNums(ObjectContainer container) {
+ synchronized(segment) {
+ int initSize = blockNums.size();
+ Integer prev = null;
+ for(int i=0;i<blockNums.size();i++) {
+ Integer x = (Integer) blockNums.get(i);
+ if(x == prev || x.equals(prev)) {
+ blockNums.remove(i);
+ i--;
+ if(persistent) container.delete(x);
+ } else prev = x;
+ }
+ if(blockNums.size() < initSize) {
+ Logger.error(this, "Cleaned block number list
duplicates: was "+initSize+" now "+blockNums.size());
+ }
+ }
+ }
+
+ private SendableRequestItem getRandomBlockNum(KeysFetchingLocally keys,
ClientContext context, ObjectContainer container) {
+ if(persistent) {
+ container.activate(this, 1);
+ container.activate(blockNums, 1);
+ container.activate(segment, 1);
+ }
logMINOR = Logger.shouldLog(Logger.MINOR, this);
synchronized(segment) {
if(blockNums.isEmpty()) {
@@ -122,50 +182,90 @@
return null;
}
for(int i=0;i<10;i++) {
- Object ret;
+ Integer ret;
int x;
- x = ctx.random.nextInt(blockNums.size());
- ret = (Integer) blockNums.remove(x);
- Key key =
segment.getBlockNodeKey(((Integer)ret).intValue());
+ if(blockNums.size() == 0) return null;
+ x = context.random.nextInt(blockNums.size());
+ ret = blockNums.get(x);
+ int num = ret;
+ Key key = segment.getBlockNodeKey(num,
container);
if(key == null) {
- if(segment.isFinishing() ||
segment.isFinished()) return null;
- Logger.error(this, "Key is null for
block "+ret+" for "+this);
+ if(segment.isFinishing(container) ||
segment.isFinished(container)) return null;
+ if(segment.haveBlock(num, container))
+ Logger.error(this, "Already
have block "+ret+" but was in blockNums on "+this);
+ else
+ Logger.error(this, "Key is null
for block "+ret+" for "+this);
continue;
}
if(keys.hasKey(key)) {
- blockNums.add(ret);
continue;
}
if(logMINOR)
Logger.minor(this, "Removing block
"+x+" of "+(blockNums.size()+1)+ " : "+ret+ " on "+this);
- return ret;
+ return new MySendableRequestItem(num);
}
return null;
}
}
+
+ private class MySendableRequestItem implements SendableRequestItem {
+ final int x;
+ MySendableRequestItem(int x) {
+ this.x = x;
+ }
+ public void dump() {
+ // Ignore, we will be GC'ed
+ }
+ }
@Override
- public boolean hasValidKeys(KeysFetchingLocally keys) {
+ public boolean hasValidKeys(KeysFetchingLocally keys, ObjectContainer
container, ClientContext context) {
+ if(persistent) {
+ container.activate(this, 1);
+ container.activate(blockNums, 1);
+ container.activate(segment, 1);
+ }
+ boolean hasSet = false;
+ boolean retval = false;
synchronized(segment) {
for(int i=0;i<10;i++) {
- Object ret;
+ Integer ret;
int x;
- if(blockNums.isEmpty()) return false;
- x = ctx.random.nextInt(blockNums.size());
- ret = (Integer) blockNums.get(x);
- Key key =
segment.getBlockNodeKey(((Integer)ret).intValue());
+ if(blockNums.isEmpty()) {
+ break;
+ }
+ x = context.random.nextInt(blockNums.size());
+ ret = blockNums.get(x);
+ int block = ret;
+ Key key = segment.getBlockNodeKey(block,
container);
if(key == null) {
- Logger.error(this, "Key is null for
block "+ret+" for "+this+" in hasValidKeys()");
+ if(segment.isFinishing(container) ||
segment.isFinished(container)) return false;
+ if(segment.haveBlock(block, container))
+ Logger.error(this, "Already
have block "+ret+" but was in blockNums on "+this+" in hasValidKeys");
+ else
+ Logger.error(this, "Key is null
for block "+ret+" for "+this+" in hasValidKeys");
blockNums.remove(x);
+ if(persistent) {
+ container.delete(ret);
+ if(!hasSet) {
+ hasSet = true;
+							container.store(blockNums);
+ }
+ }
continue;
}
if(keys.hasKey(key)) {
continue;
}
- return true;
+ retval = true;
+ break;
}
- return false;
}
+ if(persistent) {
+ container.deactivate(blockNums, 5);
+ container.deactivate(segment, 1);
+ }
+ return retval;
}
@Override
@@ -173,52 +273,108 @@
return ctx.ignoreStore;
}
- // Translate it, then call the real onFailure
+ // SendableGet has a hashCode() and inherits equals(), which is
consistent with the hashCode().
+
+ public void onFailure(BulkCallFailureItem[] items, ObjectContainer
container, ClientContext context) {
+ FetchException[] fetchExceptions = new
FetchException[items.length];
+ int countFatal = 0;
+ if(persistent) {
+ container.activate(blockNums, 2);
+ }
+ for(int i=0;i<items.length;i++) {
+ fetchExceptions[i] = translateException(items[i].e);
+ if(fetchExceptions[i].isFatal()) countFatal++;
+			removeBlockNum(((MySendableRequestItem)items[i].token).x, container, true);
+ }
+ if(persistent) {
+ container.store(blockNums);
+ container.deactivate(blockNums, 2);
+ container.activate(segment, 1);
+ container.activate(parent, 1);
+ container.activate(segment.errors, 1);
+ }
+ if(parent.isCancelled()) {
+ if(Logger.shouldLog(Logger.MINOR, this))
+ Logger.minor(this, "Failing: cancelled");
+ // Fail the segment.
+ segment.fail(new
FetchException(FetchException.CANCELLED), container, context, false);
+ // FIXME do we need to free the keyNum's??? Or will
that happen later anyway?
+ return;
+ }
+ for(int i=0;i<fetchExceptions.length;i++)
+ segment.errors.inc(fetchExceptions[i].getMode());
+ int nonFatalExceptions = items.length - countFatal;
+ int[] blockNumbers = new int[nonFatalExceptions];
+ if(countFatal > 0) {
+ FetchException[] newFetchExceptions = new
FetchException[items.length - countFatal];
+ // Call the fatal callbacks directly.
+ int x = 0;
+ for(int i=0;i<items.length;i++) {
+ int blockNum =
((MySendableRequestItem)items[i].token).x;
+ if(fetchExceptions[i].isFatal()) {
+					segment.onFatalFailure(fetchExceptions[i], blockNum, this, container, context);
+ } else {
+ blockNumbers[x] = blockNum;
+ newFetchExceptions[x] =
fetchExceptions[i];
+ x++;
+ }
+ }
+ fetchExceptions = newFetchExceptions;
+ } else {
+ for(int i=0;i<blockNumbers.length;i++)
+ blockNumbers[i] =
((MySendableRequestItem)items[i].token).x;
+ }
+ segment.onNonFatalFailure(fetchExceptions, blockNumbers, this,
container, context);
+
+ if(persistent) {
+ container.deactivate(segment, 1);
+ container.deactivate(parent, 1);
+ container.deactivate(segment.errors, 1);
+ }
+ }
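
Editorial note: the bulk path above first counts fatal failures so it can
split the batch exactly: fatal items go to onFatalFailure one at a time,
and everything else is collected into parallel arrays for a single
onNonFatalFailure call. A standalone sketch of that partition step (types
and names illustrative):

    import java.util.ArrayList;
    import java.util.List;

    // Sketch of the partition step in the bulk onFailure(): dispatch fatal
    // items individually, batch the non-fatal block numbers for one call.
    class BulkFailurePartitionSketch {
        static final class Item {
            final int blockNum;
            final boolean fatal;
            Item(int blockNum, boolean fatal) {
                this.blockNum = blockNum;
                this.fatal = fatal;
            }
        }

        static int[] partition(Item[] items) {
            List<Integer> nonFatal = new ArrayList<Integer>();
            for (Item item : items) {
                if (item.fatal) {
                    // real code: segment.onFatalFailure(..., item.blockNum, ...)
                } else {
                    nonFatal.add(item.blockNum);
                }
            }
            int[] blockNumbers = new int[nonFatal.size()];
            for (int i = 0; i < blockNumbers.length; i++)
                blockNumbers[i] = nonFatal.get(i);
            return blockNumbers; // real code: one onNonFatalFailure(...) call
        }
    }
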
+
// FIXME refactor this out to a common method; see
SimpleSingleFileFetcher
- @Override
- public void onFailure(LowLevelGetException e, Object token,
RequestScheduler sched) {
- if(logMINOR)
- Logger.minor(this, "onFailure("+e+" , "+token);
+ private FetchException translateException(LowLevelGetException e) {
switch(e.code) {
case LowLevelGetException.DATA_NOT_FOUND:
- onFailure(new
FetchException(FetchException.DATA_NOT_FOUND), token, sched);
- return;
case LowLevelGetException.DATA_NOT_FOUND_IN_STORE:
- onFailure(new
FetchException(FetchException.DATA_NOT_FOUND), token, sched);
- return;
+ return new
FetchException(FetchException.DATA_NOT_FOUND);
case LowLevelGetException.RECENTLY_FAILED:
- onFailure(new
FetchException(FetchException.RECENTLY_FAILED), token, sched);
- return;
+ return new
FetchException(FetchException.RECENTLY_FAILED);
case LowLevelGetException.DECODE_FAILED:
- onFailure(new
FetchException(FetchException.BLOCK_DECODE_ERROR), token, sched);
- return;
+ return new
FetchException(FetchException.BLOCK_DECODE_ERROR);
case LowLevelGetException.INTERNAL_ERROR:
- onFailure(new
FetchException(FetchException.INTERNAL_ERROR), token, sched);
- return;
+ return new
FetchException(FetchException.INTERNAL_ERROR);
case LowLevelGetException.REJECTED_OVERLOAD:
- onFailure(new
FetchException(FetchException.REJECTED_OVERLOAD), token, sched);
- return;
+ return new
FetchException(FetchException.REJECTED_OVERLOAD);
case LowLevelGetException.ROUTE_NOT_FOUND:
- onFailure(new
FetchException(FetchException.ROUTE_NOT_FOUND), token, sched);
- return;
+ return new
FetchException(FetchException.ROUTE_NOT_FOUND);
case LowLevelGetException.TRANSFER_FAILED:
- onFailure(new
FetchException(FetchException.TRANSFER_FAILED), token, sched);
- return;
+ return new
FetchException(FetchException.TRANSFER_FAILED);
case LowLevelGetException.VERIFY_FAILED:
- onFailure(new
FetchException(FetchException.BLOCK_DECODE_ERROR), token, sched);
- return;
+ return new
FetchException(FetchException.BLOCK_DECODE_ERROR);
case LowLevelGetException.CANCELLED:
- onFailure(new FetchException(FetchException.CANCELLED),
token, sched);
- return;
+ return new FetchException(FetchException.CANCELLED);
default:
Logger.error(this, "Unknown LowLevelGetException code:
"+e.code);
- onFailure(new
FetchException(FetchException.INTERNAL_ERROR), token, sched);
- return;
+ return new
FetchException(FetchException.INTERNAL_ERROR, "Unknown error code: "+e.code);
}
}
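
Editorial note: factoring the old per-case onFailure calls into
translateException() gives the single and bulk failure paths one shared
mapping and one dispatch point. A compact sketch of the resulting shape,
with plain int codes standing in for the real exception classes (the
numbering is invented for illustration):

    // Sketch of the translate-then-dispatch refactor: one pure mapping from
    // low-level to high-level error codes, reused by every failure entry point.
    class TranslateDispatchSketch {
        // Invented codes, for illustration only.
        static final int LL_DATA_NOT_FOUND = 1, LL_TRANSFER_FAILED = 2;
        static final int FE_DATA_NOT_FOUND = 101, FE_TRANSFER_FAILED = 102,
                FE_INTERNAL_ERROR = 199;

        static int translate(int lowLevelCode) {
            switch (lowLevelCode) {
                case LL_DATA_NOT_FOUND:  return FE_DATA_NOT_FOUND;
                case LL_TRANSFER_FAILED: return FE_TRANSFER_FAILED;
                default:                 return FE_INTERNAL_ERROR;
            }
        }

        void onLowLevelFailure(int code, Object token) {
            onFailure(translate(code), token); // single real failure handler
        }

        void onFailure(int fetchCode, Object token) {
            System.out.println("failed with " + fetchCode + " for " + token);
        }
    }
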
+ // Translate it, then call the real onFailure
+ public void onFailure(LowLevelGetException e, Object token,
ObjectContainer container, ClientContext context) {
+ if(logMINOR)
+ Logger.minor(this, "onFailure("+e+" , "+token+" on
"+this);
+ onFailure(translateException(e), token, container, context);
+ }
+
// Real onFailure
- protected void onFailure(FetchException e, Object token,
RequestScheduler sched) {
+ protected void onFailure(FetchException e, Object token,
ObjectContainer container, ClientContext context) {
+ if(persistent) {
+ container.activate(segment, 1);
+ container.activate(parent, 1);
+ container.activate(segment.errors, 1);
+ }
boolean forceFatal = false;
if(parent.isCancelled()) {
if(Logger.shouldLog(Logger.MINOR, this))
@@ -227,67 +383,97 @@
forceFatal = true;
}
segment.errors.inc(e.getMode());
- if(e.isFatal() || forceFatal) {
- segment.onFatalFailure(e, ((Integer)token).intValue(),
this);
+ if(e.isFatal() && token == null) {
+ segment.fail(e, container, context, false);
+ } else if(e.isFatal() || forceFatal) {
+ segment.onFatalFailure(e,
((MySendableRequestItem)token).x, this, container, context);
} else {
- segment.onNonFatalFailure(e,
((Integer)token).intValue(), this, sched);
+ segment.onNonFatalFailure(e,
((MySendableRequestItem)token).x, this, container, context);
}
+ removeBlockNum(((MySendableRequestItem)token).x, container,
false);
+ if(persistent) {
+ container.deactivate(segment, 1);
+ container.deactivate(parent, 1);
+ container.deactivate(segment.errors, 1);
+ }
}
@Override
- public void onSuccess(ClientKeyBlock block, boolean fromStore, Object
token, RequestScheduler sched) {
- Bucket data = extract(block, token, sched);
+ public void onSuccess(ClientKeyBlock block, boolean fromStore, Object
token, ObjectContainer container, ClientContext context) {
+ if(persistent) {
+ container.activate(this, 1);
+ container.activate(segment, 1);
+ container.activate(blockNums, 1);
+ }
+ Bucket data = extract(block, token, container, context);
+ int blockNum = ((MySendableRequestItem)token).x;
if(fromStore) {
// Normally when this method is called the block number
has already
// been removed. However if fromStore=true, it won't
have been, so
// we have to do it. (Check the call trace for why)
+ boolean removed = false;
synchronized(segment) {
for(int i=0;i<blockNums.size();i++) {
- Integer x = (Integer) blockNums.get(i);
+ Integer x = blockNums.get(i);
				// Compare by value as sometimes we will do new Integer(num)
				// in the requeue-after-cooldown code.
- if(x.equals(token)) {
+ if(x.intValue() == blockNum) {
blockNums.remove(i);
+ if(persistent)
container.delete(x);
if(logMINOR) Logger.minor(this,
"Removed block "+i+" : "+x);
i--;
+ removed = true;
}
}
}
+ if(persistent && removed)
+ container.store(blockNums);
}
if(!block.isMetadata()) {
- onSuccess(data, fromStore, (Integer)token,
((Integer)token).intValue(), block, sched);
+ onSuccess(data, fromStore, blockNum, blockNum, block,
container, context);
} else {
- onFailure(new
FetchException(FetchException.INVALID_METADATA, "Metadata where expected
data"), token, sched);
+ onFailure(new
FetchException(FetchException.INVALID_METADATA, "Metadata where expected
data"), token, container, context);
data.free();
+ if(persistent) data.removeFrom(container);
}
+ if(persistent) {
+ container.deactivate(segment, 1);
+ container.deactivate(blockNums, 1);
+ }
}
- protected void onSuccess(Bucket data, boolean fromStore, Integer token,
int blockNo, ClientKeyBlock block, RequestScheduler sched) {
+ protected void onSuccess(Bucket data, boolean fromStore, Integer token,
int blockNo, ClientKeyBlock block, ObjectContainer container, ClientContext
context) {
+ if(persistent) {
+ container.activate(this, 1);
+ container.activate(segment, 1);
+ container.activate(parent, 1);
+ }
if(parent.isCancelled()) {
data.free();
- onFailure(new FetchException(FetchException.CANCELLED),
token, sched);
+ if(persistent) data.removeFrom(container);
+ onFailure(new FetchException(FetchException.CANCELLED),
token, container, context);
return;
}
- segment.onSuccess(data, blockNo, this, block);
+ segment.onSuccess(data, blockNo, block, container, context,
this);
}
/** Convert a ClientKeyBlock to a Bucket. If an error occurs, report it
via onFailure
* and return null.
*/
- protected Bucket extract(ClientKeyBlock block, Object token,
RequestScheduler sched) {
+ protected Bucket extract(ClientKeyBlock block, Object token,
ObjectContainer container, ClientContext context) {
Bucket data;
try {
- data = block.decode(ctx.bucketFactory,
(int)(Math.min(ctx.maxOutputLength, Integer.MAX_VALUE)), false);
+ data =
block.decode(context.getBucketFactory(persistent),
(int)(Math.min(ctx.maxOutputLength, Integer.MAX_VALUE)), false);
} catch (KeyDecodeException e1) {
if(Logger.shouldLog(Logger.MINOR, this))
Logger.minor(this, "Decode failure: "+e1, e1);
- onFailure(new
FetchException(FetchException.BLOCK_DECODE_ERROR, e1.getMessage()), token,
sched);
+ onFailure(new
FetchException(FetchException.BLOCK_DECODE_ERROR, e1.getMessage()), token,
container, context);
return null;
} catch (TooBigException e) {
- onFailure(new FetchException(FetchException.TOO_BIG,
e.getMessage()), token, sched);
+ onFailure(new FetchException(FetchException.TOO_BIG,
e.getMessage()), token, container, context);
return null;
} catch (IOException e) {
Logger.error(this, "Could not capture data - disk
full?: "+e, e);
- onFailure(new
FetchException(FetchException.BUCKET_ERROR, e), token, sched);
+ onFailure(new
FetchException(FetchException.BUCKET_ERROR, e), token, container, context);
return null;
}
if(Logger.shouldLog(Logger.MINOR, this))
@@ -296,18 +482,20 @@
}
@Override
- public Object getClient() {
- return segment.parentFetcher.parent.getClient();
+ public RequestClient getClient(ObjectContainer container) {
+ if(persistent) container.activate(parent, 1);
+ return parent.getClient();
}
@Override
public ClientRequester getClientRequest() {
- return segment.parentFetcher.parent;
+ return parent;
}
@Override
- public short getPriorityClass() {
- return segment.parentFetcher.parent.priorityClass;
+ public short getPriorityClass(ObjectContainer container) {
+ if(persistent) container.activate(parent, 1);
+ return parent.priorityClass;
}
@Override
@@ -315,26 +503,22 @@
return retryCount;
}
- public boolean canRemove() {
- synchronized(segment) {
- if(blockNums.size() < 2) {
- // Can be removed, if the one key is processed.
- // Once it has been processed, we may need to
be reinstated.
- if(Logger.shouldLog(Logger.MINOR, this))
- Logger.minor(this, "Can remove "+this+"
in canRemove()");
- return true;
- } else return false;
+ @Override
+ public boolean isCancelled(ObjectContainer container) {
+ if(persistent) {
+ container.activate(parent, 1);
+ container.activate(segment, 1);
}
- }
-
- @Override
- public boolean isCancelled() {
synchronized(segment) {
- return cancelled;
+ return parent.cancelled;
}
}
- public boolean isEmpty() {
+ public boolean isEmpty(ObjectContainer container) {
+ if(persistent) {
+ container.activate(this, 1);
+ container.activate(blockNums, 1);
+ }
synchronized(segment) {
return cancelled || blockNums.isEmpty();
}
@@ -345,9 +529,78 @@
// Not allowed in splitfiles
return false;
}
+
+ public void addAll(int blocks, boolean dontSchedule, ObjectContainer
container, ClientContext context, boolean dontComplainOnDupes) {
+ int[] list = new int[blocks];
+ for(int i=0;i<blocks;i++) list[i] = i;
+ addAll(list, dontSchedule, container, context,
dontComplainOnDupes);
+ }
- public void add(int blockNo, boolean dontSchedule) {
+ public void addAll(int[] blocks, boolean dontSchedule, ObjectContainer
container, ClientContext context, boolean dontComplainOnDupes) {
+ if(persistent) {
+// container.activate(segment, 1);
+ container.activate(blockNums, 1);
+ }
boolean logMINOR = Logger.shouldLog(Logger.MINOR, this);
+ if(logMINOR) Logger.minor(this, "Adding "+blocks+" blocks to
"+this+" dontSchedule="+dontSchedule);
+ boolean schedule = true;
+ synchronized(segment) {
+ if(cancelled)
+ throw new IllegalStateException("Adding blocks
to already cancelled "+this);
+ for(int x=0;x<blocks.length;x++) {
+ int i = blocks[x];
+ Integer ii = new Integer(i);
+ if(blockNums.contains(ii)) {
+ if(!dontComplainOnDupes)
+ Logger.error(this, "Block
numbers already contain block "+i);
+ else if(logMINOR)
+ Logger.minor(this, "Block
numbers already contain block "+i);
+ } else {
+ blockNums.add(ii);
+ }
+ if(dontSchedule) schedule = false;
+ /**
+ * Race condition:
+ *
+ * Starter thread sees there is only one block
on us, so removes us.
+ * Another thread adds a block. We don't
schedule as we now have two blocks.
+ * Starter thread removes us.
+ * Other blocks may be added later, but we are
never rescheduled.
+ *
+ * Fixing this by only removing the
SendableRequest after we've removed the
+ * block is nontrivial with the current code.
+ * So what we do here is simply check whether
we are registered, instead of
+ * checking whether blockNums.size() > 1 as we
used to.
+ */
+ if(schedule && getParentGrabArray() != null) {
+ if(logMINOR) Logger.minor(this,
"Already registered, not scheduling: "+blockNums.size()+" : "+blockNums);
+ schedule = false;
+ }
+
+ }
+ }
+ if(persistent)
+ container.store(blockNums);
+ if(schedule) {
+ // Only need to register once for all the blocks.
+ try {
+ context.getChkFetchScheduler().register(null,
new SendableGet[] { this }, persistent, true, container, null, true);
+ } catch (KeyListenerConstructionException e) {
+ Logger.error(this, "Impossible: "+e+" on
"+this, e);
+ }
+ }
+
+ }
+
+ /**
+ * @return True if the caller should schedule.
+ */
+ public boolean add(int blockNo, boolean dontSchedule, ObjectContainer
container, ClientContext context, boolean dontComplainOnDupes) {
+ if(persistent) {
+// container.activate(segment, 1);
+ container.activate(blockNums, 1);
+ }
+ boolean logMINOR = Logger.shouldLog(Logger.MINOR, this);
if(logMINOR) Logger.minor(this, "Adding block "+blockNo+" to
"+this+" dontSchedule="+dontSchedule);
if(blockNo < 0) throw new IllegalArgumentException();
Integer i = Integer.valueOf(blockNo);
@@ -356,8 +609,14 @@
synchronized(segment) {
if(cancelled)
throw new IllegalStateException("Adding block
"+blockNo+" to already cancelled "+this);
- blockNums.add(i);
- if(dontSchedule) schedule = false;
+ if(blockNums.contains(i)) {
+ if(!dontComplainOnDupes)
+ Logger.error(this, "Block numbers
already contain block "+blockNo);
+ else if(logMINOR)
+ Logger.minor(this, "Block numbers
already contain block "+blockNo);
+ } else {
+ blockNums.add(i);
+ }
/**
* Race condition:
*
@@ -376,108 +635,288 @@
schedule = false;
}
}
- if(schedule) schedule();
- else if(!dontSchedule)
- // Already scheduled, however this key may not be
registered.
-			getScheduler().addPendingKey(segment.getBlockKey(blockNo), this);
+ if(persistent)
+ container.store(blockNums);
+ if(schedule) {
+ if(dontSchedule) return true;
+ try {
+ context.getChkFetchScheduler().register(null,
new SendableGet[] { this }, persistent, true, container, null, true);
+ } catch (KeyListenerConstructionException e) {
+ Logger.error(this, "Impossible: "+e+" on
"+this, e);
+ }
+ }
+ return false;
}
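
Editorial note: the race documented in the long comment above is closed by
changing what "needs scheduling" means: instead of "this was the first
block in the queue", the test is "we are not currently registered"
(getParentGrabArray() == null), so a block added while the starter thread
is removing the request still triggers a reschedule. A plain-Java model of
the fix:

    import java.util.ArrayList;
    import java.util.List;

    // Sketch of the add()/starter-thread race fix: decide whether to
    // (re)schedule from registration state, not from queue size.
    class AddRaceSketch {
        private final List<Integer> blockNums = new ArrayList<Integer>();
        private Object parentGrabArray; // non-null while registered

        /** @return true if the caller must (re)schedule this request. */
        synchronized boolean add(int blockNo) {
            Integer i = Integer.valueOf(blockNo);
            if (!blockNums.contains(i))
                blockNums.add(i);
            // Old, racy test: blockNums.size() == 1.
            return parentGrabArray == null;
        }

        synchronized void onRegistered(Object rga) { parentGrabArray = rga; }
        synchronized void onUnregistered()         { parentGrabArray = null; }
    }
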
@Override
public String toString() {
- return
super.toString()+":"+retryCount+"/"+segment+'('+blockNums.size()+')';
+ return
super.toString()+":"+retryCount+"/"+segment+'('+(blockNums == null ? "null" :
String.valueOf(blockNums.size()))+"),tempid="+objectHash();
}
- public void possiblyRemoveFromParent() {
+ public void possiblyRemoveFromParent(ObjectContainer container,
ClientContext context) {
+ if(persistent) {
+ container.activate(this, 1);
+ container.activate(segment, 1);
+ container.activate(blockNums, 1);
+ }
if(logMINOR)
Logger.minor(this, "Possibly removing from parent:
"+this);
synchronized(segment) {
- if(!blockNums.isEmpty()) return;
+ if(!blockNums.isEmpty()) {
+ if(persistent) container.deactivate(blockNums,
1);
+ return;
+ }
if(logMINOR)
Logger.minor(this, "Definitely removing from
parent: "+this);
- if(!segment.maybeRemoveSeg(this)) return;
- cancelled = true;
+ if(!segment.maybeRemoveSeg(this, container)) {
+ if(persistent) container.deactivate(blockNums,
1);
+ return;
+ }
}
- unregister(false);
+ kill(container, context, true);
}
- @Override
- public void onGotKey(Key key, KeyBlock block, RequestScheduler sched) {
+ public void onGotKey(Key key, KeyBlock block, ObjectContainer
container, ClientContext context) {
+ if(persistent) {
+ container.activate(this, 1);
+ container.activate(segment, 1);
+ container.activate(blockNums, 1);
+ }
if(logMINOR) Logger.minor(this, "onGotKey("+key+")");
// Find and remove block if it is on this subsegment. However
it may have been
// removed already.
int blockNo;
synchronized(segment) {
for(int i=0;i<blockNums.size();i++) {
- Integer token = (Integer) blockNums.get(i);
- int num = ((Integer)token).intValue();
- Key k = segment.getBlockNodeKey(num);
+ Integer token = blockNums.get(i);
+ int num = token;
+ Key k = segment.getBlockNodeKey(num, container);
if(k != null && k.equals(key)) {
blockNums.remove(i);
+ if(persistent) container.delete(token);
break;
}
}
- blockNo = segment.getBlockNumber(key);
+ blockNo = segment.getBlockNumber(key, container);
}
if(blockNo == -1) {
Logger.minor(this, "No block found for key "+key+" on
"+this);
return;
}
Integer token = Integer.valueOf(blockNo);
- ClientCHK ckey = (ClientCHK) segment.getBlockKey(blockNo);
+ ClientCHK ckey = (ClientCHK) segment.getBlockKey(blockNo,
container);
ClientCHKBlock cb;
try {
cb = new ClientCHKBlock((CHKBlock)block, ckey);
} catch (CHKVerifyException e) {
- onFailure(new
FetchException(FetchException.BLOCK_DECODE_ERROR, e), token, sched);
+ onFailure(new
FetchException(FetchException.BLOCK_DECODE_ERROR, e), token, container,
context);
return;
}
- Bucket data = extract(cb, token, sched);
+ Bucket data = extract(cb, token, container, context);
+ if(data == null) return;
if(!cb.isMetadata()) {
- onSuccess(data, false, (Integer)token,
((Integer)token).intValue(), cb, sched);
+ onSuccess(data, false, (Integer)token,
((Integer)token).intValue(), cb, container, context);
} else {
- onFailure(new
FetchException(FetchException.INVALID_METADATA, "Metadata where expected
data"), token, sched);
+ onFailure(new
FetchException(FetchException.INVALID_METADATA, "Metadata where expected
data"), token, container, context);
}
}
/**
* Terminate a subsegment. Called by the segment, which will have
already removed the
- * subsegment from the list.
+ * subsegment from the list. Will delete the object from the database
if persistent.
*/
- public void kill() {
+ public void kill(ObjectContainer container, ClientContext context,
boolean dontDeactivateSeg) {
+ if(persistent) {
+ container.activate(this, 1);
+ container.activate(segment, 1);
+ container.activate(blockNums, 1);
+ }
if(logMINOR)
Logger.minor(this, "Killing "+this);
// Do unregister() first so can get and unregister each key and avoid a memory leak
- unregister(false);
+ unregister(container, context);
+ Integer[] oldNums;
synchronized(segment) {
+ oldNums = blockNums.toArray(new
Integer[blockNums.size()]);
blockNums.clear();
cancelled = true;
}
+ if(persistent) {
+ for(Integer i : oldNums) container.delete(i);
+ container.delete(blockNums);
+ container.delete(this);
+ if(!dontDeactivateSeg)
+ container.deactivate(segment, 1);
+ // We do not need to call SendableGet as it has no internal data structures that need deleting.
+ }
}
@Override
- public long getCooldownWakeup(Object token) {
- return segment.getCooldownWakeup(((Integer)token).intValue());
+ public long getCooldownWakeup(Object token, ObjectContainer container) {
+ if(persistent) {
+ container.activate(this, 1);
+ container.activate(segment, 1);
+ }
+ long ret =
segment.getCooldownWakeup(((MySendableRequestItem)token).x);
+ return ret;
}
@Override
- public void requeueAfterCooldown(Key key, long time) {
+ public void requeueAfterCooldown(Key key, long time, ObjectContainer
container, ClientContext context) {
+ if(persistent) {
+ container.activate(segment, 1);
+ }
if(Logger.shouldLog(Logger.MINOR, this))
Logger.minor(this, "Requeueing after cooldown "+key+"
for "+this);
- segment.requeueAfterCooldown(key, time);
+ if(!segment.requeueAfterCooldown(key, time, container, context,
this)) {
+ Logger.error(this, "Key was not wanted after cooldown:
"+key+" for "+this+" in requeueAfterCooldown");
+ }
+ if(persistent) {
+ container.deactivate(segment, 1);
+ if(container.ext().isActive(segment))
+ Logger.error(this, "SEGMENT STILL ACTIVE:
"+segment);
+ else
+ if(logMINOR) Logger.minor(this, "Deactivated
segment "+segment);
+ }
}
@Override
- public long getCooldownWakeupByKey(Key key) {
- return segment.getCooldownWakeupByKey(key);
+ public long getCooldownWakeupByKey(Key key, ObjectContainer container) {
+ /* Only deactivate if it was deactivated in the first place.
+ * See the removePendingKey() stack trace: Segment is the listener (getter)! */
+ boolean activated = false;
+ if(persistent) {
+ activated = container.ext().isActive(segment);
+ if(!activated)
+ container.activate(segment, 1);
+ }
+ long ret = segment.getCooldownWakeupByKey(key, container);
+ if(persistent) {
+ if(!activated)
+ container.deactivate(segment, 1);
+ }
+ return ret;
}
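
For reference, the activation guard used by getCooldownWakeupByKey() above, in
isolation. A minimal sketch against db4o's ObjectContainer/ExtObjectContainer
API; ExampleSegment and its wakeupTime field are illustrative placeholders, not
part of this patch:

    import com.db4o.ObjectContainer;

    class ActivationGuardSketch {
        static class ExampleSegment { long wakeupTime; }

        // Activate only if db4o has not already activated the object, and
        // deactivate on the way out only if we did the activating, so a
        // caller further up the stack (e.g. the Segment acting as the
        // listener) keeps a live object.
        static long readWakeupTime(ObjectContainer container, ExampleSegment segment, boolean persistent) {
            boolean wasActive = true;
            if(persistent) {
                wasActive = container.ext().isActive(segment);
                if(!wasActive)
                    container.activate(segment, 1);
            }
            try {
                return segment.wakeupTime; // any read that needs the activated object
            } finally {
                if(persistent && !wasActive)
                    container.deactivate(segment, 1);
            }
        }
    }
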
@Override
- public void resetCooldownTimes() {
+ public void resetCooldownTimes(ObjectContainer container) {
+ if(persistent) {
+ container.activate(this, 1);
+ container.activate(segment, 1);
+ }
synchronized(segment) {
segment.resetCooldownTimes((Integer[])blockNums.toArray(new
Integer[blockNums.size()]));
}
}
+ public void reschedule(ObjectContainer container, ClientContext
context) {
+ try {
+ getScheduler(context).register(null, new SendableGet[]
{ this }, persistent, true, container, segment.blockFetchContext.blocks, true);
+ } catch (KeyListenerConstructionException e) {
+ Logger.error(this, "Impossible: "+e+" on "+this, e);
+ }
+ }
+
+ public boolean removeBlockNum(int blockNum, ObjectContainer container,
boolean callerActivatesAndSets) {
+ if(logMINOR) Logger.minor(this, "Removing block "+blockNum+"
from "+this);
+ if(persistent && !callerActivatesAndSets)
+ container.activate(blockNums, 2);
+ boolean found = false;
+ synchronized(segment) {
+ for(int i=0;i<blockNums.size();i++) {
+ Integer token = blockNums.get(i);
+ int num = token;
+ if(num == blockNum) {
+ blockNums.remove(i);
+ if(persistent) container.delete(token);
+ if(logMINOR) Logger.minor(this,
"Removed block "+blockNum+" from "+this);
+ found = true;
+ break;
+ }
+ }
+ }
+ if(persistent && !callerActivatesAndSets) {
+ container.store(blockNums);
+ container.deactivate(blockNums, 2);
+ }
+ return found;
+ }
+
+ public void removeBlockNums(int[] blockNos, ObjectContainer container) {
+ if(persistent)
+ container.activate(blockNums, 2);
+ boolean store = false;
+ for(int i=0;i<blockNos.length;i++)
+ store |= removeBlockNum(blockNos[i], container, true);
+ if(persistent) {
+ if(store) container.store(blockNums);
+ container.deactivate(blockNums, 2);
+ }
+ }
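
The removeBlockNum()/removeBlockNums() pair above batches database writes: the
caller activates the list once, each inner removal skips its own
activate/store/deactivate via callerActivatesAndSets, and the list is stored a
single time at the end. A sketch of that shape (persistent case shown,
assuming only db4o's ObjectContainer; the fields are illustrative):

    import java.util.ArrayList;
    import java.util.List;

    import com.db4o.ObjectContainer;

    class BatchedRemovalSketch {
        private final List<Integer> blockNums = new ArrayList<Integer>();

        void removeAll(int[] blockNos, ObjectContainer container) {
            container.activate(blockNums, 2); // depth 2 reaches the boxed Integers
            boolean changed = false;
            for(int blockNo : blockNos)
                changed |= removeOne(blockNo, container);
            if(changed)
                container.store(blockNums); // one write for the whole batch
            container.deactivate(blockNums, 2);
        }

        private boolean removeOne(int blockNo, ObjectContainer container) {
            for(int i = 0; i < blockNums.size(); i++) {
                Integer token = blockNums.get(i);
                if(token.intValue() == blockNo) {
                    blockNums.remove(i);
                    container.delete(token); // the boxed Integer is itself a stored object
                    return true;
                }
            }
            return false;
        }
    }
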
+
+ @Override
+ public List<PersistentChosenBlock> makeBlocks(PersistentChosenRequest
request, RequestScheduler sched, ObjectContainer container, ClientContext
context) {
+ if(persistent) {
+ container.activate(segment, 1);
+ container.activate(blockNums, 1);
+ }
+ Integer[] blockNumbers;
+ synchronized(this) {
+ blockNumbers = blockNums.toArray(new
Integer[blockNums.size()]);
+ }
+ ArrayList<PersistentChosenBlock> blocks = new
ArrayList<PersistentChosenBlock>();
+ Arrays.sort(blockNumbers);
+ int prevBlockNumber = -1;
+ for(int i=0;i<blockNumbers.length;i++) {
+ int blockNumber = blockNumbers[i];
+ if(blockNumber == prevBlockNumber) {
+ Logger.error(this, "Duplicate block number in
makeBlocks() in "+this+": two copies of "+blockNumber);
+ continue;
+ }
+ prevBlockNumber = blockNumber;
+ ClientKey key = segment.getBlockKey(blockNumber,
container);
+ if(key == null) {
+ if(logMINOR)
+ Logger.minor(this, "Block
"+blockNumber+" is null, maybe race condition");
+ continue;
+ }
+ key = key.cloneKey();
+ Key k = key.getNodeKey();
+ PersistentChosenBlock block = new
PersistentChosenBlock(false, request, new MySendableRequestItem(blockNumber),
k, key, sched);
+ if(logMINOR) Logger.minor(this, "Created block
"+block+" for block number "+blockNumber+" on "+this);
+ blocks.add(block);
+ }
+ blocks.trimToSize();
+ if(persistent) {
+ container.deactivate(segment, 1);
+ container.deactivate(blockNums, 1);
+ }
+ return blocks;
+ }
+
+ @Override
+ public Key[] listKeys(ObjectContainer container) {
+ boolean activated = false;
+ if(persistent) {
+ activated = container.ext().isActive(segment);
+ if(!activated)
+ container.activate(segment, 1);
+ }
+ Key[] keys = segment.listKeys(container);
+ if(persistent && !activated)
+ container.deactivate(segment, 1);
+ return keys;
+ }
+
+ public int objectHash() {
+ return super.hashCode();
+ }
+
+ public boolean objectCanStore(ObjectContainer container) {
+ if(blockNums == null)
+ throw new NullPointerException("Storing "+this+" but
blockNums == null!");
+ return true;
+ }
}
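
makeBlocks() above detects duplicate queued block numbers by sorting the array
and comparing each entry with its predecessor. The same check in isolation, a
sketch assuming non-negative block numbers as in the segment code:

    import java.util.Arrays;

    class DuplicateCheckSketch {
        // Sort, then one pass: any value equal to its predecessor occurs twice.
        static int[] withoutDuplicates(Integer[] blockNumbers) {
            Arrays.sort(blockNumbers);
            int[] out = new int[blockNumbers.length];
            int n = 0;
            int prev = -1;
            for(Integer blockNumber : blockNumbers) {
                if(blockNumber.intValue() == prev)
                    continue; // duplicate; the real code logs an error and skips it
                prev = blockNumber.intValue();
                out[n++] = prev;
            }
            int[] trimmed = new int[n];
            System.arraycopy(out, 0, trimmed, 0, n);
            return trimmed;
        }
    }
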
Modified: trunk/freenet/src/freenet/client/async/SplitFileInserter.java
===================================================================
--- trunk/freenet/src/freenet/client/async/SplitFileInserter.java
2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/client/async/SplitFileInserter.java
2009-04-01 20:34:09 UTC (rev 26322)
@@ -5,8 +5,11 @@
import freenet.client.ArchiveManager.ARCHIVE_TYPE;
import java.io.IOException;
+import java.util.ArrayList;
import java.util.Vector;
+import com.db4o.ObjectContainer;
+
import freenet.client.ClientMetadata;
import freenet.client.FECCodec;
import freenet.client.FailureCodeTracker;
@@ -15,15 +18,30 @@
import freenet.client.Metadata;
import freenet.keys.CHKBlock;
import freenet.keys.ClientCHK;
+import freenet.node.PrioRunnable;
+import freenet.support.Executor;
+import freenet.support.LogThresholdCallback;
import freenet.support.Logger;
import freenet.support.SimpleFieldSet;
import freenet.support.api.Bucket;
import freenet.support.compress.Compressor.COMPRESSOR_TYPE;
import freenet.support.io.BucketTools;
+import freenet.support.io.NativeThread;
public class SplitFileInserter implements ClientPutState {
private static volatile boolean logMINOR;
+
+ static {
+ Logger.registerLogThresholdCallback(new LogThresholdCallback() {
+
+ @Override
+ public void shouldUpdate() {
+ logMINOR = Logger.shouldLog(Logger.MINOR, this);
+ }
+ });
+ }
+
final BaseClientPutter parent;
final InsertContext ctx;
final PutCompletionCallback cb;
@@ -45,6 +63,16 @@
final ARCHIVE_TYPE archiveType;
private boolean forceEncode;
private final long decompressedLength;
+ final boolean persistent;
+
+ // A persistent hashCode is helpful in debugging, and also means we can put
+ // these objects into sets etc when we need to.
+
+ private final int hashCode;
+
+ public int hashCode() {
+ return hashCode;
+ }
public SimpleFieldSet getProgressFieldset() {
SimpleFieldSet fs = new SimpleFieldSet(false);
@@ -60,16 +88,16 @@
fs.put("SegmentSize", segmentSize);
fs.put("CheckSegmentSize", checkSegmentSize);
SimpleFieldSet segs = new SimpleFieldSet(false);
- for(int i=0;i<segments.length;i++) {
- segs.put(Integer.toString(i),
segments[i].getProgressFieldset());
- }
+// for(int i=0;i<segments.length;i++) {
+// segs.put(Integer.toString(i),
segments[i].getProgressFieldset());
+// }
segs.put("Count", segments.length);
fs.put("Segments", segs);
return fs;
}
- public SplitFileInserter(BaseClientPutter put, PutCompletionCallback
cb, Bucket data, COMPRESSOR_TYPE bestCodec, long decompressedLength,
ClientMetadata clientMetadata, InsertContext ctx, boolean getCHKOnly, boolean
isMetadata, Object token, ARCHIVE_TYPE archiveType, boolean freeData) throws
InsertException {
- logMINOR = Logger.shouldLog(Logger.MINOR, this);
+ public SplitFileInserter(BaseClientPutter put, PutCompletionCallback
cb, Bucket data, COMPRESSOR_TYPE bestCodec, long decompressedLength,
ClientMetadata clientMetadata, InsertContext ctx, boolean getCHKOnly, boolean
isMetadata, Object token, ARCHIVE_TYPE archiveType, boolean freeData, boolean
persistent, ObjectContainer container, ClientContext context) throws
InsertException {
+ hashCode = super.hashCode();
this.parent = put;
this.archiveType = archiveType;
this.compressionCodec = bestCodec;
@@ -84,12 +112,16 @@
this.dataLength = data.size();
Bucket[] dataBuckets;
try {
- dataBuckets = BucketTools.split(data,
CHKBlock.DATA_LENGTH, ctx.persistentBucketFactory, freeData);
- if(dataBuckets[dataBuckets.length-1].size() <
CHKBlock.DATA_LENGTH) {
- Bucket oldData =
dataBuckets[dataBuckets.length-1];
- dataBuckets[dataBuckets.length-1] =
BucketTools.pad(oldData, CHKBlock.DATA_LENGTH, ctx.persistentBucketFactory,
(int) oldData.size());
- oldData.free();
- }
+ dataBuckets = BucketTools.split(data,
CHKBlock.DATA_LENGTH, persistent ? ctx.persistentBucketFactory :
context.tempBucketFactory, freeData, persistent, container);
+ if(dataBuckets[dataBuckets.length-1].size() <
CHKBlock.DATA_LENGTH) {
+ Bucket oldData =
dataBuckets[dataBuckets.length-1];
+ dataBuckets[dataBuckets.length-1] =
BucketTools.pad(oldData, CHKBlock.DATA_LENGTH, context.persistentBucketFactory,
(int) oldData.size());
+ if(persistent)
dataBuckets[dataBuckets.length-1].storeTo(container);
+ oldData.free();
+ if(persistent)
oldData.removeFrom(container);
+ }
+ if(logMINOR)
+ Logger.minor(this, "Data size "+data.size()+"
buckets "+dataBuckets.length);
} catch (IOException e) {
throw new InsertException(InsertException.BUCKET_ERROR,
e, null);
}
@@ -99,18 +131,40 @@
segmentSize = ctx.splitfileSegmentDataBlocks;
checkSegmentSize = splitfileAlgorithm ==
Metadata.SPLITFILE_NONREDUNDANT ? 0 : ctx.splitfileSegmentCheckBlocks;
+ this.persistent = persistent;
+ if(persistent) {
+ container.activate(parent, 1);
+ }
+
// Create segments
- segments = splitIntoSegments(segmentSize, dataBuckets);
+ segments = splitIntoSegments(segmentSize, dataBuckets,
context.mainExecutor, container, context, persistent, put);
+ if(persistent) {
+ // Deactivate all buckets, and let dataBuckets be GC'ed
+ for(int i=0;i<dataBuckets.length;i++) {
+ // If we don't set them now, they will be set when the segment is set, which means they will be set deactivated, and cause NPEs.
+ dataBuckets[i].storeTo(container);
+ container.deactivate(dataBuckets[i], 1);
+ if(dataBuckets.length > segmentSize) // Otherwise we are nulling out within the segment
+ dataBuckets[i] = null;
+ }
+ }
+ dataBuckets = null;
int count = 0;
for(int i=0;i<segments.length;i++)
count += segments[i].countCheckBlocks();
countCheckBlocks = count;
// Save progress to disk, don't want to do all that again (probably includes compression in caller)
- parent.onMajorProgress();
+ parent.onMajorProgress(container);
+ if(persistent) {
+ for(int i=0;i<segments.length;i++) {
+ container.store(segments[i]);
+ container.deactivate(segments[i], 1);
+ }
+ }
}
- public SplitFileInserter(BaseClientPutter parent, PutCompletionCallback
cb, ClientMetadata clientMetadata, InsertContext ctx, boolean getCHKOnly,
boolean metadata, Object token, ARCHIVE_TYPE archiveType, SimpleFieldSet fs)
throws ResumeException {
- logMINOR = Logger.shouldLog(Logger.MINOR, this);
+ public SplitFileInserter(BaseClientPutter parent, PutCompletionCallback
cb, ClientMetadata clientMetadata, InsertContext ctx, boolean getCHKOnly,
boolean metadata, Object token, ARCHIVE_TYPE archiveType, SimpleFieldSet fs,
ObjectContainer container, ClientContext context) throws ResumeException {
+ hashCode = super.hashCode();
this.parent = parent;
this.archiveType = archiveType;
this.token = token;
@@ -120,6 +174,7 @@
this.getCHKOnly = getCHKOnly;
this.cb = cb;
this.ctx = ctx;
+ this.persistent = parent.persistent();
// Don't read finished, wait for the segmentFinished()'s.
String length = fs.get("DataLength");
if(length == null) throw new ResumeException("No DataLength");
@@ -194,7 +249,7 @@
SimpleFieldSet segment = segFS.subset(index);
segFS.removeSubset(index);
if(segment == null) throw new ResumeException("No
segment "+i);
- segments[i] = new SplitFileInserterSegment(this,
segment, splitfileAlgorithm, ctx, getCHKOnly, i);
+ segments[i] = new SplitFileInserterSegment(this,
persistent, parent, segment, splitfileAlgorithm, ctx, getCHKOnly, i, context,
container);
dataBlocks += segments[i].countDataBlocks();
checkBlocks += segments[i].countCheckBlocks();
}
@@ -206,7 +261,7 @@
/**
* Group the blocks into segments.
*/
- private SplitFileInserterSegment[] splitIntoSegments(int segmentSize,
Bucket[] origDataBlocks) {
+ private SplitFileInserterSegment[] splitIntoSegments(int segmentSize,
Bucket[] origDataBlocks, Executor executor, ObjectContainer container,
ClientContext context, boolean persistent, BaseClientPutter putter) {
int dataBlocks = origDataBlocks.length;
Vector segs = new Vector();
@@ -214,8 +269,7 @@
// First split the data up
if((dataBlocks < segmentSize) || (segmentSize == -1)) {
// Single segment
- FECCodec codec = FECCodec.getCodec(splitfileAlgorithm,
origDataBlocks.length, ctx.executor);
- SplitFileInserterSegment onlySeg = new
SplitFileInserterSegment(this, codec, origDataBlocks, ctx, getCHKOnly, 0);
+ SplitFileInserterSegment onlySeg = new
SplitFileInserterSegment(this, persistent, putter, splitfileAlgorithm,
FECCodec.getCheckBlocks(splitfileAlgorithm, origDataBlocks.length),
origDataBlocks, ctx, getCHKOnly, 0, container);
segs.add(onlySeg);
} else {
int j = 0;
@@ -227,49 +281,72 @@
j = i;
for(int x=0;x<seg.length;x++)
if(seg[x] == null) throw new
NullPointerException("In splitIntoSegs: "+x+" is null of "+seg.length+" of
"+segNo);
- FECCodec codec =
FECCodec.getCodec(splitfileAlgorithm, seg.length, ctx.executor);
- SplitFileInserterSegment s = new
SplitFileInserterSegment(this, codec, seg, ctx, getCHKOnly, segNo);
+ SplitFileInserterSegment s = new
SplitFileInserterSegment(this, persistent, putter, splitfileAlgorithm,
FECCodec.getCheckBlocks(splitfileAlgorithm, seg.length), seg, ctx, getCHKOnly,
segNo, container);
segs.add(s);
if(i == dataBlocks) break;
segNo++;
}
}
- parent.notifyClients();
+ if(persistent)
+ container.activate(parent, 1);
+ parent.notifyClients(container, context);
return (SplitFileInserterSegment[]) segs.toArray(new
SplitFileInserterSegment[segs.size()]);
}
- public void start() throws InsertException {
- for(int i=0;i<segments.length;i++)
- segments[i].start();
+ public void start(ObjectContainer container, final ClientContext
context) throws InsertException {
+ for(int i=0;i<segments.length;i++) {
+ if(persistent)
+ container.activate(segments[i], 1);
+ segments[i].start(container, context);
+ if(persistent)
+ container.deactivate(segments[i], 1);
+ }
+ if(persistent)
+ container.activate(parent, 1);
if(countDataBlocks > 32)
- parent.onMajorProgress();
- parent.notifyClients();
+ parent.onMajorProgress(container);
+ parent.notifyClients(container, context);
}
- public void encodedSegment(SplitFileInserterSegment segment) {
+ public void encodedSegment(SplitFileInserterSegment segment,
ObjectContainer container, ClientContext context) {
if(logMINOR) Logger.minor(this, "Encoded segment
"+segment.segNo+" of "+this);
boolean ret = false;
boolean encode;
synchronized(this) {
encode = forceEncode;
for(int i=0;i<segments.length;i++) {
+ if(segments[i] != segment) {
+ if(persistent)
+ container.activate(segments[i],
1);
+ }
if((segments[i] == null) ||
!segments[i].isEncoded()) {
ret = true;
+ if(segments[i] != segment && persistent)
+
container.deactivate(segments[i], 1);
break;
}
+ if(segments[i] != segment && persistent)
+ container.deactivate(segments[i], 1);
}
}
- if(encode) segment.forceEncode();
+ if(encode) segment.forceEncode(container, context);
if(ret) return;
- cb.onBlockSetFinished(this);
- if(countDataBlocks > 32)
- parent.onMajorProgress();
+ if(persistent)
+ container.activate(cb, 1);
+ cb.onBlockSetFinished(this, container, context);
+ if(persistent)
+ container.deactivate(cb, 1);
+ if(countDataBlocks > 32) {
+ if(persistent)
+ container.activate(parent, 1);
+ parent.onMajorProgress(container);
+ }
}
- public void segmentHasURIs(SplitFileInserterSegment segment) {
+ public void segmentHasURIs(SplitFileInserterSegment segment,
ObjectContainer container, ClientContext context) {
if(logMINOR) Logger.minor(this, "Segment has URIs: "+segment);
synchronized(this) {
if(haveSentMetadata) {
@@ -277,7 +354,12 @@
}
for(int i=0;i<segments.length;i++) {
- if(!segments[i].hasURIs()) {
+ if(persistent)
+ container.activate(segments[i], 1);
+ boolean hasURIs = segments[i].hasURIs();
+ if(persistent && segments[i] != segment)
+ container.deactivate(segments[i], 1);
+ if(!hasURIs) {
if(logMINOR) Logger.minor(this,
"Segment does not have URIs: "+segments[i]);
return;
}
@@ -285,42 +367,85 @@
}
if(logMINOR) Logger.minor(this, "Have URIs from all segments");
- encodeMetadata();
+ encodeMetadata(container, context, segment);
}
- private void encodeMetadata() {
+ private void encodeMetadata(ObjectContainer container, ClientContext
context, SplitFileInserterSegment dontDeactivateSegment) {
boolean missingURIs;
Metadata m = null;
+ ClientCHK[] dataURIs = new ClientCHK[countDataBlocks];
+ ClientCHK[] checkURIs = new ClientCHK[countCheckBlocks];
synchronized(this) {
+ int dpos = 0;
+ int cpos = 0;
+ for(int i=0;i<segments.length;i++) {
+ if(persistent)
+ container.activate(segments[i], 1);
+ ClientCHK[] data = segments[i].getDataCHKs();
+ System.arraycopy(data, 0, dataURIs, dpos,
data.length);
+ dpos += data.length;
+ ClientCHK[] check = segments[i].getCheckCHKs();
+ System.arraycopy(check, 0, checkURIs, cpos,
check.length);
+ cpos += check.length;
+ if(persistent && segments[i] !=
dontDeactivateSegment)
+ container.deactivate(segments[i], 1);
+ }
// Create metadata
- ClientCHK[] dataURIs = getDataCHKs();
- ClientCHK[] checkURIs = getCheckCHKs();
if(logMINOR) Logger.minor(this, "Data URIs:
"+dataURIs.length+", check URIs: "+checkURIs.length);
missingURIs = anyNulls(dataURIs) || anyNulls(checkURIs);
+ if(persistent) {
+ // Copy the URIs. We don't know what the callee wants the metadata for:
+ // he might well ignore it, as in SimpleManifestPutter.onMetadata().
+ // This way he doesn't need to worry about removing them.
+ for(int i=0;i<dataURIs.length;i++) {
+ container.activate(dataURIs[i], 5);
+ dataURIs[i] = dataURIs[i].cloneKey();
+ }
+ for(int i=0;i<checkURIs.length;i++) {
+ container.activate(checkURIs[i], 5);
+ checkURIs[i] = checkURIs[i].cloneKey();
+ }
+ }
+
if(!missingURIs) {
// Create Metadata
- m = new Metadata(splitfileAlgorithm, dataURIs,
checkURIs, segmentSize, checkSegmentSize, cm, dataLength, archiveType,
compressionCodec, decompressedLength, isMetadata);
+ if(persistent) container.activate(cm, 5);
+ ClientMetadata meta = cm;
+ if(persistent) meta = meta == null ? null :
meta.clone();
+ m = new Metadata(splitfileAlgorithm, dataURIs,
checkURIs, segmentSize, checkSegmentSize, meta, dataLength, archiveType,
compressionCodec, decompressedLength, isMetadata);
}
haveSentMetadata = true;
}
if(missingURIs) {
if(logMINOR) Logger.minor(this, "Missing URIs");
// Error
- fail(new
InsertException(InsertException.INTERNAL_ERROR, "Missing URIs after encoding",
null));
+ fail(new
InsertException(InsertException.INTERNAL_ERROR, "Missing URIs after encoding",
null), container, context);
return;
- } else
- cb.onMetadata(m, this);
+ } else {
+ if(persistent)
+ container.activate(cb, 1);
+ cb.onMetadata(m, this, container, context);
+ if(persistent)
+ container.deactivate(cb, 1);
+ }
}
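
encodeMetadata() above clones every key before handing the Metadata to the
callback, so the callee may keep or discard the keys without tracking objects
the segments will later delete from the database. That handoff rule in
isolation, a sketch reusing the patch's own ClientCHK.cloneKey() and db4o
activation calls:

    import com.db4o.ObjectContainer;

    import freenet.keys.ClientCHK;

    class HandoffSketch {
        // Persistent case: activate deeply enough to reach nested key fields
        // (depth 5, as above), then pass out copies rather than stored objects.
        static ClientCHK[] copyForHandoff(ClientCHK[] stored, ObjectContainer container, boolean persistent) {
            if(!persistent)
                return stored; // transient objects can be shared directly
            ClientCHK[] copies = new ClientCHK[stored.length];
            for(int i = 0; i < stored.length; i++) {
                container.activate(stored[i], 5);
                copies[i] = stored[i].cloneKey();
            }
            return copies;
        }
    }
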
- private void fail(InsertException e) {
+ private void fail(InsertException e, ObjectContainer container,
ClientContext context) {
synchronized(this) {
if(finished) return;
finished = true;
}
- cb.onFailure(e, this);
+ if(persistent) {
+ container.store(this);
+ container.activate(cb, 1);
+ }
+ cb.onFailure(e, this, container, context);
+ if(persistent) {
+ container.deactivate(cb, 1);
+ }
}
// FIXME move this to somewhere
@@ -330,98 +455,87 @@
return false;
}
- private ClientCHK[] getCheckCHKs() {
- // Copy check blocks from each segment into a FreenetURI[].
- ClientCHK[] uris = new ClientCHK[countCheckBlocks];
- int x = 0;
- for(int i=0;i<segments.length;i++) {
- ClientCHK[] segURIs = segments[i].getCheckCHKs();
- if(x + segURIs.length > countCheckBlocks)
- throw new IllegalStateException("x="+x+",
segURIs="+segURIs.length+", countCheckBlocks="+countCheckBlocks);
- System.arraycopy(segURIs, 0, uris, x, segURIs.length);
- x += segURIs.length;
- }
-
- if(uris.length != x)
- throw new IllegalStateException("Total is wrong");
-
- return uris;
- }
-
- private ClientCHK[] getDataCHKs() {
- // Copy check blocks from each segment into a FreenetURI[].
- ClientCHK[] uris = new ClientCHK[countDataBlocks];
- int x = 0;
- for(int i=0;i<segments.length;i++) {
- ClientCHK[] segURIs = segments[i].getDataCHKs();
- if(x + segURIs.length > countDataBlocks)
- throw new IllegalStateException("x="+x+",
segURIs="+segURIs.length+", countDataBlocks="+countDataBlocks);
- System.arraycopy(segURIs, 0, uris, x, segURIs.length);
- x += segURIs.length;
- }
-
- if(uris.length != x)
- throw new IllegalStateException("Total is wrong");
-
- return uris;
- }
-
public BaseClientPutter getParent() {
return parent;
}
- public void segmentFinished(SplitFileInserterSegment segment) {
+ public void segmentFinished(SplitFileInserterSegment segment,
ObjectContainer container, ClientContext context) {
if(logMINOR) Logger.minor(this, "Segment finished: "+segment,
new Exception("debug"));
boolean allGone = true;
- if(countDataBlocks > 32)
- parent.onMajorProgress();
+ if(countDataBlocks > 32) {
+ if(persistent)
+ container.activate(parent, 1);
+ parent.onMajorProgress(container);
+ }
synchronized(this) {
if(finished) {
if(logMINOR) Logger.minor(this, "Finished
already");
return;
}
for(int i=0;i<segments.length;i++) {
+ if(persistent && segments[i] != segment)
+ container.activate(segments[i], 1);
if(!segments[i].isFinished()) {
if(logMINOR) Logger.minor(this,
"Segment not finished: "+i+": "+segments[i]);
allGone = false;
+ if(persistent && segments[i] != segment)
+
container.deactivate(segments[i], 1);
break;
}
+ if(persistent && segments[i] != segment)
+ container.deactivate(segments[i], 1);
}
InsertException e = segment.getException();
if((e != null) && e.isFatal()) {
- cancel();
+ cancel(container, context);
} else {
if(!allGone) return;
}
finished = true;
}
- onAllFinished();
+ if(persistent)
+ container.store(this);
+ onAllFinished(container, context);
}
- public void segmentFetchable(SplitFileInserterSegment segment) {
+ public void segmentFetchable(SplitFileInserterSegment segment,
ObjectContainer container) {
if(logMINOR) Logger.minor(this, "Segment fetchable: "+segment);
synchronized(this) {
if(finished) return;
if(fetchable) return;
for(int i=0;i<segments.length;i++) {
+ if(persistent && segments[i] != segment)
+ container.activate(segments[i], 1);
if(!segments[i].isFetchable()) {
if(logMINOR) Logger.minor(this,
"Segment not fetchable: "+i+": "+segments[i]);
+ if(persistent) {
+ for(int j=0;j<=i;j++) {
+ if(segments[j] ==
segment) continue;
+
container.deactivate(segments[j], 1);
+ }
+ }
return;
}
}
fetchable = true;
}
- cb.onFetchable(this);
+ if(persistent) {
+ container.activate(cb, 1);
+ container.store(this);
+ }
+ cb.onFetchable(this, container);
}
- private void onAllFinished() {
+ private void onAllFinished(ObjectContainer container, ClientContext
context) {
if(logMINOR) Logger.minor(this, "All finished");
try {
// Finished !!
FailureCodeTracker tracker = new
FailureCodeTracker(true);
boolean allSucceeded = true;
for(int i=0;i<segments.length;i++) {
+ if(persistent)
+ container.activate(segments[i], 1);
InsertException e = segments[i].getException();
if(e == null) continue;
if(logMINOR) Logger.minor(this, "Failure on
segment "+i+" : "+segments[i]+" : "+e, e);
@@ -430,29 +544,43 @@
tracker.merge(e.errorCodes);
tracker.inc(e.getMode());
}
+ if(persistent)
+ container.activate(cb, 1);
if(allSucceeded)
- cb.onSuccess(this);
+ cb.onSuccess(this, container, context);
else {
-
cb.onFailure(InsertException.construct(tracker), this);
+
cb.onFailure(InsertException.construct(tracker), this, container, context);
}
} catch (Throwable t) {
// We MUST tell the parent *something*!
Logger.error(this, "Caught "+t, t);
- cb.onFailure(new
InsertException(InsertException.INTERNAL_ERROR), this);
+ cb.onFailure(new
InsertException(InsertException.INTERNAL_ERROR), this, container, context);
}
}
- public void cancel() {
+ public void cancel(ObjectContainer container, ClientContext context) {
+ if(logMINOR)
+ Logger.minor(this, "Cancelling "+this);
synchronized(this) {
if(finished) return;
finished = true;
}
- for(int i=0;i<segments.length;i++)
- segments[i].cancel();
+ if(persistent)
+ container.store(this);
+ for(int i=0;i<segments.length;i++) {
+ if(persistent)
+ container.activate(segments[i], 1);
+ segments[i].cancel(container, context);
+ }
+ // The segments will call segmentFinished, but it will ignore them because finished=true.
+ // Hence we need to call the callback here, since the caller expects us to.
+ if(persistent)
+ container.activate(cb, 1);
+ cb.onFailure(new InsertException(InsertException.CANCELLED),
this, container, context);
}
- public void schedule() throws InsertException {
- start();
+ public void schedule(ObjectContainer container, ClientContext context)
throws InsertException {
+ start(container, context);
}
public Object getToken() {
@@ -464,14 +592,47 @@
}
/** Force the remaining blocks which haven't been encoded so far to be encoded ASAP. */
- public void forceEncode() {
+ public void forceEncode(ObjectContainer container, ClientContext
context) {
+ if(persistent)
+ container.activate(this, 1);
Logger.minor(this, "Forcing encode on "+this);
synchronized(this) {
forceEncode = true;
}
for(int i=0;i<segments.length;i++) {
- segments[i].forceEncode();
+ if(persistent)
+ container.activate(segments[i], 1);
+ segments[i].forceEncode(container, context);
+ if(persistent)
+ container.deactivate(segments[i], 1);
}
}
+ public void removeFrom(ObjectContainer container, ClientContext
context) {
+ // parent can remove itself
+ // ctx will be removed by parent
+ // cb will remove itself
+ // cm will be removed by parent
+ // token setter can remove token
+ for(SplitFileInserterSegment segment : segments) {
+ container.activate(segment, 1);
+ segment.removeFrom(container, context);
+ }
+ container.delete(this);
+ }
+
+ public boolean objectCanUpdate(ObjectContainer container) {
+ if(logMINOR)
+ Logger.minor(this, "objectCanUpdate() on "+this, new
Exception("debug"));
+ return true;
+ }
+
+ public boolean objectCanNew(ObjectContainer container) {
+ if(finished)
+ Logger.error(this, "objectCanNew but finished on
"+this, new Exception("error"));
+ else if(logMINOR)
+ Logger.minor(this, "objectCanNew() on "+this, new
Exception("debug"));
+ return true;
+ }
+
}
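
splitIntoSegments() above groups the data blocks into runs of at most
segmentSize, the final run taking the remainder, with a single-segment case
when there are fewer blocks than segmentSize or segmentSize is -1. The
grouping alone, sketched over plain ints; segment construction and FEC
check-block counting are elided:

    import java.util.ArrayList;
    import java.util.List;

    class SegmentSplitSketch {
        static List<int[]> split(int[] blocks, int segmentSize) {
            List<int[]> segments = new ArrayList<int[]>();
            if(segmentSize == -1 || blocks.length < segmentSize) {
                segments.add(blocks.clone()); // single-segment case
                return segments;
            }
            for(int start = 0; start < blocks.length; start += segmentSize) {
                int len = Math.min(segmentSize, blocks.length - start);
                int[] seg = new int[len];
                System.arraycopy(blocks, start, seg, 0, len);
                segments.add(seg);
            }
            return segments;
        }
    }
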
Modified: trunk/freenet/src/freenet/client/async/SplitFileInserterSegment.java
===================================================================
--- trunk/freenet/src/freenet/client/async/SplitFileInserterSegment.java
2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/client/async/SplitFileInserterSegment.java
2009-04-01 20:34:09 UTC (rev 26322)
@@ -1,34 +1,63 @@
package freenet.client.async;
+import java.io.IOException;
import java.net.MalformedURLException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import com.db4o.ObjectContainer;
+
+import freenet.client.FECCallback;
import freenet.client.FECCodec;
import freenet.client.FECJob;
import freenet.client.FailureCodeTracker;
import freenet.client.InsertContext;
import freenet.client.InsertException;
import freenet.client.Metadata;
-import freenet.client.FECCodec.StandardOnionFECCodecEncoderCallback;
-import freenet.keys.BaseClientKey;
+import freenet.client.SplitfileBlock;
import freenet.keys.CHKBlock;
+import freenet.keys.CHKEncodeException;
import freenet.keys.ClientCHK;
+import freenet.keys.ClientCHKBlock;
import freenet.keys.ClientKey;
import freenet.keys.FreenetURI;
+import freenet.node.KeysFetchingLocally;
+import freenet.node.LowLevelPutException;
+import freenet.node.NodeClientCore;
+import freenet.node.RequestClient;
+import freenet.node.RequestScheduler;
+import freenet.node.SendableInsert;
+import freenet.node.SendableRequestItem;
+import freenet.node.SendableRequestSender;
import freenet.support.Fields;
+import freenet.support.LogThresholdCallback;
import freenet.support.Logger;
import freenet.support.SimpleFieldSet;
import freenet.support.api.Bucket;
+import freenet.support.io.BucketTools;
import freenet.support.io.CannotCreateFromFieldSetException;
-import freenet.support.io.SerializableToFieldSetBucket;
+import freenet.support.io.NativeThread;
import freenet.support.io.SerializableToFieldSetBucketUtil;
-public class SplitFileInserterSegment implements PutCompletionCallback,
StandardOnionFECCodecEncoderCallback {
+public class SplitFileInserterSegment extends SendableInsert implements
FECCallback, Encodeable {
private static volatile boolean logMINOR;
-
+
+ static {
+ Logger.registerLogThresholdCallback(new LogThresholdCallback() {
+
+ @Override
+ public void shouldUpdate() {
+ logMINOR = Logger.shouldLog(Logger.MINOR, this);
+ }
+ });
+ }
+
final SplitFileInserter parent;
+ final BaseClientPutter putter;
- final FECCodec splitfileAlgo;
+ final short splitfileAlgo;
final Bucket[] dataBlocks;
@@ -38,10 +67,23 @@
final ClientCHK[] checkURIs;
- final SingleBlockInserter[] dataBlockInserters;
-
- final SingleBlockInserter[] checkBlockInserters;
-
+ final int[] dataRetries;
+ final int[] checkRetries;
+
+ final int[] dataConsecutiveRNFs;
+ final int[] checkConsecutiveRNFs;
+
+ /** Block numbers not finished */
+ final ArrayList<Integer> blocks;
+
+ final boolean[] dataFinished;
+ final boolean[] checkFinished;
+
+ final boolean[] dataFailed;
+ final boolean[] checkFailed;
+
+ final int maxRetries;
+
final InsertContext blockInsertContext;
final int segNo;
@@ -61,29 +103,44 @@
private final FailureCodeTracker errors;
private int blocksGotURI;
-
+ private int blocksSucceeded;
private int blocksCompleted;
+
+ private final boolean persistent;
+
+ private FECJob encodeJob;
+
- public SplitFileInserterSegment(SplitFileInserter parent,
- FECCodec splitfileAlgo, Bucket[] origDataBlocks,
- InsertContext blockInsertContext, boolean getCHKOnly,
int segNo) {
- logMINOR = Logger.shouldLog(Logger.MINOR, this);
+ public SplitFileInserterSegment(SplitFileInserter parent, boolean
persistent, BaseClientPutter putter,
+ short splitfileAlgo, int checkBlockCount, Bucket[]
origDataBlocks,
+ InsertContext blockInsertContext, boolean getCHKOnly,
int segNo, ObjectContainer container) {
+ super(persistent);
this.parent = parent;
this.getCHKOnly = getCHKOnly;
+ this.persistent = persistent;
this.errors = new FailureCodeTracker(true);
this.blockInsertContext = blockInsertContext;
this.splitfileAlgo = splitfileAlgo;
this.dataBlocks = origDataBlocks;
- int checkBlockCount = splitfileAlgo == null ? 0 : splitfileAlgo
- .countCheckBlocks();
checkBlocks = new Bucket[checkBlockCount];
checkURIs = new ClientCHK[checkBlockCount];
dataURIs = new ClientCHK[origDataBlocks.length];
- dataBlockInserters = new SingleBlockInserter[dataBlocks.length];
- checkBlockInserters = new
SingleBlockInserter[checkBlocks.length];
- parent.parent.addBlocks(dataURIs.length + checkURIs.length);
- parent.parent.addMustSucceedBlocks(dataURIs.length +
checkURIs.length);
+ dataRetries = new int[origDataBlocks.length];
+ checkRetries = new int[checkBlockCount];
+ dataFinished = new boolean[origDataBlocks.length];
+ checkFinished = new boolean[checkBlockCount];
+ dataFailed = new boolean[origDataBlocks.length];
+ checkFailed = new boolean[checkBlockCount];
+ dataConsecutiveRNFs = new int[origDataBlocks.length];
+ checkConsecutiveRNFs = new int[checkBlockCount];
+ blocks = new ArrayList<Integer>();
+ putter.addBlocks(dataURIs.length + checkURIs.length, container);
+ putter.addMustSucceedBlocks(dataURIs.length + checkURIs.length,
container);
this.segNo = segNo;
+ if(persistent) container.activate(blockInsertContext, 1);
+ maxRetries = blockInsertContext.maxInsertRetries;
+ this.putter = putter;
+
}
/**
@@ -91,12 +148,16 @@
*
* @throws ResumeException
*/
- public SplitFileInserterSegment(SplitFileInserter parent,
+ public SplitFileInserterSegment(SplitFileInserter parent, boolean
persistent, BaseClientPutter putter,
SimpleFieldSet fs, short splitfileAlgorithm,
InsertContext ctx,
- boolean getCHKOnly, int segNo) throws ResumeException {
+ boolean getCHKOnly, int segNo, ClientContext context,
ObjectContainer container) throws ResumeException {
+ super(persistent);
this.parent = parent;
+ this.splitfileAlgo = splitfileAlgorithm;
this.getCHKOnly = getCHKOnly;
+ this.persistent = persistent;
this.blockInsertContext = ctx;
+ this.maxRetries = ctx.maxInsertRetries;
this.segNo = segNo;
if (!"SplitFileInserterSegment".equals(fs.get("Type")))
throw new ResumeException("Wrong Type: " +
fs.get("Type"));
@@ -130,7 +191,12 @@
dataBlocks = new Bucket[dataBlockCount];
dataURIs = new ClientCHK[dataBlockCount];
- dataBlockInserters = new SingleBlockInserter[dataBlockCount];
+ dataRetries = new int[dataBlockCount];
+ dataConsecutiveRNFs = new int[dataBlockCount];
+ dataFinished = new boolean[dataBlockCount];
+ dataFailed = new boolean[dataBlockCount];
+ blocks = new ArrayList<Integer>();
+ this.putter = putter;
// Check blocks first, because if there are missing check blocks, we
// need
@@ -151,7 +217,10 @@
}
checkBlocks = new Bucket[checkBlockCount];
checkURIs = new ClientCHK[checkBlockCount];
- checkBlockInserters = new
SingleBlockInserter[checkBlockCount];
+ checkRetries = new int[checkBlockCount];
+ checkConsecutiveRNFs = new int[checkBlockCount];
+ checkFinished = new boolean[checkBlockCount];
+ checkFailed = new boolean[checkBlockCount];
for (int i = 0; i < checkBlockCount; i++) {
String index = Integer.toString(i);
SimpleFieldSet blockFS = checkFS.subset(index);
@@ -189,7 +258,7 @@
if (bucketFS != null) {
try {
checkBlocks[i] =
SerializableToFieldSetBucketUtil
-
.create(bucketFS, ctx.random,
+
.create(bucketFS, context.random,
ctx.persistentFileTracker);
if (logMINOR)
Logger.minor(this, "Check block " + i + " : "
@@ -220,8 +289,6 @@
}
checkFS.removeSubset(index);
}
- splitfileAlgo = FECCodec.getCodec(splitfileAlgorithm,
- dataBlockCount, checkBlocks.length,
ctx.executor);
if(checkBlocks.length > dataBlocks.length) {
// Work around 1135 bug.
@@ -231,12 +298,15 @@
} else {
Logger.normal(this, "Not encoded because no check
blocks");
encoded = false;
- splitfileAlgo = FECCodec.getCodec(splitfileAlgorithm,
- dataBlockCount, ctx.executor);
- int checkBlocksCount = splitfileAlgo.countCheckBlocks();
+ FECCodec splitfileAlgo =
FECCodec.getCodec(splitfileAlgorithm,
+ dataBlockCount);
+ int checkBlocksCount = splitfileAlgo.countCheckBlocks();
this.checkURIs = new ClientCHK[checkBlocksCount];
this.checkBlocks = new Bucket[checkBlocksCount];
- this.checkBlockInserters = new
SingleBlockInserter[checkBlocksCount];
+ checkRetries = new int[checkBlocksCount];
+ checkConsecutiveRNFs = new int[checkBlocksCount];
+ checkFinished = new boolean[checkBlocksCount];
+ checkFailed = new boolean[checkBlocksCount];
hasURIs = false;
}
@@ -279,7 +349,7 @@
} else {
try {
dataBlocks[i] =
SerializableToFieldSetBucketUtil.create(
- bucketFS, ctx.random,
ctx.persistentFileTracker);
+ bucketFS,
context.random, ctx.persistentFileTracker);
if (logMINOR)
Logger.minor(this, "Data block
" + i + " : "
+
dataBlocks[i]);
@@ -308,202 +378,276 @@
throw new ResumeException("Missing data
block " + i
+ " and need to
reconstruct check blocks");
}
- parent.parent.addBlocks(dataURIs.length + checkURIs.length);
- parent.parent.addMustSucceedBlocks(dataURIs.length +
checkURIs.length);
+ putter.addBlocks(dataURIs.length + checkURIs.length, container);
+ putter.addMustSucceedBlocks(dataURIs.length + checkURIs.length,
container);
}
- public synchronized SimpleFieldSet getProgressFieldset() {
- SimpleFieldSet fs = new SimpleFieldSet(false); // these get BIG
- fs.putSingle("Type", "SplitFileInserterSegment");
- fs.put("Finished", finished);
- // If true, check blocks which are null are finished
- fs.put("Encoded", encoded);
- // If true, data blocks which are null are finished
- fs.put("Started", started);
- fs.tput("Errors", errors.toFieldSet(false));
- SimpleFieldSet dataFS = new SimpleFieldSet(false);
- dataFS.put("Count", dataBlocks.length);
- for (int i = 0; i < dataBlocks.length; i++) {
- SimpleFieldSet block = new SimpleFieldSet(false);
- if (dataURIs[i] != null)
- block.putSingle("URI",
dataURIs[i].getURI().toString());
- SingleBlockInserter sbi = dataBlockInserters[i];
- // If started, then sbi = null => block finished.
- boolean finished = started && sbi == null;
- if (started) {
- block.put("Finished", finished);
- }
- Bucket data = dataBlocks[i];
- if (data == null && finished) {
- // Ignore
- if (logMINOR)
- Logger.minor(this, "Could not save to
disk bucket: null");
- } else if (data instanceof
SerializableToFieldSetBucket) {
- SimpleFieldSet tmp =
((SerializableToFieldSetBucket) data).toFieldSet();
- if (tmp == null) {
- if (logMINOR)
- Logger.minor(this, "Could not
save to disk data: " + data);
- return null;
- }
- block.put("Data", tmp);
- } else {
- if (logMINOR)
- Logger.minor(this,
- "Could not save to disk
(not serializable to fieldset): " + data);
- return null;
- }
- if (!block.isEmpty())
- dataFS.put(Integer.toString(i), block);
+ public void start(ObjectContainer container, ClientContext context)
throws InsertException {
+ // Always called by parent, so don't activate or deactivate parent.
+ if(persistent) {
+ container.activate(parent, 1);
+ container.activate(parent.parent, 1);
+ container.activate(blocks, 2);
}
- fs.put("DataBlocks", dataFS);
- SimpleFieldSet checkFS = new SimpleFieldSet(false);
- checkFS.put("Count", checkBlocks.length);
- for (int i = 0; i < checkBlocks.length; i++) {
- SimpleFieldSet block = new SimpleFieldSet(false);
- if (checkURIs[i] != null)
- block.putSingle("URI",
checkURIs[i].getURI().toString());
- SingleBlockInserter sbi = checkBlockInserters[i];
- // If encoded, then sbi == null => block finished
- boolean finished = encoded && sbi == null &&
checkURIs[i] != null;
- if (encoded) {
- block.put("Finished", finished);
- }
- if (!finished) {
- Bucket data = checkBlocks[i];
- if (data != null
- && data instanceof
SerializableToFieldSetBucket) {
- SimpleFieldSet tmp =
((SerializableToFieldSetBucket) data)
- .toFieldSet();
- if (tmp == null)
- Logger.error(this, "Could not
serialize " + data
- + " - check
block " + i + " of " + segNo);
- else
- block.put("Data", tmp);
- } else if (encoded) {
- Logger.error(this,
- "Could not save to disk
(null or not serializable to fieldset) encoded="+encoded+" finished="+finished
+ " checkURI[i]="+checkURIs[i]+" : "
- + data,
new Exception());
- return null;
- }
- }
- if (!block.isEmpty())
- checkFS.put(Integer.toString(i), block);
- }
- fs.put("CheckBlocks", checkFS);
- return fs;
- }
-
- public void start() throws InsertException {
- if (logMINOR)
+ if (logMINOR) {
+ if(parent == null) throw new NullPointerException();
Logger.minor(this, "Starting segment " + segNo + " of "
+ parent
+ " (" + parent.dataLength + "): " +
this + " ( finished="
+ finished + " encoded=" + encoded + "
hasURIs=" + hasURIs
- + ')');
+ + " persistent=" + persistent + ')');
+ }
boolean fin = true;
- for (int i = 0; i < dataBlockInserters.length; i++) {
+ for (int i = 0; i < dataBlocks.length; i++) {
if (dataBlocks[i] != null) { // else already finished on creation
- dataBlockInserters[i] = new
SingleBlockInserter(parent.parent,
- dataBlocks[i], (short) -1,
FreenetURI.EMPTY_CHK_URI,
- blockInsertContext, this,
false, CHKBlock.DATA_LENGTH,
- i, getCHKOnly, false, false,
parent.token, false);
- dataBlockInserters[i].schedule();
fin = false;
+ synchronized(this) {
+ blocks.add(i);
+ }
} else {
- parent.parent.completedBlock(true);
+ parent.parent.completedBlock(true, container,
context);
}
}
// parent.parent.notifyClients();
started = true;
+ FECJob job = null;
+ FECCodec splitfileAlgo = null;
if (!encoded) {
if (logMINOR)
Logger.minor(this, "Segment " + segNo + " of "
+ parent + " ("
+ parent.dataLength + ") is not
encoded");
- if (splitfileAlgo != null) {
+ splitfileAlgo = FECCodec.getCodec(this.splitfileAlgo,
+ dataBlocks.length, checkBlocks.length);
if (logMINOR)
Logger.minor(this, "Encoding segment "
+ segNo + " of "
- + parent + " (" +
parent.dataLength + ')');
+ + parent + " (" +
parent.dataLength + ") persistent="+persistent);
// Encode blocks
synchronized(this) {
if(!encoded){
- splitfileAlgo.addToQueue(new
FECJob(splitfileAlgo, dataBlocks, checkBlocks, CHKBlock.DATA_LENGTH,
blockInsertContext.persistentBucketFactory, this, false));
+ // FIXME necessary??? the queue is persistence aware, won't it activate them...?
+ if(persistent) {
+ for(int
i=0;i<dataBlocks.length;i++)
+
container.activate(dataBlocks[i], 5);
+ }
+ job = encodeJob = new
FECJob(splitfileAlgo, context.fecQueue, dataBlocks, checkBlocks,
CHKBlock.DATA_LENGTH, persistent ? blockInsertContext.persistentBucketFactory :
context.tempBucketFactory, this, false, parent.parent.getPriorityClass(),
persistent);
}
}
fin = false;
- }
} else {
- for (int i = 0; i < checkBlockInserters.length; i++) {
+ for (int i = 0; i < checkBlocks.length; i++) {
if (checkBlocks[i] != null) {
- checkBlockInserters[i] = new
SingleBlockInserter(
- parent.parent,
checkBlocks[i], (short) -1,
-
FreenetURI.EMPTY_CHK_URI, blockInsertContext, this,
- false,
CHKBlock.DATA_LENGTH, i + dataBlocks.length,
- getCHKOnly, false,
false, parent.token, false);
- checkBlockInserters[i].schedule();
+ synchronized(this) {
+ blocks.add(i +
dataBlocks.length);
+ }
fin = false;
} else
- parent.parent.completedBlock(true);
+ parent.parent.completedBlock(true,
container, context);
}
- onEncodedSegment();
+ onEncodedSegment(container, context, null, dataBlocks,
checkBlocks, null, null);
}
if (hasURIs) {
- parent.segmentHasURIs(this);
+ parent.segmentHasURIs(this, container, context);
}
boolean fetchable;
synchronized (this) {
fetchable = (blocksCompleted > dataBlocks.length);
}
+ if(persistent) {
+ container.store(this);
+ container.store(blocks);
+ }
if (fetchable)
- parent.segmentFetchable(this);
+ parent.segmentFetchable(this, container);
if (fin)
- finish();
+ finish(container, context, parent);
+ else
+ schedule(container, context);
if (finished) {
- parent.segmentFinished(this);
+ finish(container, context, parent);
}
+ if(job != null) {
+ splitfileAlgo.addToQueue(job, context.fecQueue,
container);
+ }
}
- public void onDecodedSegment() {} // irrevelant
+ private void schedule(ObjectContainer container, ClientContext context)
{
+ if(!getCHKOnly) {
+ this.getScheduler(context).registerInsert(this,
persistent, false, container);
+ } else {
+ tryEncode(container, context);
+ }
+ }
- public void onEncodedSegment() {
+ public void tryEncode(ObjectContainer container, ClientContext context)
{
+ for(int i=0;i<dataBlocks.length;i++) {
+ if(dataURIs[i] == null && dataBlocks[i] != null) {
+ try {
+ boolean deactivate = false;
+ if(persistent) {
+ deactivate =
!container.ext().isActive(dataBlocks[i]);
+ if(deactivate)
container.activate(dataBlocks[i], 1);
+ }
+ ClientCHK key = (ClientCHK)
encodeBucket(dataBlocks[i]).getClientKey();
+ if(deactivate)
container.deactivate(dataBlocks[i], 1);
+ onEncode(i, key, container, context);
+ } catch (CHKEncodeException e) {
+ fail(new
InsertException(InsertException.INTERNAL_ERROR, e, null), container, context);
+ } catch (IOException e) {
+ fail(new
InsertException(InsertException.BUCKET_ERROR, e, null), container, context);
+ }
+ } else if(dataURIs[i] == null && dataBlocks[i] == null)
{
+ fail(new
InsertException(InsertException.INTERNAL_ERROR, "Data block "+i+" cannot be
encoded: no data", null), container, context);
+ }
+ }
+ if(encoded) {
+ for(int i=0;i<checkBlocks.length;i++) {
+ if(checkURIs[i] == null && checkBlocks[i] !=
null) {
+ try {
+ boolean deactivate = false;
+ if(persistent) {
+ deactivate =
!container.ext().isActive(checkBlocks[i]);
+ if(deactivate)
container.activate(checkBlocks[i], 1);
+ }
+ ClientCHK key = (ClientCHK)
encodeBucket(checkBlocks[i]).getClientKey();
+ if(deactivate)
container.deactivate(checkBlocks[i], 1);
+ onEncode(i+dataBlocks.length,
key, container, context);
+ } catch (CHKEncodeException e) {
+ fail(new
InsertException(InsertException.INTERNAL_ERROR, e, null), container, context);
+ } catch (IOException e) {
+ fail(new
InsertException(InsertException.BUCKET_ERROR, e, null), container, context);
+ }
+ } else if(checkURIs[i] == null &&
checkBlocks[i] == null) {
+ fail(new
InsertException(InsertException.INTERNAL_ERROR, "Data block "+i+" cannot be
encoded: no data", null), container, context);
+ }
+ }
+ }
+ }
+
+ public void onDecodedSegment(ObjectContainer container, ClientContext
context, FECJob job, Bucket[] dataBuckets, Bucket[] checkBuckets,
SplitfileBlock[] dataBlockStatus, SplitfileBlock[] checkBlockStatus) {} //
irrelevant
+
+ public void onEncodedSegment(ObjectContainer container, ClientContext
context, FECJob job, Bucket[] dataBuckets, Bucket[] checkBuckets,
SplitfileBlock[] dataBlockStatus, SplitfileBlock[] checkBlockStatus) {
+ if(persistent) {
+ container.activate(parent, 1);
+ container.activate(parent.parent, 1);
+ container.activate(blocks, 2);
+ }
+ boolean fin;
+ synchronized(this) {
+ fin = finished;
+ encodeJob = null;
+ }
+ if(removeOnEncode) {
+ if(logMINOR) Logger.minor(this, "Removing on encode:
"+this);
+ for(int i=0;i<dataBuckets.length;i++) {
+ if(dataBuckets[i] == null) continue;
+ dataBuckets[i].free();
+ if(persistent)
+ dataBuckets[i].removeFrom(container);
+ dataBuckets[i] = null;
+ }
+ for(int i=0;i<checkBuckets.length;i++) {
+ if(checkBuckets[i] == null) continue;
+ checkBuckets[i].free();
+ if(persistent)
+ checkBuckets[i].removeFrom(container);
+ checkBuckets[i] = null;
+ }
+ removeFrom(container, context);
+ return;
+ }
+ if(fin) {
+ Logger.error(this, "Encoded segment even though segment
finished! Freeing buckets...");
+ for(int i=0;i<dataBuckets.length;i++) {
+ if(dataBuckets[i] == null) continue;
+ dataBuckets[i].free();
+ if(persistent)
+ dataBuckets[i].removeFrom(container);
+ dataBuckets[i] = null;
+ }
+ for(int i=0;i<checkBuckets.length;i++) {
+ if(checkBuckets[i] == null) continue;
+ checkBuckets[i].free();
+ if(persistent)
+ checkBuckets[i].removeFrom(container);
+ checkBuckets[i] = null;
+ }
+ return;
+ }
// Start the inserts
try {
- for (int i = 0; i < checkBlockInserters.length; i++) {
- if(checkBlocks[i] == null) continue;
- if(checkBlockInserters[i] != null) continue;
- checkBlockInserters[i] = new
SingleBlockInserter(parent.parent,
- checkBlocks[i], (short) -1,
FreenetURI.EMPTY_CHK_URI,
- blockInsertContext, this,
false, CHKBlock.DATA_LENGTH,
- i + dataBlocks.length,
getCHKOnly, false, false,
- parent.token, false);
- checkBlockInserters[i].schedule();
+ if(logMINOR)
+ Logger.minor(this, "Scheduling
"+checkBlocks.length+" check blocks...");
+ for (int i = 0; i < checkBlocks.length; i++) {
+ // See comments on FECCallback: WE MUST COPY THE DATA BACK!!!
+ checkBlocks[i] = checkBuckets[i];
+ if(checkBlocks[i] == null) {
+ if(logMINOR)
+ Logger.minor(this, "Skipping
check block "+i+" - is null");
+ continue;
+ }
+ if(persistent)
+ checkBlocks[i].storeTo(container);
+ if(persistent) {
+ container.deactivate(checkBlocks[i], 1);
+ }
}
+ synchronized(this) {
+ for(int i=0;i<checkBlocks.length;i++)
+ blocks.add(dataBlocks.length + i);
+ }
+ if(persistent) container.store(blocks);
} catch (Throwable t) {
Logger.error(this, "Caught " + t + " while encoding " +
this, t);
InsertException ex = new InsertException(
InsertException.INTERNAL_ERROR, t,
null);
- finish(ex);
+ finish(ex, container, context, parent);
+ if(persistent)
+ container.deactivate(parent, 1);
return;
}
synchronized (this) {
encoded = true;
}
+
+ if(persistent) {
+ container.store(this);
+ container.activate(parent, 1);
+ }
// Tell parent only after we have started the inserts.
// Because of the counting.
- parent.encodedSegment(this);
+ parent.encodedSegment(this, container, context);
synchronized (this) {
- for (int i = 0; i < dataBlockInserters.length; i++) {
- if (dataBlockInserters[i] == null &&
dataBlocks[i] != null) {
+ for (int i = 0; i < dataBlocks.length; i++) {
+ if (dataFinished[i] && dataBlocks[i] != null) {
+ if(logMINOR) Logger.minor(this,
"Freeing data block "+i+" delayed for encode");
+ if(persistent)
container.activate(dataBlocks[i], 1);
dataBlocks[i].free();
+ if(persistent)
+
dataBlocks[i].removeFrom(container);
dataBlocks[i] = null;
}
}
}
+
+ if(persistent) {
+ container.store(this);
+ container.deactivate(parent, 1);
+ }
+
+ schedule(container, context);
}
- private void finish(InsertException ex) {
+ /**
+ * Caller must activate and pass in parent.
+ * @param ex
+ * @param container
+ * @param context
+ * @param parent
+ */
+ void finish(InsertException ex, ObjectContainer container,
ClientContext context, SplitFileInserter parent) {
if (logMINOR)
Logger.minor(this, "Finishing " + this + " with " + ex,
ex);
synchronized (this) {
@@ -512,23 +656,67 @@
finished = true;
toThrow = ex;
}
- parent.segmentFinished(this);
+ if(persistent) {
+ container.store(this);
+ }
+ parent.segmentFinished(this, container, context);
+ for(int i=0;i<dataBlocks.length;i++) {
+ if(dataBlocks[i] == null) continue;
+ container.activate(dataBlocks[i], 1);
+ dataBlocks[i].free();
+ dataBlocks[i].removeFrom(container);
+ dataBlocks[i] = null;
+ }
+ for(int i=0;i<checkBlocks.length;i++) {
+ if(checkBlocks[i] == null) continue;
+ container.activate(checkBlocks[i], 1);
+ checkBlocks[i].free();
+ checkBlocks[i].removeFrom(container);
+ checkBlocks[i] = null;
+ }
}
- private void finish() {
+ /**
+ * Caller must activate and pass in parent.
+ * @param container
+ * @param context
+ * @param parent
+ */
+ private void finish(ObjectContainer container, ClientContext context,
SplitFileInserter parent) {
+ if(logMINOR) Logger.minor(this, "Finishing "+this);
+ if(persistent)
+ container.activate(errors, 5);
synchronized (this) {
if (finished)
return;
finished = true;
- toThrow = InsertException.construct(errors);
+ if(blocksSucceeded < blocksCompleted)
+ toThrow = InsertException.construct(errors);
}
- parent.segmentFinished(this);
+ if(persistent) {
+ container.store(this);
+ container.deactivate(errors, 5);
+ }
+ unregister(container, context);
+ parent.segmentFinished(this, container, context);
+ for(int i=0;i<dataBlocks.length;i++) {
+ if(dataBlocks[i] == null) continue;
+ container.activate(dataBlocks[i], 1);
+ dataBlocks[i].free();
+ dataBlocks[i].removeFrom(container);
+ dataBlocks[i] = null;
+ }
+ for(int i=0;i<checkBlocks.length;i++) {
+ if(checkBlocks[i] == null) continue;
+ container.activate(checkBlocks[i], 1);
+ checkBlocks[i].free();
+ checkBlocks[i].removeFrom(container);
+ checkBlocks[i] = null;
+ }
}
- public void onEncode(BaseClientKey k, ClientPutState state) {
- ClientCHK key = (ClientCHK) k;
- SingleBlockInserter sbi = (SingleBlockInserter) state;
- int x = sbi.token;
+ private void onEncode(int x, ClientCHK key, ObjectContainer container,
ClientContext context) {
+ if(logMINOR) Logger.minor(this, "Encoded block "+x+" on "+this);
synchronized (this) {
if (finished)
return;
@@ -544,6 +732,10 @@
dataURIs[x] = key;
}
blocksGotURI++;
+ if(persistent)
+ container.store(this);
+ if(logMINOR)
+ Logger.minor(this, "Blocks got URI:
"+blocksGotURI+" of "+(dataBlocks.length + checkBlocks.length));
if (blocksGotURI != dataBlocks.length +
checkBlocks.length)
return;
// Double check
@@ -561,84 +753,15 @@
}
hasURIs = true;
}
- parent.segmentHasURIs(this);
- }
-
- public void onSuccess(ClientPutState state) {
- if (parent.parent.isCancelled()) {
- parent.cancel();
- return;
+ if(persistent) {
+ container.activate(parent, 1);
+ container.store(this);
}
- SingleBlockInserter sbi = (SingleBlockInserter) state;
- int x = sbi.token;
- completed(x);
+ parent.segmentHasURIs(this, container, context);
+ if(persistent)
+ container.deactivate(parent, 1);
}
- public void onFailure(InsertException e, ClientPutState state) {
- if (parent.parent.isCancelled()) {
- parent.cancel();
- return;
- }
- SingleBlockInserter sbi = (SingleBlockInserter) state;
- int x = sbi.token;
- errors.merge(e);
- completed(x);
- }
-
- private void completed(int x) {
- int total = innerCompleted(x);
- if (total == -1)
- return;
- if (total == dataBlockInserters.length) {
- parent.segmentFetchable(this);
- }
- if (total != dataBlockInserters.length +
checkBlockInserters.length)
- return;
- finish();
- }
-
- /**
- * Called when a block has completed.
- *
- * @param x
- * The block number.
- * @return -1 if the segment has already finished, otherwise the number
of
- * completed blocks.
- */
- private synchronized int innerCompleted(int x) {
- if (logMINOR)
- Logger.minor(this, "Completed: " + x + " on " + this
- + " ( completed=" + blocksCompleted +
", total="
- + (dataBlockInserters.length +
checkBlockInserters.length));
-
- if (finished)
- return -1;
- if (x >= dataBlocks.length) {
- x -= dataBlocks.length;
- if (checkBlockInserters[x] == null) {
- Logger.error(this, "Completed twice: check
block " + x + " on "
- + this, new Exception());
- return blocksCompleted;
- }
- checkBlockInserters[x] = null;
- checkBlocks[x].free();
- checkBlocks[x] = null;
- } else {
- if (dataBlockInserters[x] == null) {
- Logger.error(this, "Completed twice: data block
" + x + " on "
- + this, new Exception());
- return blocksCompleted;
- }
- dataBlockInserters[x] = null;
- if (encoded) {
- dataBlocks[x].free();
- dataBlocks[x] = null;
- }
- }
- blocksCompleted++;
- return blocksCompleted;
- }
-
public synchronized boolean isFinished() {
return finished;
}
@@ -663,13 +786,17 @@
return dataURIs;
}
+ /** Get the InsertException for this segment.
+ * NOTE: This will be deleted when the segment is deleted! Do not store it or pass
+ * it on!
+ */
InsertException getException() {
synchronized (this) {
return toThrow;
}
}
- public void cancel() {
+ public void cancel(ObjectContainer container, ClientContext context) {
synchronized (this) {
if (finished)
return;
@@ -677,39 +804,43 @@
if (toThrow != null)
toThrow = new
InsertException(InsertException.CANCELLED);
}
- for (int i = 0; i < dataBlockInserters.length; i++) {
- SingleBlockInserter sbi = dataBlockInserters[i];
- if (sbi != null)
- sbi.cancel();
- Bucket d = dataBlocks[i];
- if (d != null) {
- d.free();
- dataBlocks[i] = null;
- }
+ cancelInner(container, context);
+ }
+
+ private void cancelInner(ObjectContainer container, ClientContext
context) {
+ if(logMINOR) Logger.minor(this, "Cancelling "+this);
+ super.unregister(container, context);
+ if(persistent) {
+ container.store(this);
+ container.activate(parent, 1);
}
- for (int i = 0; i < checkBlockInserters.length; i++) {
- SingleBlockInserter sbi = checkBlockInserters[i];
- if (sbi != null)
- sbi.cancel();
- Bucket d = checkBlocks[i];
- if (d != null) {
- d.free();
- checkBlocks[i] = null;
- }
+ parent.segmentFinished(this, container, context);
+ for(int i=0;i<dataBlocks.length;i++) {
+ if(dataBlocks[i] == null) continue;
+ if(persistent) container.activate(dataBlocks[i], 1);
+ dataBlocks[i].free();
+ if(persistent) dataBlocks[i].removeFrom(container);
+ dataBlocks[i] = null;
}
- parent.segmentFinished(this);
+ for(int i=0;i<checkBlocks.length;i++) {
+ if(checkBlocks[i] == null) continue;
+ if(persistent) container.activate(checkBlocks[i], 1);
+ checkBlocks[i].free();
+ if(persistent) checkBlocks[i].removeFrom(container);
+ checkBlocks[i] = null;
+ }
}
- public void onTransition(ClientPutState oldState, ClientPutState
newState) {
+ public void onTransition(ClientPutState oldState, ClientPutState
newState, ObjectContainer container) {
Logger.error(this, "Illegal transition in
SplitFileInserterSegment: "
+ oldState + " -> " + newState);
}
- public void onMetadata(Metadata m, ClientPutState state) {
+ public void onMetadata(Metadata m, ClientPutState state,
ObjectContainer container, ClientContext context) {
Logger.error(this, "Got onMetadata from " + state);
}
- public void onBlockSetFinished(ClientPutState state) {
+ public void onBlockSetFinished(ClientPutState state, ObjectContainer
container, ClientContext context) {
// Ignore
Logger.error(this, "Should not happen: onBlockSetFinished(" +
state
+ ") on " + this);
@@ -723,7 +854,7 @@
return blocksCompleted >= dataBlocks.length;
}
- public void onFetchable(ClientPutState state) {
+ public void onFetchable(ClientPutState state, ObjectContainer
container) {
// Ignore
}
@@ -731,8 +862,716 @@
* Force the remaining blocks which haven't been encoded so far to be
* encoded ASAP.
*/
- public void forceEncode() {
-
blockInsertContext.backgroundBlockEncoder.queue(dataBlockInserters);
-
blockInsertContext.backgroundBlockEncoder.queue(checkBlockInserters);
+ public void forceEncode(ObjectContainer container, ClientContext
context) {
+ context.backgroundBlockEncoder.queue(this, container, context);
}
+
+ public void fail(InsertException e, ObjectContainer container,
ClientContext context) {
+ synchronized(this) {
+ if(finished) {
+ Logger.error(this, "Failing but already
finished on "+this);
+ return;
+ }
+ finished = true;
+ Logger.error(this, "Insert segment failed: "+e+" for
"+this, e);
+ this.toThrow = e;
+ if(persistent) container.store(this);
+ }
+ cancelInner(container, context);
+ }
+
+ public void onFailed(Throwable t, ObjectContainer container,
ClientContext context) {
+ synchronized(this) {
+ if(finished) {
+ Logger.error(this, "FEC decode or encode failed
but already finished: "+t, t);
+ return;
+ }
+ finished = true;
+ Logger.error(this, "Insert segment failed: "+t+" for
"+this, t);
+ this.toThrow = new
InsertException(InsertException.INTERNAL_ERROR, "FEC failure: "+t, null);
+ }
+ cancelInner(container, context);
+ }
+
+ Bucket getBucket(int blockNum) {
+ if(blockNum >= dataBlocks.length)
+ return checkBlocks[blockNum - dataBlocks.length];
+ else
+ return dataBlocks[blockNum];
+ }
+
+ private BlockItem getBlockItem(ObjectContainer container, ClientContext
context, int blockNum) throws IOException {
+ Bucket sourceData = getBucket(blockNum);
+ if(sourceData == null) {
+			Logger.error(this, "Selected block "+blockNum+" but its bucket is null - already finished? - on "+this);
+ return null;
+ }
+ boolean deactivateBucket = false;
+ if(persistent) {
+ deactivateBucket =
!container.ext().isActive(sourceData);
+ if(deactivateBucket)
+ container.activate(sourceData, 1);
+ }
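+		// Use a shadow of the source bucket where supported (createShadow()
+		// returns null otherwise), falling back to copying the data into a
+		// temp bucket.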
+ Bucket data = sourceData.createShadow();
+ if(data == null) {
+ data =
context.tempBucketFactory.makeBucket(sourceData.size());
+ BucketTools.copy(sourceData, data);
+ }
+ if(logMINOR) Logger.minor(this, "Block "+blockNum+" : bucket
"+sourceData+" shadow "+data);
+ if(persistent) {
+ if(deactivateBucket)
+ container.deactivate(sourceData, 1);
+ }
+ return new BlockItem(this, blockNum, data, persistent);
+ }
+
+ private int hashCodeForBlock(int blockNum) {
+ // FIXME: Standard hashCode() pattern assumes both inputs are
evenly
+ // distributed ... this is not true here.
+ return hashCode() * (blockNum + 1);
+ }
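+	// A minimal sketch (illustrative only, not part of this commit) of a
+	// better-mixed variant, assuming we want items for different blocks and
+	// segments to spread more evenly across hash buckets:
+	//   private int hashCodeForBlock(int blockNum) {
+	//       int h = hashCode() ^ (blockNum * 0x9E3779B9); // golden-ratio constant spreads low bits
+	//       return h ^ (h >>> 16); // fold the high bits down
+	//   }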
+
+ private static class BlockItem implements SendableRequestItem {
+
+ private final boolean persistent;
+ private final Bucket copyBucket;
+ private final int hashCode;
+ /** STRICTLY for purposes of equals() !!! */
+ private final SplitFileInserterSegment parent;
+ private final int blockNum;
+
+ BlockItem(SplitFileInserterSegment parent, int blockNum, Bucket
bucket, boolean persistent) throws IOException {
+ this.parent = parent;
+ this.blockNum = blockNum;
+ this.copyBucket = bucket;
+ this.hashCode = parent.hashCodeForBlock(blockNum);
+ this.persistent = persistent;
+ }
+
+ public void dump() {
+ copyBucket.free();
+ }
+
+ public int hashCode() {
+ return hashCode;
+ }
+
+ public boolean equals(Object o) {
+ if(o instanceof BlockItem) {
+ if(((BlockItem)o).parent == parent &&
((BlockItem)o).blockNum == blockNum) return true;
+ } else if(o instanceof FakeBlockItem) {
+ if(((FakeBlockItem)o).getParent() == parent &&
((FakeBlockItem)o).blockNum == blockNum) return true;
+ }
+ return false;
+ }
+
+ }
+
+ // Used for testing whether a block is already queued.
+ private class FakeBlockItem implements SendableRequestItem {
+
+ private final int blockNum;
+ private final int hashCode;
+
+ FakeBlockItem(int blockNum) {
+ this.blockNum = blockNum;
+ this.hashCode = hashCodeForBlock(blockNum);
+
+ }
+
+ public void dump() {
+ // Do nothing
+ }
+
+ public SplitFileInserterSegment getParent() {
+ return SplitFileInserterSegment.this;
+ }
+
+ @Override
+ public int hashCode() {
+ return hashCode;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if(o instanceof BlockItem) {
+ if(((BlockItem)o).parent ==
SplitFileInserterSegment.this && ((BlockItem)o).blockNum == blockNum) return
true;
+ } else if(o instanceof FakeBlockItem) {
+ if(((FakeBlockItem)o).getParent() ==
SplitFileInserterSegment.this && ((FakeBlockItem)o).blockNum == blockNum)
return true;
+ }
+ return false;
+ }
+ }
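+	// Added commentary: BlockItem and FakeBlockItem deliberately share the
+	// same hashCode/equals contract, so a cheap FakeBlockItem can stand in
+	// for a real BlockItem when probing for duplicates without creating a
+	// shadow bucket.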
+
+ @Override
+ public void onFailure(LowLevelPutException e, Object keyNum,
ObjectContainer container, ClientContext context) {
+ BlockItem block = (BlockItem) keyNum;
+ synchronized(this) {
+ if(finished) return;
+ }
+ // First report the error.
+ if(persistent)
+ container.activate(errors, 5);
+ switch(e.code) {
+ case LowLevelPutException.COLLISION:
+ Logger.error(this, "Collision on a CHK?!?!?");
+ fail(new
InsertException(InsertException.INTERNAL_ERROR, "Collision on a CHK", null),
container, context);
+ return;
+ case LowLevelPutException.INTERNAL_ERROR:
+ Logger.error(this, "Internal error: "+e, e);
+ fail(new
InsertException(InsertException.INTERNAL_ERROR, e.toString(), null), container,
context);
+ return;
+ case LowLevelPutException.REJECTED_OVERLOAD:
+ errors.inc(InsertException.REJECTED_OVERLOAD);
+ break;
+ case LowLevelPutException.ROUTE_NOT_FOUND:
+ errors.inc(InsertException.ROUTE_NOT_FOUND);
+ break;
+ case LowLevelPutException.ROUTE_REALLY_NOT_FOUND:
+ errors.inc(InsertException.ROUTE_REALLY_NOT_FOUND);
+ break;
+ default:
+ Logger.error(this, "Unknown LowLevelPutException code:
"+e.code);
+ fail(new
InsertException(InsertException.INTERNAL_ERROR, e.toString(), null), container,
context);
+ return;
+ }
+ if(persistent)
+ container.store(errors);
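+		// Route Not Found is special-cased below: once a block has hit
+		// consecutiveRNFsCountAsSuccess RNFs in a row, it is counted as a
+		// success rather than retried further.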
+ boolean isRNF = e.code == LowLevelPutException.ROUTE_NOT_FOUND
||
+ e.code == LowLevelPutException.ROUTE_REALLY_NOT_FOUND;
+ int blockNum = block.blockNum;
+ if(logMINOR) Logger.minor(this, "Block "+blockNum+" failed on
"+this+" : "+e);
+ boolean treatAsSuccess = false;
+ boolean failedBlock = false;
+ int completed;
+ int succeeded;
+ synchronized(this) {
+ if(blockNum >= dataBlocks.length) {
+ // Check block.
+ int checkNum = blockNum - dataBlocks.length;
+ if(checkFinished[checkNum]) {
+ if(checkFailed[checkNum])
+ Logger.error(this, "Got
onFailure() but block has already failed! Check block "+checkNum+" on "+this);
+ else
+ Logger.error(this, "Got
onFailure() but block has already succeeded: Check block "+checkNum+" on
"+this);
+ return;
+ }
+ if(isRNF) {
+ checkConsecutiveRNFs[checkNum]++;
+ if(persistent)
container.activate(blockInsertContext, 1);
+ if(logMINOR) Logger.minor(this,
"Consecutive RNFs: "+checkConsecutiveRNFs[checkNum]+" /
"+blockInsertContext.consecutiveRNFsCountAsSuccess);
+ if(checkConsecutiveRNFs[checkNum] ==
blockInsertContext.consecutiveRNFsCountAsSuccess) {
+ // Treat as success
+ treatAsSuccess = true;
+ }
+ } else {
+ checkConsecutiveRNFs[checkNum] = 0;
+ }
+ if(!treatAsSuccess) {
+ checkRetries[checkNum]++;
+ if(checkRetries[checkNum] > maxRetries
&& maxRetries != -1) {
+ failedBlock = true;
+ // Treat as failed.
+ checkFinished[checkNum] = true;
+ checkFailed[checkNum] = true;
+ blocksCompleted++;
+ if(persistent)
container.activate(blocks, 2);
+ blocks.remove(new
Integer(blockNum));
+ if(persistent)
container.store(blocks);
+ if(checkBlocks[checkNum] !=
null) {
+ if(persistent)
container.activate(checkBlocks[checkNum], 1);
+
checkBlocks[checkNum].free();
+ if(persistent)
checkBlocks[checkNum].removeFrom(container);
+ checkBlocks[checkNum] =
null;
+ if(logMINOR)
Logger.minor(this, "Failed to insert check block "+checkNum+" on "+this);
+ } else {
+ Logger.error(this,
"Check block "+checkNum+" failed on "+this+" but bucket is already nulled
out!");
+ }
+ }
+ // Else we are still registered, but
will have to be
+ // re-selected: for persistent
requests, the current
+ // PersistentChosenRequest will not
re-run the same block.
+ // This is okay!
+ } else {
+ // Better handle it here to minimize
race conditions. :|
+ checkFinished[checkNum] = true;
+ checkFailed[checkNum] = false; //
Treating as succeeded
+ blocksCompleted++;
+ blocksSucceeded++;
+ if(persistent)
container.activate(blocks, 2);
+ blocks.remove(new Integer(blockNum));
+ if(persistent) container.store(blocks);
+ if(checkBlocks[checkNum] != null) {
+ if(persistent)
container.activate(checkBlocks[checkNum], 1);
+ checkBlocks[checkNum].free();
+ if(persistent)
checkBlocks[checkNum].removeFrom(container);
+ checkBlocks[checkNum] = null;
+ if(logMINOR) Logger.minor(this,
"Repeated RNF, treating as success for check block "+checkNum+" on "+this);
+ } else {
+ Logger.error(this, "Check block
"+checkNum+" succeeded (sort of) on "+this+" but bucket is already nulled
out!");
+ }
+ }
+ } else {
+ // Data block.
+ if(dataFinished[blockNum]) {
+ if(dataFailed[blockNum])
+ Logger.error(this, "Got
onFailure() but block has already failed! Data block "+blockNum+" on "+this);
+ else
+ Logger.error(this, "Got
onFailure() but block has already succeeded: Data block "+blockNum+" on "+this);
+ return;
+ }
+ if(isRNF) {
+ dataConsecutiveRNFs[blockNum]++;
+ if(persistent)
container.activate(blockInsertContext, 1);
+ if(logMINOR) Logger.minor(this,
"Consecutive RNFs: "+dataConsecutiveRNFs[blockNum]+" /
"+blockInsertContext.consecutiveRNFsCountAsSuccess);
+ if(dataConsecutiveRNFs[blockNum] ==
blockInsertContext.consecutiveRNFsCountAsSuccess) {
+ // Treat as success
+ treatAsSuccess = true;
+ }
+ } else {
+ dataConsecutiveRNFs[blockNum] = 0;
+ }
+ if(!treatAsSuccess) {
+ dataRetries[blockNum]++;
+ if(dataRetries[blockNum] > maxRetries
&& maxRetries != -1) {
+ failedBlock = true;
+ // Treat as failed.
+ dataFinished[blockNum] = true;
+ dataFailed[blockNum] = true;
+ blocksCompleted++;
+ if(persistent)
container.activate(blocks, 2);
+ blocks.remove(new
Integer(blockNum));
+ if(persistent)
container.store(blocks);
+ if(dataBlocks[blockNum] !=
null) {
+ if(persistent)
container.activate(dataBlocks[blockNum], 1);
+
dataBlocks[blockNum].free();
+ if(persistent)
dataBlocks[blockNum].removeFrom(container);
+ dataBlocks[blockNum] =
null;
+ if(logMINOR)
Logger.minor(this, "Failed to insert data block "+blockNum+" on "+this);
+ } else {
+ Logger.error(this,
"Data block "+blockNum+" failed on "+this+" but bucket is already nulled out!");
+ }
+ }
+ // Else we are still registered, but
will have to be
+ // re-selected: for persistent
requests, the current
+ // PersistentChosenRequest will not
re-run the same block.
+ // This is okay!
+ } else {
+ // Better handle it here to minimize
race conditions. :|
+ dataFinished[blockNum] = true;
+ dataFailed[blockNum] = false; //
Treating as succeeded
+ blocksCompleted++;
+ blocksSucceeded++;
+ if(persistent)
container.activate(blocks, 2);
+ blocks.remove(new Integer(blockNum));
+ if(persistent) container.store(blocks);
+ if(dataBlocks[blockNum] != null &&
encoded) {
+ if(persistent)
container.activate(dataBlocks[blockNum], 1);
+ dataBlocks[blockNum].free();
+ if(persistent)
dataBlocks[blockNum].removeFrom(container);
+ dataBlocks[blockNum] = null;
+ if(logMINOR) Logger.minor(this,
"Repeated RNF, treating as success for data block "+blockNum+" on "+this);
+ } else {
+ Logger.error(this, "Data block
"+blockNum+" succeeded (sort of) on "+this+" but bucket is already nulled
out!");
+ }
+ }
+ }
+ if(persistent)
+ container.store(this);
+ completed = blocksCompleted;
+ succeeded = blocksSucceeded;
+ }
+ if(persistent) container.activate(putter, 1);
+ if(failedBlock)
+ putter.failedBlock(container, context);
+ else if(treatAsSuccess)
+ putter.completedBlock(false, container, context);
+ if(persistent) container.deactivate(putter, 1);
+ if(treatAsSuccess && succeeded == dataBlocks.length) {
+ if(persistent) container.activate(parent, 1);
+ parent.segmentFetchable(this, container);
+ if(persistent) container.deactivate(parent, 1);
+ } else if(completed == dataBlocks.length + checkBlocks.length) {
+ if(persistent) container.activate(parent, 1);
+ finish(container, context, parent);
+ if(persistent) container.deactivate(parent, 1);
+ }
+ }
+
+ @Override
+ public void onSuccess(Object keyNum, ObjectContainer container,
ClientContext context) {
+ BlockItem block = (BlockItem) keyNum;
+ int blockNum = block.blockNum;
+ int completed;
+ int succeeded;
+ if(logMINOR) Logger.minor(this, "Block "+blockNum+" succeeded
on "+this);
+ synchronized(this) {
+ if(finished) {
+ return;
+ }
+ if(blockNum >= dataBlocks.length) {
+ // Check block.
+ int checkNum = blockNum - dataBlocks.length;
+ if(!checkFinished[checkNum]) {
+ checkFinished[checkNum] = true;
+ checkFailed[checkNum] = false;
+ blocksCompleted++;
+ blocksSucceeded++;
+ if(persistent)
container.activate(blocks, 2);
+ blocks.remove(new Integer(blockNum));
+ if(persistent) container.store(blocks);
+ } else {
+ if(checkFailed[checkNum])
+ Logger.error(this, "Got
onSuccess() but block has already failed! Check block "+checkNum+" on "+this);
+ else
+ Logger.error(this, "Got
onSuccess() but block has already succeeded: Check block "+checkNum+" on
"+this);
+ return;
+ }
+ if(checkBlocks[checkNum] != null) {
+ if(persistent)
container.activate(checkBlocks[checkNum], 1);
+ checkBlocks[checkNum].free();
+ if(persistent)
checkBlocks[checkNum].removeFrom(container);
+ checkBlocks[checkNum] = null;
+ } else {
+ Logger.error(this, "Check block
"+checkNum+" succeeded on "+this+" but bucket is already nulled out!");
+ }
+ } else {
+ // Data block
+ if(!dataFinished[blockNum]) {
+ dataFinished[blockNum] = true;
+ dataFailed[blockNum] = false;
+ blocksCompleted++;
+ blocksSucceeded++;
+ if(persistent)
container.activate(blocks, 2);
+ blocks.remove(new Integer(blockNum));
+ if(persistent) container.store(blocks);
+ } else {
+ if(dataFailed[blockNum])
+ Logger.error(this, "Got
onSuccess() but block has already failed! Data block "+blockNum+" on "+this);
+ else
+ Logger.error(this, "Got
onSuccess() but block has already succeeded: Data block "+blockNum+" on "+this);
+ return;
+ }
+ // Data blocks may not be freed until after we
have encoded the check blocks.
+ if(encoded && dataBlocks[blockNum] != null) {
+ if(persistent)
container.activate(dataBlocks[blockNum], 1);
+ dataBlocks[blockNum].free();
+ if(persistent)
dataBlocks[blockNum].removeFrom(container);
+ dataBlocks[blockNum] = null;
+ } else if(dataBlocks[blockNum] == null) {
+ Logger.error(this, "Data block
"+blockNum+" succeeded on "+this+" but bucket is already nulled out!");
+ if(persistent) Logger.minor(this,
"Activation state: "+container.ext().isActive(this));
+ }
+ }
+ if(persistent)
+ container.store(this);
+ completed = blocksCompleted;
+ succeeded = blocksSucceeded;
+ }
+ if(persistent) container.activate(putter, 1);
+ putter.completedBlock(false, container, context);
+ if(persistent) container.deactivate(putter, 1);
+ if(succeeded == dataBlocks.length) {
+ if(persistent) container.activate(parent, 1);
+ parent.segmentFetchable(this, container);
+ if(persistent) container.deactivate(parent, 1);
+ } else if(completed == dataBlocks.length + checkBlocks.length) {
+ if(persistent) container.activate(parent, 1);
+ finish(container, context, parent);
+ if(persistent) container.deactivate(parent, 1);
+ }
+ }
+
+ @Override
+ public SendableRequestItem[] allKeys(ObjectContainer container,
ClientContext context) {
+ return sendableKeys(container, context);
+ }
+
+ @Override
+ public SendableRequestItem chooseKey(KeysFetchingLocally keys,
ObjectContainer container, ClientContext context) {
+ if(persistent) {
+ container.activate(this, 1);
+ container.activate(blocks, 1);
+ }
+ synchronized(this) {
+ if(finished) return null;
+ if(blocks.isEmpty()) {
+ if(logMINOR)
+ Logger.minor(this, "No blocks to
remove");
+ return null;
+ }
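+			// Pick a random outstanding block number; give up for now if the
+			// chosen block is already running as a transient insert.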
+ for(int i=0;i<10;i++) {
+ Integer ret;
+ int x;
+ if(blocks.size() == 0) return null;
+ x = context.random.nextInt(blocks.size());
+ ret = blocks.get(x);
+ int num = ret;
+
+ // Check whether it is already running
+ if(!persistent) {
+ if(keys.hasTransientInsert(this, new
FakeBlockItem(num)))
+ return null;
+ }
+
+ try {
+ return getBlockItem(container, context,
num);
+ } catch (IOException e) {
+ fail(new
InsertException(InsertException.BUCKET_ERROR, e, null), container, context);
+ return null;
+ }
+ }
+ return null;
+ }
+ }
+
+ @Override
+ public RequestClient getClient(ObjectContainer container) {
+ if(persistent) container.activate(putter, 1);
+ return putter.getClient();
+ }
+
+ @Override
+ public ClientRequester getClientRequest() {
+ return putter;
+ }
+
+ @Override
+ public short getPriorityClass(ObjectContainer container) {
+ if(persistent) container.activate(putter, 1);
+ return putter.getPriorityClass();
+ }
+
+ @Override
+ public int getRetryCount() {
+ // No point scheduling inserts by retry count.
+ // FIXME: Either implement sub-segments to schedule by retry
count,
+ // or (more likely imho) make the scheduler not care about
retry counts for inserts.
+ return 0;
+ }
+
+ @Override
+ public SendableRequestSender getSender(ObjectContainer container,
ClientContext context) {
+ return new SendableRequestSender() {
+
+ public boolean send(NodeClientCore core,
RequestScheduler sched, final ClientContext context, ChosenBlock req) {
+ // Ignore keyNum, key, since we're only sending
one block.
+ try {
+ BlockItem block = (BlockItem) req.token;
+ if(logMINOR) Logger.minor(this,
"Starting request: "+SplitFileInserterSegment.this+" block number
"+block.blockNum);
+ ClientCHKBlock b;
+ try {
+ b =
encodeBucket(block.copyBucket);
+ } catch (CHKEncodeException e) {
+ throw new
LowLevelPutException(LowLevelPutException.INTERNAL_ERROR, e.toString() + ":" +
e.getMessage(), e);
+ } catch (MalformedURLException e) {
+ throw new
LowLevelPutException(LowLevelPutException.INTERNAL_ERROR, e.toString() + ":" +
e.getMessage(), e);
+ } catch (IOException e) {
+ throw new
LowLevelPutException(LowLevelPutException.INTERNAL_ERROR, e.toString() + ":" +
e.getMessage(), e);
+ } finally {
+ block.copyBucket.free();
+ }
+ final ClientCHK key = (ClientCHK)
b.getClientKey();
+ final int num = block.blockNum;
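+					// Hand the freshly computed CHK back on the right thread:
+					// persistent segments must be touched on the database
+					// thread (queue a DBJob); transient ones can use the
+					// main executor directly.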
+ if(block.persistent) {
+ context.jobRunner.queue(new DBJob() {
+
+ public void run(ObjectContainer
container, ClientContext context) {
+
if(!container.ext().isStored(SplitFileInserterSegment.this)) return;
+
container.activate(SplitFileInserterSegment.this, 1);
+ onEncode(num, key,
container, context);
+
container.deactivate(SplitFileInserterSegment.this, 1);
+ }
+
+ }, NativeThread.NORM_PRIORITY+1, false);
+ } else {
+
context.mainExecutor.execute(new Runnable() {
+
+ public void run() {
+ onEncode(num,
key, null, context);
+ }
+
+ }, "Got URI");
+
+ }
+ if(b != null)
+ core.realPut(b,
req.cacheLocalRequests);
+ else {
+ Logger.error(this, "Asked to
send empty block on "+SplitFileInserterSegment.this, new Exception("error"));
+ return false;
+ }
+ } catch (LowLevelPutException e) {
+ req.onFailure(e, context);
+ if(logMINOR) Logger.minor(this,
"Request failed: "+SplitFileInserterSegment.this+" for "+e);
+ return true;
+ }
+ if(logMINOR) Logger.minor(this, "Request
succeeded: "+SplitFileInserterSegment.this);
+ req.onInsertSuccess(context);
+ return true;
+ }
+
+ };
+ }
+
+ protected ClientCHKBlock encodeBucket(Bucket copyBucket) throws
CHKEncodeException, IOException {
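+		// Encode the bucket as a standard CHK. Flag meanings here are assumed
+		// from ClientCHKBlock.encode's signature: not metadata, no further
+		// compression, no pre-compression codec, standard CHK data length.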
+ return ClientCHKBlock.encode(copyBucket, false, true,
(short)-1, CHKBlock.DATA_LENGTH);
+ }
+
+ @Override
+ public boolean isCancelled(ObjectContainer container) {
+ return finished;
+ }
+
+ @Override
+ public boolean isSSK() {
+ return false;
+ }
+
+ @Override
+ public List<PersistentChosenBlock> makeBlocks(PersistentChosenRequest
request, RequestScheduler sched, ObjectContainer container, ClientContext
context) {
+ if(persistent) {
+ container.activate(blocks, 1);
+ }
+ Integer[] blockNumbers;
+ synchronized(this) {
+ blockNumbers = blocks.toArray(new
Integer[blocks.size()]);
+ }
+ ArrayList<PersistentChosenBlock> ret = new
ArrayList<PersistentChosenBlock>();
+ Arrays.sort(blockNumbers);
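+		// Sorting puts duplicate block numbers next to each other so the
+		// prevBlockNumber check below can skip them.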
+ int prevBlockNumber = -1;
+ for(int i=0;i<blockNumbers.length;i++) {
+ int blockNumber = blockNumbers[i];
+ if(blockNumber == prevBlockNumber) {
+ Logger.error(this, "Duplicate block number in
makeBlocks() in "+this+": two copies of "+blockNumber);
+ continue;
+ }
+ prevBlockNumber = blockNumber;
+ SendableRequestItem item;
+ try {
+ item = getBlockItem(container, context,
blockNumber);
+ } catch (IOException e) {
+ fail(new
InsertException(InsertException.BUCKET_ERROR, e, null), container, context);
+ return null;
+ }
+ PersistentChosenBlock block = new
PersistentChosenBlock(true, request, item, null, null, sched);
+ if(logMINOR) Logger.minor(this, "Created block
"+block+" for block number "+blockNumber+" on "+this);
+ ret.add(block);
+ }
+ blocks.trimToSize();
+ if(persistent) {
+ container.deactivate(blocks, 1);
+ }
+ if(logMINOR) Logger.minor(this, "Returning "+ret.size()+"
blocks");
+ return ret;
+ }
+
+ @Override
+ public synchronized SendableRequestItem[] sendableKeys(ObjectContainer
container, ClientContext context) {
+ if(persistent) {
+ container.activate(blocks, 1);
+ }
+ SendableRequestItem[] items = new
SendableRequestItem[blocks.size()];
+ for(int i=0;i<blocks.size();i++)
+ try {
+ items[i] = getBlockItem(container, context,
blocks.get(i));
+ } catch (IOException e) {
+ fail(new
InsertException(InsertException.BUCKET_ERROR, e, null), container, context);
+ return null;
+ }
+ if(persistent) {
+ container.deactivate(blocks, 1);
+ }
+ return items;
+ }
+
+ public synchronized boolean isEmpty(ObjectContainer container) {
+ if(persistent) container.activate(blocks, 2);
+ boolean ret = (finished || blocks.isEmpty());
+ if(persistent) container.deactivate(blocks, 1);
+ return ret;
+ }
+
+ private boolean removeOnEncode;
+
+ public void removeFrom(ObjectContainer container, ClientContext
context) {
+ if(encodeJob != null) {
+ if(!encodeJob.cancel(container, context)) {
+ synchronized(this) {
+ removeOnEncode = true;
+ if(logMINOR) Logger.minor(this, "Will
remove after encode finished: "+this);
+ container.store(this);
+ return;
+ }
+ }
+ encodeJob = null;
+ }
+ // parent, putter can deal with themselves
+ for(int i=0;i<dataBlocks.length;i++) {
+ if(dataBlocks[i] == null) continue;
+ container.activate(dataBlocks[i], 1);
+ dataBlocks[i].free();
+ dataBlocks[i].removeFrom(container);
+ dataBlocks[i] = null;
+ }
+ for(int i=0;i<checkBlocks.length;i++) {
+ if(checkBlocks[i] == null) continue;
+ container.activate(checkBlocks[i], 1);
+ checkBlocks[i].free();
+ checkBlocks[i].removeFrom(container);
+ checkBlocks[i] = null;
+ }
+		for(ClientCHK chk : dataURIs) {
+			if(chk == null) {
+				if(logMINOR) Logger.minor(this, "dataURI is null on "+this);
+			} else {
+				container.activate(chk, 5);
+				chk.removeFrom(container);
+			}
+		}
+		for(ClientCHK chk : checkURIs) {
+			if(chk == null) {
+				if(logMINOR) Logger.minor(this, "checkURI is null on "+this);
+			} else {
+				container.activate(chk, 5);
+				chk.removeFrom(container);
+			}
+		}
+ container.activate(blocks, 5);
+ for(Integer i : blocks) {
+ container.activate(i, 1);
+ container.delete(i);
+ }
+ container.delete(blocks);
+ if(toThrow != null) {
+ container.activate(toThrow, 5);
+ toThrow.removeFrom(container);
+ }
+ if(errors != null) {
+ container.activate(errors, 1);
+ errors.removeFrom(container);
+ }
+ container.delete(this);
+ }
+
+ @Override
+ public boolean cacheInserts(ObjectContainer container) {
+ boolean deactivate = false;
+ if(persistent) {
+ deactivate =
!container.ext().isActive(blockInsertContext);
+ if(deactivate)
+ container.activate(blockInsertContext, 1);
+ }
+ boolean retval = blockInsertContext.cacheLocalRequests;
+ if(deactivate)
+ container.deactivate(blockInsertContext, 1);
+ return retval;
+ }
+
+ public boolean objectCanNew(ObjectContainer container) {
+ if(finished) {
+ Logger.error(this, "Storing "+this+" when already
finished!", new Exception("error"));
+ return false;
+ }
+ if(logMINOR) Logger.minor(this, "Storing "+this+"
activated="+container.ext().isActive(this)+"
stored="+container.ext().isStored(this), new Exception("debug"));
+ return true;
+ }
+
}
Copied: trunk/freenet/src/freenet/client/async/TransientChosenBlock.java (from
rev 26320,
branches/db4o/freenet/src/freenet/client/async/TransientChosenBlock.java)
===================================================================
--- trunk/freenet/src/freenet/client/async/TransientChosenBlock.java
(rev 0)
+++ trunk/freenet/src/freenet/client/async/TransientChosenBlock.java
2009-04-01 20:34:09 UTC (rev 26322)
@@ -0,0 +1,73 @@
+package freenet.client.async;
+
+import freenet.keys.ClientKey;
+import freenet.keys.ClientKeyBlock;
+import freenet.keys.Key;
+import freenet.node.LowLevelGetException;
+import freenet.node.LowLevelPutException;
+import freenet.node.RequestScheduler;
+import freenet.node.SendableGet;
+import freenet.node.SendableInsert;
+import freenet.node.SendableRequest;
+import freenet.node.SendableRequestItem;
+import freenet.node.SendableRequestSender;
+
+/**
+ * A ChosenBlock which isn't persistent.
+ * @author Matthew Toseland <toad at amphibian.dyndns.org> (0xE43DA450)
+ *
+ */
+public class TransientChosenBlock extends ChosenBlock {
+
+ public final SendableRequest request;
+ public final RequestScheduler sched;
+
+ public TransientChosenBlock(SendableRequest req, SendableRequestItem
token, Key key, ClientKey ckey,
+ boolean localRequestOnly, boolean cacheLocalRequests,
boolean ignoreStore, RequestScheduler sched) {
+ super(token, key, ckey, localRequestOnly, cacheLocalRequests,
ignoreStore, sched);
+ this.request = req;
+ this.sched = sched;
+ }
+
+ @Override
+ public boolean isCancelled() {
+ return request.isCancelled(null);
+ }
+
+ @Override
+ public boolean isPersistent() {
+ return false;
+ }
+
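+	// Added commentary: the callbacks below pass a null ObjectContainer to
+	// the underlying SendableRequest, which is safe on the assumption that a
+	// transient request never touches the database.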
+ public void onFailure(LowLevelPutException e, ClientContext context) {
+ ((SendableInsert) request).onFailure(e, token, null, context);
+ }
+
+ public void onInsertSuccess(ClientContext context) {
+ ((SendableInsert) request).onSuccess(token, null, context);
+ }
+
+ public void onFailure(LowLevelGetException e, ClientContext context) {
+ ((SendableGet) request).onFailure(e, token, null, context);
+ }
+
+ public void onSuccess(ClientKeyBlock data, boolean fromStore,
ClientContext context) {
+ ((SendableGet) request).onSuccess(data, fromStore, token, null,
context);
+ }
+
+ @Override
+ public void onFetchSuccess(ClientContext context) {
+ sched.succeeded((SendableGet)request, this);
+ }
+
+ @Override
+ public short getPriority() {
+ return request.getPriorityClass(null);
+ }
+
+ @Override
+ public SendableRequestSender getSender(ClientContext context) {
+ return request.getSender(null, context);
+ }
+
+}
Copied: trunk/freenet/src/freenet/client/async/TransientSendableRequestSet.java
(from rev 26320,
branches/db4o/freenet/src/freenet/client/async/TransientSendableRequestSet.java)
===================================================================
--- trunk/freenet/src/freenet/client/async/TransientSendableRequestSet.java
(rev 0)
+++ trunk/freenet/src/freenet/client/async/TransientSendableRequestSet.java
2009-04-01 20:34:09 UTC (rev 26322)
@@ -0,0 +1,43 @@
+package freenet.client.async;
+
+import java.util.HashSet;
+
+import com.db4o.ObjectContainer;
+
+import freenet.node.SendableRequest;
+import freenet.support.Logger;
+
+/**
+ * Since we don't need to worry about activation, we can simply use a HashSet.
+ * @author Matthew Toseland <toad at amphibian.dyndns.org> (0xE43DA450)
+ */
+public class TransientSendableRequestSet implements SendableRequestSet {
+
+ private final HashSet<SendableRequest> set;
+
+ TransientSendableRequestSet() {
+ set = new HashSet<SendableRequest>();
+ }
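+	// Added commentary: the HashSet is confined to this object and guarded by
+	// the synchronized methods, so no db4o activation or storage is needed.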
+
+ public synchronized boolean addRequest(SendableRequest req,
ObjectContainer container) {
+ return set.add(req);
+ }
+
+ public synchronized SendableRequest[] listRequests(ObjectContainer
container) {
+ return set.toArray(new SendableRequest[set.size()]);
+ }
+
+	public synchronized boolean removeRequest(SendableRequest req, ObjectContainer container) {
+		return set.remove(req);
+	}
+
+ public void removeFrom(ObjectContainer container) {
+ throw new UnsupportedOperationException();
+ }
+
+ public boolean objectCanNew(ObjectContainer container) {
+ Logger.error(this, "Not storing TransientSendableRequestSet in
database", new Exception("error"));
+ return false;
+ }
+
+}
Modified: trunk/freenet/src/freenet/client/async/USKCallback.java
===================================================================
--- trunk/freenet/src/freenet/client/async/USKCallback.java 2009-04-01
20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/client/async/USKCallback.java 2009-04-01
20:34:09 UTC (rev 26322)
@@ -3,6 +3,8 @@
* http://www.gnu.org/ for further details of the GPL. */
package freenet.client.async;
+import com.db4o.ObjectContainer;
+
import freenet.keys.USK;
/**
@@ -19,7 +21,7 @@
* @param key
* A copy of the key with new edition set
*/
- void onFoundEdition(long l, USK key);
+ void onFoundEdition(long l, USK key, ObjectContainer container,
ClientContext context, boolean metadata, short codec, byte[] data);
/**
* Priority at which the polling should run normally.
Modified: trunk/freenet/src/freenet/client/async/USKChecker.java
===================================================================
--- trunk/freenet/src/freenet/client/async/USKChecker.java 2009-04-01
20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/client/async/USKChecker.java 2009-04-01
20:34:09 UTC (rev 26322)
@@ -3,12 +3,13 @@
* http://www.gnu.org/ for further details of the GPL. */
package freenet.client.async;
+import com.db4o.ObjectContainer;
+
import freenet.client.FetchContext;
import freenet.keys.ClientKey;
import freenet.keys.ClientKeyBlock;
import freenet.keys.ClientSSKBlock;
import freenet.node.LowLevelGetException;
-import freenet.node.RequestScheduler;
import freenet.support.Logger;
/**
@@ -20,20 +21,27 @@
private int dnfs;
USKChecker(USKCheckerCallback cb, ClientKey key, int maxRetries,
FetchContext ctx, ClientRequester parent) {
- super(key, maxRetries, ctx, parent);
+ super(key, maxRetries, ctx, parent, false);
if(Logger.shouldLog(Logger.MINOR, this))
Logger.minor(this, "Created USKChecker for "+key);
this.cb = cb;
}
@Override
- public void onSuccess(ClientKeyBlock block, boolean fromStore, Object
token, RequestScheduler sched) {
- unregister(false);
- cb.onSuccess((ClientSSKBlock)block);
+ public void onSuccess(ClientKeyBlock block, boolean fromStore, Object
token, ObjectContainer container, ClientContext context) {
+ if(persistent) {
+ container.activate(this, 1);
+ container.activate(cb, 1);
+ }
+ cb.onSuccess((ClientSSKBlock)block, context);
}
@Override
- public void onFailure(LowLevelGetException e, Object token,
RequestScheduler sched) {
+ public void onFailure(LowLevelGetException e, Object token,
ObjectContainer container, ClientContext context) {
+ if(persistent) {
+ container.activate(this, 1);
+ container.activate(cb, 1);
+ }
if(Logger.shouldLog(Logger.MINOR, this))
Logger.minor(this, "onFailure: "+e+" for "+this);
// Firstly, can we retry?
@@ -63,22 +71,22 @@
canRetry = true;
}
- if(canRetry && retry(sched, ctx.executor)) return;
+ if(canRetry && retry(container, context)) return;
// Ran out of retries.
- unregister(false);
+ unregisterAll(container, context);
if(e.code == LowLevelGetException.CANCELLED){
- cb.onCancelled();
+ cb.onCancelled(context);
return;
}else if(e.code == LowLevelGetException.DECODE_FAILED){
- cb.onFatalAuthorError();
+ cb.onFatalAuthorError(context);
return;
}
// Rest are non-fatal. If have DNFs, DNF, else network error.
if(dnfs > 0)
- cb.onDNF();
+ cb.onDNF(context);
else
- cb.onNetworkError();
+ cb.onNetworkError(context);
}
@Override
@@ -86,8 +94,12 @@
return "USKChecker for "+key.getURI()+" for "+cb;
}
- @Override
public short getPriorityClass() {
return cb.getPriority();
}
+
+ public void onFailed(KeyListenerConstructionException e,
ObjectContainer container, ClientContext context) {
+ onFailure(new
LowLevelGetException(LowLevelGetException.INTERNAL_ERROR, "IMPOSSIBLE: Failed
to create Bloom filters (we don't have any!)", e), null, container, context);
+ }
+
}
Modified: trunk/freenet/src/freenet/client/async/USKCheckerCallback.java
===================================================================
--- trunk/freenet/src/freenet/client/async/USKCheckerCallback.java
2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/client/async/USKCheckerCallback.java
2009-04-01 20:34:09 UTC (rev 26322)
@@ -11,20 +11,20 @@
interface USKCheckerCallback {
/** Data Not Found */
- public void onDNF();
+ public void onDNF(ClientContext context);
/** Successfully found the latest version of the key
* @param block */
- public void onSuccess(ClientSSKBlock block);
+ public void onSuccess(ClientSSKBlock block, ClientContext context);
/** Error committed by author */
- public void onFatalAuthorError();
+ public void onFatalAuthorError(ClientContext context);
/** Network on our node or on nodes we have been talking to */
- public void onNetworkError();
+ public void onNetworkError(ClientContext context);
/** Request cancelled */
- public void onCancelled();
+ public void onCancelled(ClientContext context);
/** Get priority to run the request at */
public short getPriority();
Modified: trunk/freenet/src/freenet/client/async/USKFetcher.java
===================================================================
--- trunk/freenet/src/freenet/client/async/USKFetcher.java 2009-04-01
20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/client/async/USKFetcher.java 2009-04-01
20:34:09 UTC (rev 26322)
@@ -9,6 +9,8 @@
import java.util.LinkedList;
import java.util.Vector;
+import com.db4o.ObjectContainer;
+
import freenet.client.FetchContext;
import freenet.keys.ClientSSKBlock;
import freenet.keys.FreenetURI;
@@ -18,6 +20,7 @@
import freenet.support.Logger;
import freenet.support.LogThresholdCallback;
import freenet.support.api.Bucket;
+import freenet.support.io.BucketTools;
/**
*
@@ -124,47 +127,49 @@
this.dnf = false;
this.checker = new USKChecker(this, origUSK.getSSK(i),
ctx.maxUSKRetries, ctx, parent);
}
- public void onDNF() {
+ public void onDNF(ClientContext context) {
checker = null;
dnf = true;
- USKFetcher.this.onDNF(this);
+ USKFetcher.this.onDNF(this, context);
}
- public void onSuccess(ClientSSKBlock block) {
+ public void onSuccess(ClientSSKBlock block, ClientContext
context) {
checker = null;
succeeded = true;
- USKFetcher.this.onSuccess(this, false, block);
+ USKFetcher.this.onSuccess(this, false, block, context);
}
- public void onFatalAuthorError() {
+ public void onFatalAuthorError(ClientContext context) {
checker = null;
// Counts as success except it doesn't update
- USKFetcher.this.onSuccess(this, true, null);
+ USKFetcher.this.onSuccess(this, true, null, context);
}
- public void onNetworkError() {
+ public void onNetworkError(ClientContext context) {
checker = null;
// Not a DNF
- USKFetcher.this.onFail(this);
+ USKFetcher.this.onFail(this, context);
}
- public void onCancelled() {
+ public void onCancelled(ClientContext context) {
checker = null;
- USKFetcher.this.onCancelled(this);
+ USKFetcher.this.onCancelled(this, context);
}
- public void cancel() {
+ public void cancel(ObjectContainer container, ClientContext
context) {
+ assert(container == null);
cancelled = true;
if(checker != null)
- checker.cancel();
- onCancelled();
+ checker.cancel(container, context);
+ onCancelled(context);
}
- public void schedule() {
+ public void schedule(ObjectContainer container, ClientContext
context) {
+ assert(container == null);
if(checker == null) {
if(logMINOR)
Logger.minor(this, "Checker == null in
schedule() for "+this, new Exception("debug"));
} else
- checker.schedule();
+ checker.schedule(container, context);
}
@Override
@@ -239,7 +244,7 @@
this.keepLastData = keepLastData;
}
- void onDNF(USKAttempt att) {
+ void onDNF(USKAttempt att, ClientContext context) {
if(logMINOR) Logger.minor(this, "DNF: "+att);
boolean finished = false;
long curLatest = uskManager.lookup(origUSK);
@@ -255,11 +260,11 @@
} else if(logMINOR) Logger.minor(this, "Remaining:
"+runningAttempts.size());
}
if(finished) {
- finishSuccess();
+ finishSuccess(context);
}
}
- private void finishSuccess() {
+ private void finishSuccess(ClientContext context) {
if(backgroundPoll) {
long valAtEnd = uskManager.lookup(origUSK);
long end;
@@ -271,7 +276,7 @@
int newSleepTime = sleepTime * 2;
if(newSleepTime > maxSleepTime) newSleepTime =
maxSleepTime;
sleepTime = newSleepTime;
- end = now + ctx.random.nextInt(sleepTime);
+ end = now + context.random.nextInt(sleepTime);
if(valAtEnd > valueAtSchedule) {
// We have advanced; keep trying as if
we just started.
@@ -286,7 +291,7 @@
minFailures = newMinFailures;
}
}
- schedule(end-now);
+ schedule(end-now, null, context);
} else {
long ed = uskManager.lookup(origUSK);
USKFetcherCallback[] cb;
@@ -294,9 +299,20 @@
completed = true;
cb = callbacks.toArray(new
USKFetcherCallback[callbacks.size()]);
}
+ byte[] data;
+ if(lastRequestData == null)
+ data = null;
+ else {
+ try {
+ data =
BucketTools.toByteArray(lastRequestData);
+ } catch (IOException e) {
+ Logger.error(this, "Unable to turn
lastRequestData into byte[]: caught I/O exception: "+e, e);
+ data = null;
+ }
+ }
for(int i=0;i<cb.length;i++) {
try {
- cb[i].onFoundEdition(ed,
origUSK.copy(ed));
+ cb[i].onFoundEdition(ed,
origUSK.copy(ed), null, context, lastWasMetadata, lastCompressionCodec, data);
} catch (Exception e) {
Logger.error(this, "An exception
occured while dealing with a callback:"+cb[i].toString()+"\n"+e.getMessage(),e);
}
@@ -304,7 +320,7 @@
}
}
- void onSuccess(USKAttempt att, boolean dontUpdate, ClientSSKBlock
block) {
+ void onSuccess(USKAttempt att, boolean dontUpdate, ClientSSKBlock
block, final ClientContext context) {
LinkedList<USKAttempt> l = null;
final long lastEd = uskManager.lookup(origUSK);
long curLatest;
@@ -326,12 +342,12 @@
l.add(add(i));
}
}
- cancelBefore(curLatest);
+ cancelBefore(curLatest, context);
}
Bucket data = null;
if(decode) {
try {
- data = block.decode(ctx.bucketFactory, 1025 /*
it's an SSK */, true);
+ data =
block.decode(context.getBucketFactory(parent.persistent()), 1025 /* it's an SSK
*/, true);
} catch (KeyDecodeException e) {
data = null;
} catch (IOException e) {
@@ -352,12 +368,12 @@
}
}
if(!dontUpdate)
- uskManager.update(origUSK, curLatest);
+ uskManager.update(origUSK, curLatest, context);
if(l == null) return;
final LinkedList<USKAttempt> toSched = l;
// If we schedule them here, we don't get icky recursion
problems.
if(!cancelled) {
- ctx.executor.execute(new Runnable() {
+ context.mainExecutor.execute(new Runnable() {
public void run() {
long last = lastEd;
for(Iterator<USKAttempt>
i=toSched.iterator();i.hasNext();) {
@@ -366,7 +382,7 @@
USKAttempt a = i.next();
last =
uskManager.lookup(origUSK);
if((last <= a.number) &&
!a.cancelled)
- a.schedule();
+ a.schedule(null,
context);
else {
synchronized(this) {
runningAttempts.remove(a);
@@ -378,35 +394,35 @@
}
}
- public void onCancelled(USKAttempt att) {
+ void onCancelled(USKAttempt att, ClientContext context) {
synchronized(this) {
runningAttempts.remove(att);
if(!runningAttempts.isEmpty()) return;
if(cancelled)
- finishCancelled();
+ finishCancelled(context);
}
}
- private void finishCancelled() {
+ private void finishCancelled(ClientContext context) {
USKFetcherCallback[] cb;
synchronized(this) {
completed = true;
cb = callbacks.toArray(new
USKFetcherCallback[callbacks.size()]);
}
for(int i=0;i<cb.length;i++)
- cb[i].onCancelled();
+ cb[i].onCancelled(null, context);
}
- public void onFail(USKAttempt attempt) {
+ public void onFail(USKAttempt attempt, ClientContext context) {
// FIXME what else can we do?
// Certainly we don't want to continue fetching indefinitely...
// ... e.g. RNFs don't indicate we should try a later slot,
none of them
// really do.
- onDNF(attempt);
+ onDNF(attempt, context);
}
- private void cancelBefore(long curLatest) {
+ private void cancelBefore(long curLatest, ClientContext context) {
Vector<USKAttempt> v = null;
int count = 0;
synchronized(this) {
@@ -423,7 +439,7 @@
if(v != null) {
for(int i=0;i<v.size();i++) {
USKAttempt att = v.get(i);
- att.cancel();
+ att.cancel(null, context);
}
}
}
@@ -463,19 +479,20 @@
return origUSK;
}
- public void schedule(long delay) {
+ public void schedule(long delay, ObjectContainer container, final
ClientContext context) {
+ assert(container == null);
if (delay<=0) {
- schedule();
+ schedule(container, context);
} else {
- ctx.ticker.queueTimedJob(new Runnable() {
+ context.ticker.queueTimedJob(new Runnable() {
public void run() {
- USKFetcher.this.schedule();
+ USKFetcher.this.schedule(null, context);
}
}, delay);
}
}
- public void schedule() {
+ public void schedule(ObjectContainer container, ClientContext context) {
USKAttempt[] attempts;
long lookedUp = uskManager.lookup(origUSK);
synchronized(this) {
@@ -494,7 +511,7 @@
if(keepLastData && lastEd == lookedUp)
lastEd--; // If we want the data, then
get it for the known edition, so we always get the data, so USKInserter can
compare it and return the old edition if it is identical.
if(attempts[i].number > lastEd)
- attempts[i].schedule();
+ attempts[i].schedule(container,
context);
else {
synchronized(this) {
runningAttempts.remove(attempts[i]);
@@ -504,14 +521,15 @@
}
}
- public void cancel() {
+ public void cancel(ObjectContainer container, ClientContext context) {
+ assert(container == null);
USKAttempt[] attempts;
synchronized(this) {
cancelled = true;
attempts = runningAttempts.toArray(new
USKAttempt[runningAttempts.size()]);
}
for(int i=0;i<attempts.length;i++)
- attempts[i].cancel();
+ attempts[i].cancel(container, context);
uskManager.onCancelled(this);
}
@@ -560,7 +578,7 @@
return !subscribers.isEmpty();
}
- public void removeSubscriber(USKCallback cb) {
+ public void removeSubscriber(USKCallback cb, ClientContext context) {
synchronized(this) {
subscribers.remove(cb);
}
@@ -585,7 +603,7 @@
public synchronized void freeLastData() {
if(lastRequestData == null) return;
- lastRequestData.free();
+		lastRequestData.free(); // USKFetchers cannot be persistent, so no need to removeFrom()
lastRequestData = null;
}
@@ -596,5 +614,14 @@
public long getToken() {
return -1;
}
+
+ public void removeFrom(ObjectContainer container, ClientContext
context) {
+ throw new UnsupportedOperationException();
+ }
+ public boolean objectCanNew(ObjectContainer container) {
+ Logger.error(this, "Not storing USKFetcher in database", new
Exception("error"));
+ return false;
+ }
+
}
Modified: trunk/freenet/src/freenet/client/async/USKFetcherCallback.java
===================================================================
--- trunk/freenet/src/freenet/client/async/USKFetcherCallback.java
2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/client/async/USKFetcherCallback.java
2009-04-01 20:34:09 UTC (rev 26322)
@@ -3,6 +3,10 @@
* http://www.gnu.org/ for further details of the GPL. */
package freenet.client.async;
+import com.db4o.ObjectContainer;
+
+import freenet.keys.USK;
+
/**
* Callback interface for USK fetches. If you submit a USK fetch via
* USKManager.getFetcher, then register yourself on it as a listener, then you
@@ -11,8 +15,13 @@
public interface USKFetcherCallback extends USKCallback {
/** Failed to find any edition at all (later than or equal to the
specified hint) */
- void onFailure();
+ void onFailure(ObjectContainer container, ClientContext context);
- void onCancelled();
+ void onCancelled(ObjectContainer container, ClientContext context);
+ /** Found the latest edition. **This is terminal for a
USKFetcherCallback**. It isn't for a USKCallback subscription.
+ * @param l The edition number.
+ * @param key The key. */
+ void onFoundEdition(long l, USK key, ObjectContainer container,
ClientContext context, boolean metadata, short codec, byte[] data);
+
}
Copied: trunk/freenet/src/freenet/client/async/USKFetcherTag.java (from rev
26320, branches/db4o/freenet/src/freenet/client/async/USKFetcherTag.java)
===================================================================
--- trunk/freenet/src/freenet/client/async/USKFetcherTag.java
(rev 0)
+++ trunk/freenet/src/freenet/client/async/USKFetcherTag.java 2009-04-01
20:34:09 UTC (rev 26322)
@@ -0,0 +1,252 @@
+package freenet.client.async;
+
+import com.db4o.ObjectContainer;
+
+import freenet.client.FetchContext;
+import freenet.keys.USK;
+import freenet.node.RequestClient;
+import freenet.support.LogThresholdCallback;
+import freenet.support.Logger;
+import freenet.support.io.NativeThread;
+
+/**
+ * Not the actual fetcher. Just a tag associating a USK with the client that
should be called when
+ * the fetch has been done. Can be included in persistent requests. On
startup, all USK fetches are
+ * restarted, but this remains the same: the actual USKFetchers are always transient.
+ * @author toad
+ */
+class USKFetcherTag implements ClientGetState, USKFetcherCallback {
+
+ /** For persistence */
+ public final long nodeDBHandle;
+ /** The callback */
+ public final USKFetcherCallback callback;
+ /** The original USK */
+ public final USK origUSK;
+ /** The edition number found so far */
+ protected long edition;
+	/** Whether this tag (as opposed to the transient fetch itself) is persistent */
+ public final boolean persistent;
+ /** Context */
+ public final FetchContext ctx;
+ public final boolean keepLastData;
+ /** Priority */
+ private short priority;
+ private long token;
+ private transient USKFetcher fetcher;
+ private short pollingPriorityNormal;
+ private short pollingPriorityProgress;
+ private boolean finished;
+ private final boolean ownFetchContext;
+
+ private USKFetcherTag(USK origUSK, USKFetcherCallback callback, long
nodeDBHandle, boolean persistent, ObjectContainer container, FetchContext ctx,
boolean keepLastData, long token, boolean hasOwnFetchContext) {
+ this.nodeDBHandle = nodeDBHandle;
+ this.callback = callback;
+ this.origUSK = origUSK;
+ this.edition = origUSK.suggestedEdition;
+ this.persistent = persistent;
+ this.ctx = ctx;
+ this.keepLastData = keepLastData;
+ this.token = token;
+ this.ownFetchContext = hasOwnFetchContext;
+ pollingPriorityNormal = callback.getPollingPriorityNormal();
+ pollingPriorityProgress = callback.getPollingPriorityProgress();
+ priority = pollingPriorityNormal;
+ }
+
+ /**
+ * For a persistent request, the caller must call removeFromDatabase()
when finished. Note that the caller is responsible for
+ * deleting the USKFetcherCallback and the FetchContext.
+ * @param usk
+ * @param callback
+ * @param nodeDBHandle
+ * @param persistent
+ * @param container
+ * @param ctx
+ * @param keepLast
+ * @param token
+ * @return
+ */
+ public static USKFetcherTag create(USK usk, USKFetcherCallback
callback, long nodeDBHandle, boolean persistent,
+ ObjectContainer container, FetchContext ctx, boolean
keepLast, int token, boolean hasOwnFetchContext) {
+ USKFetcherTag tag = new USKFetcherTag(usk, callback,
nodeDBHandle, persistent, container, ctx, keepLast, token, hasOwnFetchContext);
+ if(persistent) container.store(tag);
+ return tag;
+ }
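+	// A minimal usage sketch (illustrative only; the surrounding variable
+	// names are assumptions, not code from this commit):
+	//   USKFetcherTag tag = USKFetcherTag.create(usk, cb, nodeDBHandle,
+	//           true /* persistent */, container, fctx,
+	//           false /* keepLast */, 0, false /* shared FetchContext */);
+	//   tag.schedule(container, context); // starts a transient USKFetcher
+	// The terminal callbacks (onFoundEdition/onFailure/onCancelled) call
+	// removeFrom() for persistent tags, so the tag deletes itself when done.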
+
+ synchronized void updatedEdition(long ed, ObjectContainer container) {
+ if(edition < ed) edition = ed;
+ if(persistent) container.store(this); // Update
+ }
+
+ private static final RequestClient client = new RequestClient() {
+
+ public boolean persistent() {
+ // The actual USK fetch is non-persistent, only the
tags survive a restart.
+ return false;
+ }
+
+ public void removeFrom(ObjectContainer container) {
+ throw new UnsupportedOperationException();
+ }
+
+ };
+
+ public void start(USKManager manager, ClientContext context,
ObjectContainer container) {
+ USK usk = origUSK;
+ if(persistent)
+ container.activate(origUSK, 5);
+ if(usk.suggestedEdition < edition)
+ usk = usk.copy(edition);
+ fetcher = manager.getFetcher(usk, ctx, new
USKFetcherWrapper(usk, priority, client), keepLastData);
+ fetcher.addCallback(this);
+ fetcher.schedule(null, context); // non-persistent
+ }
+
+ public void cancel(ObjectContainer container, ClientContext context) {
+ if(fetcher != null) fetcher.cancel(null, context);
+ synchronized(this) {
+ finished = true;
+ }
+ // onCancelled() will removeFrom(), so we do NOT want to
store(this)
+ }
+
+ public long getToken() {
+ return token;
+ }
+
+ public void schedule(ObjectContainer container, ClientContext context) {
+ start(context.uskManager, context, container);
+ }
+
+ public void onCancelled(ObjectContainer container, ClientContext
context) {
+ synchronized(this) {
+ finished = true;
+ }
+ if(persistent) {
+ // If cancelled externally, and this function is called
from USKFetcher,
+ // container may be null even though we are running on
the database thread,
+ // resulting in a database leak.
+ context.jobRunner.runBlocking(new DBJob() {
+
+ public void run(ObjectContainer container,
ClientContext context) {
+ container.activate(callback, 1);
+ callback.onCancelled(container,
context);
+ removeFrom(container, context);
+ container.deactivate(callback, 1);
+ }
+
+ }, NativeThread.HIGH_PRIORITY);
+ } else {
+ callback.onCancelled(container, context);
+ }
+ }
+
+ public void onFailure(ObjectContainer container, ClientContext context)
{
+ synchronized(this) {
+ finished = true;
+ }
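+		// As in onCancelled(): container may be null when we are called from
+		// the transient USKFetcher, so persistent cleanup must be routed
+		// through a DBJob on the database thread.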
+ if(persistent) {
+ if(container != null) {
+ container.activate(callback, 1);
+ callback.onFailure(container, context);
+ container.deactivate(callback, 1);
+ removeFrom(container, context);
+ } else {
+ context.jobRunner.queue(new DBJob() {
+
+ public void run(ObjectContainer container,
ClientContext context) {
+ container.activate(callback, 1);
+ callback.onFailure(container, context);
+ container.deactivate(callback, 1);
+ removeFrom(container, context);
+ }
+
+ }, NativeThread.HIGH_PRIORITY, false);
+ }
+ } else {
+ callback.onFailure(container, context);
+ }
+ }
+
+ public short getPollingPriorityNormal() {
+ return pollingPriorityNormal;
+ }
+
+ public short getPollingPriorityProgress() {
+ return pollingPriorityProgress;
+ }
+
+ public void onFoundEdition(final long l, final USK key, ObjectContainer
container, ClientContext context, final boolean metadata, final short codec,
final byte[] data) {
+ synchronized(this) {
+ if(fetcher == null) {
+				Logger.error(this, "onFoundEdition but fetcher is null - isn't onFoundEdition() terminal for USKFetcherCallbacks?", new Exception("debug"));
+ }
+ finished = true;
+ fetcher = null;
+ }
+ if(persistent) {
+ if(container != null) {
+ container.activate(callback, 1);
+ callback.onFoundEdition(l, key, container,
context, metadata, codec, data);
+ container.deactivate(callback, 1);
+ removeFrom(container, context);
+ } else {
+ context.jobRunner.queue(new DBJob() {
+
+ public void run(ObjectContainer container,
ClientContext context) {
+ container.activate(callback, 1);
+ callback.onFoundEdition(l, key,
container, context, metadata, codec, data);
+ container.deactivate(callback, 1);
+ removeFrom(container, context);
+ }
+
+ }, NativeThread.HIGH_PRIORITY, false);
+ }
+ } else {
+ callback.onFoundEdition(l, key, container, context,
metadata, codec, data);
+ }
+ }
+
+ public final boolean isFinished() {
+ return finished;
+ }
+
+ private static volatile boolean logMINOR;
+// private static volatile boolean logDEBUG;
+
+ static {
+ Logger.registerLogThresholdCallback(new LogThresholdCallback() {
+
+ @Override
+ public void shouldUpdate() {
+ logMINOR = Logger.shouldLog(Logger.MINOR, this);
+//				logDEBUG = Logger.shouldLog(Logger.DEBUG, this);
+ }
+ });
+ }
+
+ public void removeFrom(ObjectContainer container, ClientContext
context) {
+ if(logMINOR) Logger.minor(this, "Removing "+this);
+ container.activate(origUSK, 5);
+ origUSK.removeFrom(container);
+ if(ownFetchContext) {
+ container.activate(ctx, 1);
+ ctx.removeFrom(container);
+ }
+ container.delete(this);
+ }
+
+ public boolean objectCanDeactivate(ObjectContainer container) {
+ return false;
+ }
+
+// public void objectOnNew(ObjectContainer container) {
+// if(logDEBUG) Logger.debug(this, "Storing as new: "+this);
+// }
+//
+// public void objectOnUpdate(ObjectContainer container) {
+// if(logDEBUG) Logger.debug(this, "Updating: "+this, new
Exception("debug"));
+// }
+//
+}
Modified: trunk/freenet/src/freenet/client/async/USKFetcherWrapper.java
===================================================================
--- trunk/freenet/src/freenet/client/async/USKFetcherWrapper.java
2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/client/async/USKFetcherWrapper.java
2009-04-01 20:34:09 UTC (rev 26322)
@@ -3,10 +3,13 @@
* http://www.gnu.org/ for further details of the GPL. */
package freenet.client.async;
+import com.db4o.ObjectContainer;
+
import freenet.client.FetchException;
import freenet.client.FetchResult;
import freenet.keys.FreenetURI;
import freenet.keys.USK;
+import freenet.node.RequestClient;
/**
* Wrapper for a backgrounded USKFetcher.
@@ -15,8 +18,8 @@
final USK usk;
- public USKFetcherWrapper(USK usk, short prio, ClientRequestScheduler
chkScheduler, ClientRequestScheduler sskScheduler, Object client) {
- super(prio, chkScheduler, sskScheduler, client);
+ public USKFetcherWrapper(USK usk, short prio, RequestClient client) {
+ super(prio, client);
this.usk = usk;
}
@@ -31,24 +34,24 @@
}
@Override
- public void notifyClients() {
+ public void notifyClients(ObjectContainer container, ClientContext
context) {
// Do nothing
}
- public void onSuccess(FetchResult result, ClientGetState state) {
+ public void onSuccess(FetchResult result, ClientGetState state,
ObjectContainer container, ClientContext context) {
// Ignore; we don't do anything with it because we are running
in the background.
}
- public void onFailure(FetchException e, ClientGetState state) {
+ public void onFailure(FetchException e, ClientGetState state,
ObjectContainer container, ClientContext context) {
// Ignore
}
- public void onBlockSetFinished(ClientGetState state) {
+ public void onBlockSetFinished(ClientGetState state, ObjectContainer
container, ClientContext context) {
// Ignore
}
@Override
- public void onTransition(ClientGetState oldState, ClientGetState
newState) {
+ public void onTransition(ClientGetState oldState, ClientGetState
newState, ObjectContainer container) {
// Ignore
}
@@ -57,15 +60,19 @@
return super.toString()+ ':' +usk;
}
- public void onExpectedMIME(String mime) {
+ public void onExpectedMIME(String mime, ObjectContainer container) {
// Ignore
}
- public void onExpectedSize(long size) {
+ public void onExpectedSize(long size, ObjectContainer container) {
// Ignore
}
- public void onFinalizedMetadata() {
+ public void onFinalizedMetadata(ObjectContainer container) {
// Ignore
}
+
+ public void cancel(ObjectContainer container, ClientContext context) {
+ super.cancel();
+ }
}
Modified: trunk/freenet/src/freenet/client/async/USKInserter.java
===================================================================
--- trunk/freenet/src/freenet/client/async/USKInserter.java 2009-04-01
20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/client/async/USKInserter.java 2009-04-01
20:34:09 UTC (rev 26322)
@@ -7,6 +7,8 @@
import java.net.MalformedURLException;
import java.util.Arrays;
+import com.db4o.ObjectContainer;
+
import freenet.client.InsertContext;
import freenet.client.InsertException;
import freenet.client.Metadata;
@@ -28,7 +30,7 @@
// Stuff to be passed on to the SingleBlockInserter
final BaseClientPutter parent;
- final Bucket data;
+ Bucket data;
final short compressionCodec;
final InsertContext ctx;
final PutCompletionCallback cb;
@@ -37,11 +39,12 @@
final int token;
final boolean getCHKOnly;
public final Object tokenObject;
+ final boolean persistent;
final InsertableUSK privUSK;
final USK pubUSK;
/** Scanning for latest slot */
- private USKFetcher fetcher;
+ private USKFetcherTag fetcher;
/** Insert the actual SSK */
private SingleBlockInserter sbi;
private long edition;
@@ -51,8 +54,9 @@
/** After attempting inserts on this many slots, go back to the Fetcher */
private static final long MAX_TRIED_SLOTS = 10;
private boolean freeData;
+ final int hashCode;
- public void schedule() throws InsertException {
+ public void schedule(ObjectContainer container, ClientContext context) throws InsertException {
// Caller calls schedule()
// schedule() calls scheduleFetcher()
// scheduleFetcher() creates a Fetcher (set up to tell us about author-errors as well as valid inserts)
@@ -62,7 +66,7 @@
// if that succeeds, we complete
// if that fails, we increment our index and try again (in the callback)
// if that continues to fail MAX_TRIED_SLOTS (10) times, we go back to scheduleFetcher()
- scheduleFetcher();
+ scheduleFetcher(container, context);
}
/**
@@ -70,27 +74,34 @@
* The Fetcher must be insert-mode, in other words, it must know that we want the latest edition,
* including author errors and so on.
*/
- private void scheduleFetcher() {
+ private void scheduleFetcher(ObjectContainer container, ClientContext context) {
+ if(persistent)
+ container.activate(pubUSK, 5);
synchronized(this) {
if(Logger.shouldLog(Logger.MINOR, this))
Logger.minor(this, "scheduling fetcher for
"+pubUSK.getURI());
if(finished) return;
- fetcher = ctx.uskManager.getFetcherForInsertDontSchedule(pubUSK, parent.priorityClass, this, parent.getClient());
+ fetcher = context.uskManager.getFetcherForInsertDontSchedule(persistent ? pubUSK.clone() : pubUSK, parent.priorityClass, this, parent.getClient(), container, context, persistent);
+ if(Logger.shouldLog(Logger.MINOR, this))
+ Logger.minor(this, "scheduled: "+fetcher);
}
- fetcher.schedule();
+ if(persistent) {
+ container.store(fetcher);
+ container.store(this);
+ }
+ fetcher.schedule(container, context);
}
- public void onFoundEdition(long l, USK key) {
+ public void onFoundEdition(long l, USK key, ObjectContainer container, ClientContext context, boolean lastContentWasMetadata, short codec, byte[] hisData) {
boolean alreadyInserted = false;
synchronized(this) {
edition = Math.max(l, edition);
consecutiveCollisions = 0;
- if((fetcher.lastContentWasMetadata() == isMetadata) &&
fetcher.hasLastData()
- && (fetcher.lastCompressionCodec() ==
compressionCodec)) {
+ if((lastContentWasMetadata == isMetadata) && hisData !=
null
+ && (codec == compressionCodec)) {
try {
+ if(persistent) container.activate(data,
1);
byte[] myData =
BucketTools.toByteArray(data);
- byte[] hisData =
BucketTools.toByteArray(fetcher.getLastData());
- fetcher.freeLastData();
if(Arrays.equals(myData, hisData)) {
// Success
alreadyInserted = true;
@@ -101,79 +112,149 @@
Logger.error(this, "Could not decode:
"+e, e);
}
}
- if(!alreadyInserted) {
+ if(parent.persistent()) {
+ container.activate(fetcher, 1);
+ container.activate(fetcher.ctx, 1);
+ fetcher.removeFrom(container, context);
+ fetcher.ctx.removeFrom(container);
fetcher = null;
+ container.store(this);
}
}
if(alreadyInserted) {
// Success!
- cb.onEncode(pubUSK.copy(edition), this);
- parent.addMustSucceedBlocks(1);
- parent.completedBlock(true);
- cb.onSuccess(this);
- if(freeData) data.free();
+ parent.addMustSucceedBlocks(1, container);
+ parent.completedBlock(true, container, context);
+ if(persistent) {
+ container.activate(cb, 1);
+ container.activate(pubUSK, 5);
+ }
+ cb.onEncode(pubUSK.copy(edition), this, container, context);
+ cb.onSuccess(this, container, context);
+ if(freeData) {
+ data.free();
+ if(persistent) data.removeFrom(container);
+ }
} else {
- scheduleInsert();
+ scheduleInsert(container, context);
}
}
- private void scheduleInsert() {
- long edNo = Math.max(edition, ctx.uskManager.lookup(pubUSK)+1);
+ private void scheduleInsert(ObjectContainer container, ClientContext context) {
+ long edNo = Math.max(edition, context.uskManager.lookup(pubUSK)+1);
+ if(persistent) {
+ container.activate(privUSK, 5);
+ container.activate(pubUSK, 5);
+ }
synchronized(this) {
if(finished) return;
edition = edNo;
if(Logger.shouldLog(Logger.MINOR, this))
Logger.minor(this, "scheduling insert for
"+pubUSK.getURI()+ ' ' +edition);
sbi = new SingleBlockInserter(parent, data,
compressionCodec, privUSK.getInsertableSSK(edition).getInsertURI(),
- ctx, this, isMetadata, sourceLength,
token, getCHKOnly, false, true /* we don't use it */, tokenObject, freeData);
+ ctx, this, isMetadata, sourceLength,
token, getCHKOnly, false, true /* we don't use it */, tokenObject, container,
context, parent.persistent(), false);
}
try {
- sbi.schedule();
+ sbi.schedule(container, context);
+ if(persistent) container.store(this);
} catch (InsertException e) {
- cb.onFailure(e, this);
synchronized(this) {
finished = true;
}
- if(freeData)
+ if(freeData) {
+ if(persistent) container.activate(data, 1);
data.free();
+ data.removeFrom(container);
+ synchronized(this) {
+ data = null;
+ }
+ }
+ if(persistent) container.store(this);
+ cb.onFailure(e, this, container, context);
}
}
- public synchronized void onSuccess(ClientPutState state) {
- cb.onEncode(pubUSK.copy(edition), this);
- cb.onSuccess(this);
+ public synchronized void onSuccess(ClientPutState state, ObjectContainer container, ClientContext context) {
+ if(persistent) container.activate(pubUSK, 5);
+ USK newEdition = pubUSK.copy(edition);
finished = true;
sbi = null;
FreenetURI targetURI = pubUSK.getSSK(edition).getURI();
- FreenetURI realURI = ((SingleBlockInserter)state).getURI();
+ FreenetURI realURI = ((SingleBlockInserter)state).getURI(container, context);
if(!targetURI.equals(realURI))
Logger.error(this, "URI should be "+targetURI+"
actually is "+realURI);
else {
if(Logger.shouldLog(Logger.MINOR, this))
Logger.minor(this, "URI should be "+targetURI+"
actually is "+realURI);
- ctx.uskManager.update(pubUSK, edition);
+ context.uskManager.update(pubUSK, edition, context);
}
+ if(persistent) state.removeFrom(container, context);
+ if(freeData) {
+ if(persistent) container.activate(data, 1);
+ data.free();
+ if(persistent) data.removeFrom(container);
+ synchronized(this) {
+ data = null;
+ }
+ if(persistent) container.store(this);
+ }
+ if(persistent) {
+ container.activate(cb, 1);
+ container.store(this);
+ }
+ cb.onEncode(newEdition, this, container, context);
+ cb.onSuccess(this, container, context);
// FINISHED!!!! Yay!!!
}
- public synchronized void onFailure(InsertException e, ClientPutState state) {
+ public void onFailure(InsertException e, ClientPutState state, ObjectContainer container, ClientContext context) {
+ ClientPutState oldSBI;
+ synchronized(this) {
+ oldSBI = sbi;
sbi = null;
if(e.getMode() == InsertException.COLLISION) {
// Try the next slot
edition++;
- if(consecutiveCollisions++ > MAX_TRIED_SLOTS)
- scheduleFetcher();
+ consecutiveCollisions++;
+ if(persistent) container.store(this);
+ if(consecutiveCollisions > MAX_TRIED_SLOTS)
+ scheduleFetcher(container, context);
else
- scheduleInsert();
+ scheduleInsert(container, context);
} else {
- cb.onFailure(e, state);
+ if(freeData) {
+ if(persistent) container.activate(data, 1);
+ data.free();
+ data.removeFrom(container);
+ synchronized(this) {
+ data = null;
+ }
+ if(persistent) container.store(this);
+ }
+ if(persistent)
+ container.activate(cb, 1);
+ cb.onFailure(e, state, container, context);
}
+ }
+ if(state != null && persistent) {
+ state.removeFrom(container, context);
+ }
+ if(oldSBI != null && oldSBI != state) {
+ container.activate(oldSBI, 1);
+ oldSBI.removeFrom(container, context);
+ }
}
+ public int hashCode() {
+ return hashCode;
+ }
+
public USKInserter(BaseClientPutter parent, Bucket data, short compressionCodec, FreenetURI uri,
InsertContext ctx, PutCompletionCallback cb, boolean isMetadata, int sourceLength, int token,
- boolean getCHKOnly, boolean addToParent, Object tokenObject, boolean freeData) throws MalformedURLException {
+ boolean getCHKOnly, boolean addToParent, Object tokenObject, ObjectContainer container, ClientContext context, boolean freeData, boolean persistent) throws MalformedURLException {
+ this.hashCode = super.hashCode();
this.tokenObject = tokenObject;
+ this.persistent = persistent;
this.parent = parent;
this.data = data;
this.compressionCodec = compressionCodec;
@@ -184,11 +265,11 @@
this.token = token;
this.getCHKOnly = getCHKOnly;
if(addToParent) {
- parent.addBlock();
- parent.addMustSucceedBlocks(1);
- parent.notifyClients();
+ parent.addBlock(container);
+ parent.addMustSucceedBlocks(1, container);
+ parent.notifyClients(container, context);
}
- privUSK = InsertableUSK.createInsertable(uri);
+ privUSK = InsertableUSK.createInsertable(uri, persistent);
pubUSK = privUSK.getUSK();
edition = pubUSK.suggestedEdition;
this.freeData = freeData;
@@ -198,45 +279,74 @@
return parent;
}
- public void cancel() {
- if(fetcher != null)
- fetcher.cancel();
- if(sbi != null)
- sbi.cancel();
+ public void cancel(ObjectContainer container, ClientContext context) {
+ USKFetcherTag tag;
+ boolean persist = persistent;
synchronized(this) {
+ if(finished) return;
finished = true;
+ tag = fetcher;
+ fetcher = null;
}
- if(freeData)
- data.free();
- cb.onFailure(new InsertException(InsertException.CANCELLED), this);
+ if(persistent) container.store(this);
+ if(tag != null) {
+ tag.cancel(container, context);
+ if(persist) container.activate(this, 1); // May have been deactivated by callbacks
+ }
+ if(sbi != null) {
+ sbi.cancel(container, context); // will call onFailure, which will removeFrom()
+ if(persist) container.activate(this, 1); // May have been deactivated by callbacks
+ }
+ if(freeData) {
+ if(persistent) container.activate(data, 1);
+ data.free();
+ if(persistent) data.removeFrom(container);
+ synchronized(this) {
+ data = null;
+ }
+ if(persistent) container.store(this);
+ }
+ if(persistent) container.activate(cb, 1);
+ cb.onFailure(new InsertException(InsertException.CANCELLED), this, container, context);
}
- public void onFailure() {
+ public void onFailure(ObjectContainer container, ClientContext context) {
Logger.error(this, "Fetcher failed", new Exception("debug"));
- scheduleInsert();
+ scheduleInsert(container, context);
}
- public synchronized void onCancelled() {
+ public synchronized void onCancelled(ObjectContainer container, ClientContext context) {
+ if(fetcher != null) {
+ if(parent.persistent()) {
+ container.activate(fetcher, 1);
+ container.activate(fetcher.ctx, 1);
+ fetcher.ctx.removeFrom(container);
+ fetcher.removeFrom(container, context);
+ }
+ fetcher = null;
+ }
if(finished) return;
Logger.error(this, "Unexpected onCancelled()", new
Exception("error"));
- cancel();
+ cancel(container, context);
}
- public void onEncode(BaseClientKey key, ClientPutState state) {
+ public void onEncode(BaseClientKey key, ClientPutState state, ObjectContainer container, ClientContext context) {
// Ignore
}
- public void onTransition(ClientPutState oldState, ClientPutState newState) {
+ public void onTransition(ClientPutState oldState, ClientPutState newState, ObjectContainer container) {
// Shouldn't happen
Logger.error(this, "Got onTransition("+oldState+ ',' +newState+ ')');
}
- public void onMetadata(Metadata m, ClientPutState state) {
+ public void onMetadata(Metadata m, ClientPutState state, ObjectContainer container, ClientContext context) {
// Shouldn't happen
Logger.error(this, "Got onMetadata("+m+ ',' +state+ ')');
}
- public void onBlockSetFinished(ClientPutState state) {
+ public void onBlockSetFinished(ClientPutState state, ObjectContainer container, ClientContext context) {
// Ignore
}
@@ -248,7 +358,7 @@
return null;
}
- public void onFetchable(ClientPutState state) {
+ public void onFetchable(ClientPutState state, ObjectContainer container) {
// Ignore
}
@@ -260,4 +370,48 @@
return parent.getPriorityClass();
}
+ public void removeFrom(ObjectContainer container, ClientContext context) {
+ if(Logger.shouldLog(Logger.MINOR, this))
+ Logger.minor(this, "Removing from database: "+this, new Exception("debug"));
+ // parent will remove self
+ if(freeData && data != null && container.ext().isStored(data)) {
+ try {
+ data.free();
+ } catch (Throwable t) {
+ Logger.error(this, "Already freed? Caught in removeFrom on "+this+" : "+data+" : "+t, t);
+ }
+ data.removeFrom(container);
+ }
+ // ctx is passed in, cb will deal with
+ // cb will remove self
+ // tokenObject will be removed by creator
+ container.activate(privUSK, 5);
+ privUSK.removeFrom(container);
+ container.activate(pubUSK, 5);
+ pubUSK.removeFrom(container);
+ if(fetcher != null) {
+ Logger.error(this, "Fetcher tag still present:
"+fetcher+" in removeFrom() for "+this, new Exception("debug"));
+ container.activate(fetcher, 1);
+ container.activate(fetcher.ctx, 1);
+ fetcher.ctx.removeFrom(container);
+ fetcher.removeFrom(container, context);
+ }
+ if(sbi != null) {
+ Logger.error(this, "sbi still present: "+sbi+" in
removeFrom() for "+this);
+ container.activate(sbi, 1);
+ sbi.removeFrom(container, context);
+ }
+ container.delete(this);
+ }
+
+ public boolean objectCanNew(ObjectContainer container) {
+ Logger.minor(this, "objectCanNew() on "+this, new
Exception("debug"));
+ return true;
+ }
+
+ public boolean objectCanUpdate(ObjectContainer container) {
+ Logger.minor(this, "objectCanUpdate() on "+this, new
Exception("debug"));
+ return true;
+ }
+
}
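The USKInserter changes above thread an ObjectContainer and ClientContext through every callback, but the underlying state machine is unchanged: scan for the latest slot, insert, and on a collision advance the edition until MAX_TRIED_SLOTS consecutive collisions force a rescan. A minimal, self-contained sketch of that loop follows; the Slots interface, probe() and rescan() are hypothetical stand-ins, not the committed API.

    // Hypothetical sketch of the slot-probing loop; only the control flow
    // mirrors USKInserter, none of these names exist in the tree.
    public class UskSlotLoopSketch {
        static final int MAX_TRIED_SLOTS = 10;

        enum Probe { SUCCESS, COLLISION, FAILURE }

        interface Slots {
            Probe probe(long edition); // attempt an SSK insert at this edition
            long rescan();             // ask the fetcher for the latest edition
        }

        static long insert(Slots slots, long latestKnown) {
            long edition = latestKnown + 1;
            int collisions = 0;
            while (true) {
                switch (slots.probe(edition)) {
                case SUCCESS:
                    return edition;                       // this slot is ours
                case COLLISION:
                    edition++;                            // slot taken, try the next one
                    if (++collisions > MAX_TRIED_SLOTS) { // too many: back to the fetcher
                        edition = slots.rescan() + 1;
                        collisions = 0;
                    }
                    break;
                default:
                    throw new IllegalStateException("insert failed at edition " + edition);
                }
            }
        }
    }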
Modified: trunk/freenet/src/freenet/client/async/USKManager.java
===================================================================
--- trunk/freenet/src/freenet/client/async/USKManager.java 2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/client/async/USKManager.java 2009-04-01 20:34:09 UTC (rev 26322)
@@ -6,9 +6,12 @@
import java.util.HashMap;
import java.util.Vector;
+import com.db4o.ObjectContainer;
+
import freenet.client.FetchContext;
import freenet.keys.USK;
import freenet.node.NodeClientCore;
+import freenet.node.RequestClient;
import freenet.node.RequestStarter;
import freenet.support.Executor;
import freenet.support.LRUQueue;
@@ -17,8 +20,10 @@
/**
* Tracks the latest version of every known USK.
* Also does auto-updates.
+ *
+ * Note that this is a transient class. It is not stored in the database. All
fetchers and subscriptions are likewise transient.
*/
-public class USKManager {
+public class USKManager implements RequestClient {
/** Latest version by blanked-edition-number USK */
final HashMap latestVersionByClearUSK;
@@ -39,18 +44,14 @@
final HashMap checkersByUSK;
final FetchContext backgroundFetchContext;
- final ClientRequestScheduler chkRequestScheduler;
- final ClientRequestScheduler sskRequestScheduler;
final Executor executor;
-
+ private ClientContext context;
+
public USKManager(NodeClientCore core) {
backgroundFetchContext = core.makeClient(RequestStarter.UPDATE_PRIORITY_CLASS).getFetchContext();
backgroundFetchContext.followRedirects = false;
- backgroundFetchContext.uskManager = this;
- this.chkRequestScheduler = core.requestStarters.chkFetchScheduler;
- this.sskRequestScheduler = core.requestStarters.sskFetchScheduler;
latestVersionByClearUSK = new HashMap();
subscribersByClearUSK = new HashMap();
fetchersByUSK = new HashMap();
@@ -60,6 +61,11 @@
executor = core.getExecutor();
}
+ public void init(ObjectContainer container, ClientContext context) {
+ this.context = context;
+ USKManagerPersistent.init(this, container, context);
+ }
+
/**
* Look up the latest known version of the given USK.
* @return The latest known edition number, or -1.
@@ -71,7 +77,12 @@
else return -1;
}
- public synchronized USKFetcher getFetcher(USK usk, FetchContext ctx,
+ public USKFetcherTag getFetcher(USK usk, FetchContext ctx, boolean keepLast, boolean persistent,
+ USKFetcherCallback callback, boolean ownFetchContext, ObjectContainer container, ClientContext context) {
+ return USKFetcherTag.create(usk, callback, context.nodeDBHandle, persistent, container, ctx, keepLast, 0, ownFetchContext);
+ }
+
+ synchronized USKFetcher getFetcher(USK usk, FetchContext ctx,
ClientRequester requester, boolean keepLastData) {
USKFetcher f = (USKFetcher) fetchersByUSK.get(usk);
USK clear = usk.clearCopy();
@@ -85,15 +96,12 @@
fetchersByUSK.put(usk, f);
return f;
}
-
- public USKFetcher getFetcherForInsertDontSchedule(USK usk, short prioClass, USKFetcherCallback cb, Object client) {
- USKFetcher f = new USKFetcher(usk, this, backgroundFetchContext,
- new USKFetcherWrapper(usk, prioClass, chkRequestScheduler, sskRequestScheduler, client), 3, false, true);
- f.addCallback(cb);
- return f;
+
+ public USKFetcherTag getFetcherForInsertDontSchedule(USK usk, short prioClass, USKFetcherCallback cb, RequestClient client, ObjectContainer container, ClientContext context, boolean persistent) {
+ return getFetcher(usk, persistent ? new FetchContext(backgroundFetchContext, FetchContext.IDENTICAL_MASK, false, null) : backgroundFetchContext, true, client.persistent(), cb, true, container, context);
}
- public void startTemporaryBackgroundFetcher(USK usk) {
+ public void startTemporaryBackgroundFetcher(USK usk, ClientContext context) {
USK clear = usk.clearCopy();
USKFetcher sched = null;
Vector toCancel = null;
@@ -106,7 +114,7 @@
// }
USKFetcher f = (USKFetcher) backgroundFetchersByClearUSK.get(clear);
if(f == null) {
- f = new USKFetcher(usk, this, backgroundFetchContext, new USKFetcherWrapper(usk, RequestStarter.UPDATE_PRIORITY_CLASS, chkRequestScheduler, sskRequestScheduler, this), 3, true, false);
+ f = new USKFetcher(usk, this, backgroundFetchContext, new USKFetcherWrapper(usk, RequestStarter.UPDATE_PRIORITY_CLASS, this), 3, true, false);
sched = f;
backgroundFetchersByClearUSK.put(clear, f);
}
@@ -129,13 +137,13 @@
if(toCancel != null) {
for(int i=0;i<toCancel.size();i++) {
USKFetcher fetcher = (USKFetcher) toCancel.get(i);
- fetcher.cancel();
+ fetcher.cancel(null, context);
}
}
- if(sched != null) sched.schedule();
+ if(sched != null) sched.schedule(null, context);
}
- void update(final USK origUSK, final long number) {
+ void update(final USK origUSK, final long number, final ClientContext context) {
boolean logMINOR = Logger.shouldLog(Logger.MINOR, this);
if(logMINOR) Logger.minor(this, "Updating "+origUSK.getURI()+" : "+number);
USK clear = origUSK.clearCopy();
@@ -152,14 +160,15 @@
}
if(callbacks != null) {
// Run off-thread, because of locking, and because client callbacks may take some time
- final USK usk = origUSK.copy(number);
- for(final USKCallback callback : callbacks)
- executor.execute(new Runnable() {
- public void run() {
- callback.onFoundEdition(number, usk);
- }
- }, "USKManager callback executor for " +callback);
- }
+ final USK usk = origUSK.copy(number);
+ for(final USKCallback callback : callbacks)
+ context.mainExecutor.execute(new Runnable() {
+ public void run() {
+ callback.onFoundEdition(number, usk, null, // non-persistent
+ context, false, (short)-1, null);
+ }
+ }, "USKManager callback executor for " +callback);
+ }
}
/**
@@ -167,7 +176,8 @@
* updated. Note that this does not imply that the USK will be
* checked on a regular basis, unless runBackgroundFetch=true.
*/
- public void subscribe(USK origUSK, USKCallback cb, boolean runBackgroundFetch, Object client) {
+ public void subscribe(USK origUSK, USKCallback cb, boolean runBackgroundFetch, RequestClient client) {
+ if(client.persistent()) throw new UnsupportedOperationException("USKManager subscriptions cannot be persistent");
USKFetcher sched = null;
long ed = origUSK.suggestedEdition;
if(ed < 0) {
@@ -193,7 +203,7 @@
if(runBackgroundFetch) {
USKFetcher f = (USKFetcher) backgroundFetchersByClearUSK.get(clear);
if(f == null) {
- f = new USKFetcher(origUSK, this, backgroundFetchContext, new USKFetcherWrapper(origUSK, RequestStarter.UPDATE_PRIORITY_CLASS, chkRequestScheduler, sskRequestScheduler, client), 10, true, false);
+ f = new USKFetcher(origUSK, this, backgroundFetchContext, new USKFetcherWrapper(origUSK, RequestStarter.UPDATE_PRIORITY_CLASS, client), 10, true, false);
sched = f;
backgroundFetchersByClearUSK.put(clear, f);
}
@@ -201,12 +211,12 @@
}
}
if(curEd > ed)
- cb.onFoundEdition(curEd, origUSK.copy(curEd));
+ cb.onFoundEdition(curEd, origUSK.copy(curEd), null, context, false, (short)-1, null);
final USKFetcher fetcher = sched;
if(fetcher != null) {
executor.execute(new Runnable() {
public void run() {
- fetcher.schedule();
+ fetcher.schedule(null, context);
}
}, "USKManager.schedule for "+fetcher);
}
@@ -244,7 +254,7 @@
else
Logger.error(this, "Unsubscribing "+cb+" for "+origUSK+" but not already subscribed, remaining "+newCallbacks.length+" callbacks", new Exception("error"));
} else {
- f.removeSubscriber(cb);
+ f.removeSubscriber(cb, context);
if(!f.hasSubscribers()) {
if(!temporaryBackgroundFetchersLRU.contains(clear)) {
toCancel = f;
@@ -254,7 +264,7 @@
}
}
}
- if(toCancel != null) toCancel.cancel();
+ if(toCancel != null) toCancel.cancel(null, context);
}
/**
@@ -267,8 +277,8 @@
* @param fctx Fetcher context for actually fetching the keys. Not used by the USK polling.
* @return
*/
- public USKRetriever subscribeContent(USK origUSK, USKRetrieverCallback cb, boolean runBackgroundFetch, FetchContext fctx, short prio, Object client) {
- USKRetriever ret = new USKRetriever(fctx, prio, chkRequestScheduler, sskRequestScheduler, client, cb);
+ public USKRetriever subscribeContent(USK origUSK, USKRetrieverCallback cb, boolean runBackgroundFetch, FetchContext fctx, short prio, RequestClient client) {
+ USKRetriever ret = new USKRetriever(fctx, prio, client, cb);
subscribe(origUSK, ret, runBackgroundFetch, client);
return ret;
}
@@ -315,4 +325,12 @@
}
}
}
+
+ public boolean persistent() {
+ return false;
+ }
+
+ public void removeFrom(ObjectContainer container) {
+ throw new UnsupportedOperationException();
+ }
}
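Since USKManager.subscribe() now takes a RequestClient and rejects persistent ones, a caller supplies a throwaway transient client. A hypothetical usage sketch (watch() is illustrative only; the subscribe() signature is the one introduced above):

    // Hypothetical usage sketch: subscribing a purely transient client.
    static void watch(USKManager uskManager, USK usk, USKCallback cb) {
        RequestClient transientClient = new RequestClient() {
            public boolean persistent() { return false; } // subscribe() throws otherwise
            public void removeFrom(ObjectContainer container) {
                throw new UnsupportedOperationException(); // never stored in db4o
            }
        };
        // true = also poll the USK in the background at update priority
        uskManager.subscribe(usk, cb, true, transientClient);
    }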
Copied: trunk/freenet/src/freenet/client/async/USKManagerPersistent.java (from rev 26320, branches/db4o/freenet/src/freenet/client/async/USKManagerPersistent.java)
===================================================================
--- trunk/freenet/src/freenet/client/async/USKManagerPersistent.java (rev 0)
+++ trunk/freenet/src/freenet/client/async/USKManagerPersistent.java 2009-04-01 20:34:09 UTC (rev 26322)
@@ -0,0 +1,26 @@
+package freenet.client.async;
+
+import com.db4o.ObjectContainer;
+import com.db4o.ObjectSet;
+import com.db4o.query.Predicate;
+
+/**
+ * The persistent part of a USKManager.
+ * @author toad
+ *
+ */
+public class USKManagerPersistent {
+
+ static void init(USKManager manager, ObjectContainer container, final ClientContext context) {
+ ObjectSet<USKFetcherTag> set = container.query(new Predicate<USKFetcherTag>() {
+ public boolean match(USKFetcherTag tag) {
+ if(tag.nodeDBHandle != context.nodeDBHandle) return false;
+ if(tag.isFinished()) return false;
+ return true;
+ }
+ });
+ while(set.hasNext())
+ set.next().start(manager, context, container);
+ }
+
+}
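init() restores every unfinished USKFetcherTag for this node with a db4o native query: subclass Predicate, and db4o evaluates match() against the stored extent (optimising it into an index lookup where it can). A standalone sketch of the same idiom with a hypothetical Task class:

    // Hypothetical sketch of the native-query idiom; Task is not a real class.
    import com.db4o.ObjectContainer;
    import com.db4o.ObjectSet;
    import com.db4o.query.Predicate;

    class Task {
        long owner;
        boolean done;
    }

    class TaskQueries {
        static ObjectSet<Task> pendingFor(ObjectContainer container, final long owner) {
            return container.query(new Predicate<Task>() {
                public boolean match(Task t) {
                    // Runs against every stored Task unless db4o can optimise it.
                    return t.owner == owner && !t.done;
                }
            });
        }
    }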
Modified: trunk/freenet/src/freenet/client/async/USKProxyCompletionCallback.java
===================================================================
--- trunk/freenet/src/freenet/client/async/USKProxyCompletionCallback.java 2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/client/async/USKProxyCompletionCallback.java 2009-04-01 20:34:09 UTC (rev 26322)
@@ -3,6 +3,8 @@
* http://www.gnu.org/ for further details of the GPL. */
package freenet.client.async;
+import com.db4o.ObjectContainer;
+
import freenet.client.FetchException;
import freenet.client.FetchResult;
import freenet.keys.FreenetURI;
@@ -11,47 +13,71 @@
public class USKProxyCompletionCallback implements GetCompletionCallback {
final USK usk;
- final USKManager uskManager;
final GetCompletionCallback cb;
+ final boolean persistent;
- public USKProxyCompletionCallback(USK usk, USKManager um, GetCompletionCallback cb) {
+ public USKProxyCompletionCallback(USK usk, GetCompletionCallback cb, boolean persistent) {
this.usk = usk;
- this.uskManager = um;
this.cb = cb;
+ this.persistent = persistent;
}
- public void onSuccess(FetchResult result, ClientGetState state) {
- uskManager.update(usk, usk.suggestedEdition);
- cb.onSuccess(result, state);
+ public void onSuccess(FetchResult result, ClientGetState state, ObjectContainer container, ClientContext context) {
+ if(container != null && persistent) {
+ container.activate(cb, 1);
+ container.activate(usk, 5);
+ }
+ context.uskManager.update(usk, usk.suggestedEdition, context);
+ cb.onSuccess(result, state, container, context);
+ if(persistent) removeFrom(container);
}
- public void onFailure(FetchException e, ClientGetState state) {
+ private void removeFrom(ObjectContainer container) {
+ container.activate(usk, 5);
+ usk.removeFrom(container);
+ container.delete(this);
+ }
+
+ public void onFailure(FetchException e, ClientGetState state, ObjectContainer container, ClientContext context) {
+ if(persistent) {
+ container.activate(cb, 1);
+ container.activate(usk, 5);
+ }
FreenetURI uri = e.newURI;
if(uri != null) {
uri = usk.turnMySSKIntoUSK(uri);
e = new FetchException(e, uri);
}
- cb.onFailure(e, state);
+ cb.onFailure(e, state, container, context);
+ if(persistent) removeFrom(container);
}
- public void onBlockSetFinished(ClientGetState state) {
- cb.onBlockSetFinished(state);
+ public void onBlockSetFinished(ClientGetState state, ObjectContainer container, ClientContext context) {
+ if(container != null && persistent)
+ container.activate(cb, 1);
+ cb.onBlockSetFinished(state, container, context);
}
- public void onTransition(ClientGetState oldState, ClientGetState newState) {
+ public void onTransition(ClientGetState oldState, ClientGetState newState, ObjectContainer container) {
// Ignore
}
- public void onExpectedMIME(String mime) {
- cb.onExpectedMIME(mime);
+ public void onExpectedMIME(String mime, ObjectContainer container) {
+ if(container != null && persistent)
+ container.activate(cb, 1);
+ cb.onExpectedMIME(mime, container);
}
- public void onExpectedSize(long size) {
- cb.onExpectedSize(size);
+ public void onExpectedSize(long size, ObjectContainer container) {
+ if(container != null && persistent)
+ container.activate(cb, 1);
+ cb.onExpectedSize(size, container);
}
- public void onFinalizedMetadata() {
- cb.onFinalizedMetadata();
+ public void onFinalizedMetadata(ObjectContainer container) {
+ if(container != null && persistent)
+ container.activate(cb, 1);
+ cb.onFinalizedMetadata(container);
}
}
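The pattern above, activating cb and usk before touching them and calling removeFrom() exactly once on completion, is the db4o discipline the whole branch follows: a persisted object's fields are only guaranteed readable to the depth they were activated. A hedged, self-contained sketch of the rule (Holder and its fields are illustrative, not committed code):

    // Hypothetical sketch of the activate-before-use rule.
    import com.db4o.ObjectContainer;

    class Holder {
        Object cb;   // stands in for GetCompletionCallback
        Object usk;  // stands in for USK (deep structure, hence depth 5 above)
        boolean persistent;

        void use(ObjectContainer container) {
            if (persistent && container != null) {
                container.activate(cb, 1);  // depth 1: cb's immediate fields
                container.activate(usk, 5); // depth 5: nested key structure
            }
            // Only after activation is it safe to dereference cb and usk.
        }
    }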
Modified: trunk/freenet/src/freenet/client/async/USKRetriever.java
===================================================================
--- trunk/freenet/src/freenet/client/async/USKRetriever.java 2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/client/async/USKRetriever.java 2009-04-01 20:34:09 UTC (rev 26322)
@@ -5,6 +5,8 @@
import java.net.MalformedURLException;
+import com.db4o.ObjectContainer;
+
import freenet.client.ArchiveContext;
import freenet.client.ClientMetadata;
import freenet.client.FetchException;
@@ -12,6 +14,7 @@
import freenet.client.FetchContext;
import freenet.keys.FreenetURI;
import freenet.keys.USK;
+import freenet.node.RequestClient;
import freenet.support.Logger;
/**
@@ -23,14 +26,15 @@
final FetchContext ctx;
final USKRetrieverCallback cb;
- public USKRetriever(FetchContext fctx, short prio, ClientRequestScheduler chkSched,
- ClientRequestScheduler sskSched, Object client, USKRetrieverCallback cb) {
- super(prio, chkSched, sskSched, client);
+ public USKRetriever(FetchContext fctx, short prio,
+ RequestClient client, USKRetrieverCallback cb) {
+ super(prio, client);
+ if(client.persistent()) throw new UnsupportedOperationException("USKRetriever cannot be persistent");
this.ctx = fctx;
this.cb = cb;
}
- public void onFoundEdition(long l, USK key) {
+ public void onFoundEdition(long l, USK key, ObjectContainer container, ClientContext context, boolean metadata, short codec, byte[] data) {
if(l < 0) {
Logger.error(this, "Found negative edition: "+l+" for
"+key+" !!!");
return;
@@ -42,9 +46,9 @@
FreenetURI uri = key.getSSK(l).getURI();
try {
SingleFileFetcher getter =
- (SingleFileFetcher) SingleFileFetcher.create(this, this, new ClientMetadata(), uri, ctx, new ArchiveContext(ctx.maxTempLength, ctx.maxArchiveLevels),
- ctx.maxNonSplitfileRetries, 0, true, l, true, null, false);
- getter.schedule();
+ (SingleFileFetcher) SingleFileFetcher.create(this, this, uri, ctx, new ArchiveContext(ctx.maxTempLength, ctx.maxArchiveLevels),
+ ctx.maxNonSplitfileRetries, 0, true, l, true, null, false, null, context);
+ getter.schedule(null, context);
} catch (MalformedURLException e) {
Logger.error(this, "Impossible: "+e, e);
} catch (FetchException e) {
@@ -52,17 +56,17 @@
}
}
- public void onSuccess(FetchResult result, ClientGetState state) {
+ public void onSuccess(FetchResult result, ClientGetState state, ObjectContainer container, ClientContext context) {
if(Logger.shouldLog(Logger.MINOR, this))
Logger.minor(this, "Success on "+this+" from "+state+" : length "+result.size()+" mime type "+result.getMimeType());
cb.onFound(state.getToken(), result);
}
- public void onFailure(FetchException e, ClientGetState state) {
+ public void onFailure(FetchException e, ClientGetState state, ObjectContainer container, ClientContext context) {
Logger.error(this, "Found edition "+state.getToken()+" but failed to fetch edition: "+e, e);
}
- public void onBlockSetFinished(ClientGetState state) {
+ public void onBlockSetFinished(ClientGetState state, ObjectContainer container, ClientContext context) {
// Ignore
}
@@ -77,24 +81,24 @@
}
@Override
- public void notifyClients() {
+ public void notifyClients(ObjectContainer container, ClientContext context) {
// Ignore for now
}
@Override
- public void onTransition(ClientGetState oldState, ClientGetState newState) {
+ public void onTransition(ClientGetState oldState, ClientGetState newState, ObjectContainer container) {
// Ignore
}
- public void onExpectedMIME(String mime) {
+ public void onExpectedMIME(String mime, ObjectContainer container) {
// Ignore
}
- public void onExpectedSize(long size) {
+ public void onExpectedSize(long size, ObjectContainer container) {
// Ignore
}
- public void onFinalizedMetadata() {
+ public void onFinalizedMetadata(ObjectContainer container) {
// Ignore
}
@@ -106,4 +110,8 @@
return cb.getPollingPriorityProgress();
}
+ public void cancel(ObjectContainer container, ClientContext context) {
+ super.cancel();
+ }
+
}
Modified: trunk/freenet/src/freenet/client/events/ClientEventListener.java
===================================================================
--- trunk/freenet/src/freenet/client/events/ClientEventListener.java 2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/client/events/ClientEventListener.java 2009-04-01 20:34:09 UTC (rev 26322)
@@ -3,7 +3,11 @@
* http://www.gnu.org/ for further details of the GPL. */
package freenet.client.events;
+import com.db4o.ObjectContainer;
+import freenet.client.async.ClientContext;
+
+
/**
* Event handling for clients.
*
@@ -15,7 +19,17 @@
/**
* Hears an event.
+ * @param container The database context the event was generated in.
+ * NOTE THAT IT MAY NOT HAVE BEEN GENERATED IN A DATABASE CONTEXT AT ALL:
+ * In this case, container will be null, and you should use context to schedule a DBJob.
**/
- public void receive(ClientEvent ce);
+ public void receive(ClientEvent ce, ObjectContainer maybeContainer, ClientContext context);
+ /**
+ * Called when the EventProducer gets removeFrom(ObjectContainer).
+ * If the listener is the main listener which probably called removeFrom(), it should do nothing.
+ * If it's a tag-along but request-specific listener, it may need to remove itself.
+ */
+ public void onRemoveEventProducer(ObjectContainer container);
+
}
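A listener honouring the new contract checks for a null container and, when it needs the database anyway, hops onto the database thread via context.jobRunner. A hypothetical sketch (PersistingListener and record() are invented; the DBJob interface and the queue(DBJob, priority, boolean) call are the ones this commit introduces and uses in QueueToadlet below):

    // Hypothetical listener sketch; only the interfaces are from this commit.
    class PersistingListener implements ClientEventListener {
        public void receive(final ClientEvent ce, ObjectContainer maybeContainer,
                final ClientContext context) {
            if(maybeContainer != null) {
                record(ce, maybeContainer);            // already on the database thread
            } else {
                context.jobRunner.queue(new DBJob() {  // defer to the database thread
                    public void run(ObjectContainer container, ClientContext ctx) {
                        record(ce, container);
                    }
                }, NativeThread.NORM_PRIORITY, false);
            }
        }

        public void onRemoveEventProducer(ObjectContainer container) {
            container.delete(this);                    // request-specific listener: remove self
        }

        private void record(ClientEvent ce, ObjectContainer container) {
            container.store(this);                     // placeholder for real database work
        }
    }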
Modified: trunk/freenet/src/freenet/client/events/ClientEventProducer.java
===================================================================
--- trunk/freenet/src/freenet/client/events/ClientEventProducer.java 2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/client/events/ClientEventProducer.java 2009-04-01 20:34:09 UTC (rev 26322)
@@ -3,7 +3,11 @@
* http://www.gnu.org/ for further details of the GPL. */
package freenet.client.events;
+import com.db4o.ObjectContainer;
+import freenet.client.async.ClientContext;
+
+
/**
* Event handling for clients.
*
@@ -15,7 +19,7 @@
* Sends the event to all registered EventListeners.
* @param ce the ClientEvent to raise
*/
- void produceEvent(ClientEvent ce);
+ void produceEvent(ClientEvent ce, ObjectContainer maybeContainer, ClientContext context);
/**
* Adds an EventListener that will receive all events produced
@@ -31,6 +35,8 @@
* @return true if a Listener was removed, false otherwise.
*/
boolean removeEventListener(ClientEventListener cel);
+
+ void removeFrom(ObjectContainer container);
}
Modified: trunk/freenet/src/freenet/client/events/EventDumper.java
===================================================================
--- trunk/freenet/src/freenet/client/events/EventDumper.java 2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/client/events/EventDumper.java 2009-04-01 20:34:09 UTC (rev 26322)
@@ -5,16 +5,27 @@
import java.io.PrintWriter;
+import com.db4o.ObjectContainer;
+
+import freenet.client.async.ClientContext;
+
public class EventDumper implements ClientEventListener {
final PrintWriter pw;
+ final boolean removeWithProducer;
- public EventDumper(PrintWriter writer) {
+ public EventDumper(PrintWriter writer, boolean removeWithProducer) {
this.pw = writer;
+ this.removeWithProducer = removeWithProducer;
}
- public void receive(ClientEvent ce) {
+ public void receive(ClientEvent ce, ObjectContainer container, ClientContext context) {
pw.println(ce.getDescription());
}
+ public void onRemoveEventProducer(ObjectContainer container) {
+ if(removeWithProducer)
+ container.delete(this);
+ }
+
}
Modified: trunk/freenet/src/freenet/client/events/EventLogger.java
===================================================================
--- trunk/freenet/src/freenet/client/events/EventLogger.java 2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/client/events/EventLogger.java 2009-04-01 20:34:09 UTC (rev 26322)
@@ -3,6 +3,9 @@
* http://www.gnu.org/ for further details of the GPL. */
package freenet.client.events;
+import com.db4o.ObjectContainer;
+
+import freenet.client.async.ClientContext;
import freenet.support.Logger;
/**
@@ -13,9 +16,11 @@
public class EventLogger implements ClientEventListener {
final int logPrio;
+ final boolean removeWithProducer;
- public EventLogger(int prio) {
+ public EventLogger(int prio, boolean removeWithProducer) {
logPrio = prio;
+ this.removeWithProducer = removeWithProducer;
}
/**
@@ -24,7 +29,12 @@
* @param ce
* The event that occurred
*/
- public void receive(ClientEvent ce) {
+ public void receive(ClientEvent ce, ObjectContainer container, ClientContext context) {
Logger.logStatic(ce, ce.getDescription(), logPrio);
}
+
+ public void onRemoveEventProducer(ObjectContainer container) {
+ if(removeWithProducer)
+ container.delete(this);
+ }
}
Modified: trunk/freenet/src/freenet/client/events/SimpleEventProducer.java
===================================================================
--- trunk/freenet/src/freenet/client/events/SimpleEventProducer.java 2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/client/events/SimpleEventProducer.java 2009-04-01 20:34:09 UTC (rev 26322)
@@ -6,6 +6,9 @@
import java.util.NoSuchElementException;
import java.util.Vector;
+import com.db4o.ObjectContainer;
+
+import freenet.client.async.ClientContext;
import freenet.support.Logger;
/**
@@ -16,14 +19,14 @@
**/
public class SimpleEventProducer implements ClientEventProducer {
- private Vector listeners;
+ private Vector<ClientEventListener> listeners;
/**
* Create a new SimpleEventProducer
*
**/
public SimpleEventProducer() {
- listeners = new Vector();
+ listeners = new Vector<ClientEventListener>();
}
/** Create a new SimpleEventProducer with the given listeners. */
@@ -49,11 +52,16 @@
/**
* Sends the ClientEvent to all registered listeners of this object.
**/
- public void produceEvent(ClientEvent ce) {
- for (Enumeration e = listeners.elements() ;
+ public void produceEvent(ClientEvent ce, ObjectContainer container, ClientContext context) {
+ if(container != null)
+ container.activate(listeners, 1);
+ for (Enumeration<ClientEventListener> e = listeners.elements() ;
e.hasMoreElements();) {
try {
- ((ClientEventListener) e.nextElement()).receive(ce);
+ ClientEventListener cel = e.nextElement();
+ if(container != null)
+ container.activate(cel, 1);
+ cel.receive(ce, container, context);
} catch (NoSuchElementException ne) {
Logger.normal(this, "Concurrent modification in "+
"produceEvent!: "+this);
@@ -78,4 +86,15 @@
for (int i = 0 ; i < cela.length ; i++)
addEventListener(cela[i]);
}
+
+ public void removeFrom(ObjectContainer container) {
+ if(container != null)
+ container.activate(listeners, 1);
+ ClientEventListener[] list = listeners.toArray(new ClientEventListener[listeners.size()]);
+ listeners.clear();
+ container.delete(listeners);
+ for(int i=0;i<list.length;i++)
+ list[i].onRemoveEventProducer(container);
+ container.delete(this);
+ }
}
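The removeFrom() added here deletes the listener Vector and the producer itself, after giving each listener the chance to go with it via onRemoveEventProducer(); the removeWithProducer flag added to EventDumper and EventLogger above opts into that. A hypothetical wiring sketch (demo() is not committed code):

    // Hypothetical wiring sketch of producer/listener removal.
    static void demo(ObjectContainer container, ClientContext context, ClientEvent ev) {
        SimpleEventProducer producer = new SimpleEventProducer();
        // true = this listener deletes itself when the producer is removed
        producer.addEventListener(new EventLogger(Logger.MINOR, true));
        producer.produceEvent(ev, null, context); // null container: transient call site
        producer.removeFrom(container);           // cascades via onRemoveEventProducer()
    }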
Modified: trunk/freenet/src/freenet/client/events/SplitfileProgressEvent.java
===================================================================
--- trunk/freenet/src/freenet/client/events/SplitfileProgressEvent.java 2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/client/events/SplitfileProgressEvent.java 2009-04-01 20:34:09 UTC (rev 26322)
@@ -3,6 +3,8 @@
* http://www.gnu.org/ for further details of the GPL. */
package freenet.client.events;
+import com.db4o.ObjectContainer;
+
import freenet.support.Logger;
public class SplitfileProgressEvent implements ClientEvent {
@@ -63,4 +65,8 @@
return code;
}
+ public void removeFrom(ObjectContainer container) {
+ container.delete(this);
+ }
+
}
Modified: trunk/freenet/src/freenet/clients/http/FProxyToadlet.java
===================================================================
--- trunk/freenet/src/freenet/clients/http/FProxyToadlet.java 2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/clients/http/FProxyToadlet.java 2009-04-01 20:34:09 UTC (rev 26322)
@@ -11,6 +11,8 @@
import java.util.HashSet;
import java.util.Set;
+import com.db4o.ObjectContainer;
+
import freenet.client.DefaultMIMETypes;
import freenet.client.FetchException;
import freenet.client.FetchResult;
@@ -26,6 +28,7 @@
import freenet.l10n.L10n;
import freenet.node.Node;
import freenet.node.NodeClientCore;
+import freenet.node.RequestClient;
import freenet.node.RequestStarter;
import freenet.support.HTMLEncoder;
import freenet.support.HTMLNode;
@@ -478,7 +481,13 @@
try {
if(Logger.shouldLog(Logger.MINOR, this))
Logger.minor(this, "FProxy fetching "+key+"
("+maxSize+ ')');
- FetchResult result = fetch(key, maxSize, httprequest /*
fixme replace if HTTPRequest ever becomes comparable */);
+ FetchResult result = fetch(key, maxSize, new
RequestClient() {
+ public boolean persistent() {
+ return false;
+ }
+ public void removeFrom(ObjectContainer
container) {
+ throw new
UnsupportedOperationException();
+ } });
// Now, is it safe?
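fetch() previously accepted any Object as the client; it now requires a RequestClient, so FProxy passes the anonymous transient one above. Since that object is stateless, one shared constant would do the same job without a per-request allocation; a hedged suggestion, not part of this commit:

    // Hypothetical refactor, not in this commit: share one stateless client.
    private static final RequestClient TRANSIENT_FPROXY_CLIENT = new RequestClient() {
        public boolean persistent() { return false; }
        public void removeFrom(ObjectContainer container) {
            throw new UnsupportedOperationException(); // never stored in db4o
        }
    };
    // Call site then becomes:
    // FetchResult result = fetch(key, maxSize, TRANSIENT_FPROXY_CLIENT);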
Modified: trunk/freenet/src/freenet/clients/http/QueueToadlet.java
===================================================================
--- trunk/freenet/src/freenet/clients/http/QueueToadlet.java 2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/clients/http/QueueToadlet.java 2009-04-01 20:34:09 UTC (rev 26322)
@@ -26,8 +26,15 @@
import java.util.LinkedList;
import java.util.List;
+import com.db4o.ObjectContainer;
+
import freenet.client.DefaultMIMETypes;
+import freenet.client.FetchResult;
import freenet.client.HighLevelSimpleClient;
+import freenet.client.MetadataUnresolvedException;
+import freenet.client.TempFetchResult;
+import freenet.client.async.ClientContext;
+import freenet.client.async.DBJob;
import freenet.keys.FreenetURI;
import freenet.l10n.L10n;
import freenet.node.NodeClientCore;
@@ -54,6 +61,8 @@
import freenet.support.io.BucketTools;
import freenet.support.io.Closer;
import freenet.support.io.FileBucket;
+import freenet.support.io.NativeThread;
+import java.util.StringTokenizer;
public class QueueToadlet extends Toadlet implements RequestCompletionCallback, LinkEnabledCallback {
@@ -126,7 +135,7 @@
}
}
- MultiValueTable<String, String> responseHeaders
= new MultiValueTable<String, String>();
+ MultiValueTable responseHeaders = new
MultiValueTable<String, String>();
responseHeaders.put("Location",
"/files/?key="+insertURI.toASCIIString());
ctx.sendReplyHeaders(302, "Found",
responseHeaders, null, 0);
return;
@@ -134,7 +143,7 @@
String pass = request.getPartAsString("formPassword",
32);
if ((pass.length() == 0) ||
!pass.equals(core.formPassword)) {
- MultiValueTable<String, String> headers = new
MultiValueTable<String, String>();
+ MultiValueTable headers = new
MultiValueTable<String, String>();
headers.put("Location", "/queue/");
ctx.sendReplyHeaders(302, "Found", headers,
null, 0);
if(logMINOR) Logger.minor(this, "No
formPassword: "+pass);
@@ -145,7 +154,7 @@
String identifier = request.getPartAsString("identifier", MAX_IDENTIFIER_LENGTH);
if(logMINOR) Logger.minor(this, "Removing "+identifier);
try {
- fcp.removeGlobalRequest(identifier);
+ fcp.removeGlobalRequestBlocking(identifier);
} catch (MessageInvalidException e) {
this.sendErrorPage(ctx, 200, L10n.getString("QueueToadlet.failedToRemoveRequest"),
@@ -160,45 +169,22 @@
} else if(request.isPartSet("restart_request") &&
(request.getPartAsString("restart_request", 32).length() > 0)) {
String identifier =
request.getPartAsString("identifier", MAX_IDENTIFIER_LENGTH);
if(logMINOR) Logger.minor(this, "Restarting
"+identifier);
- ClientRequest[] clientRequests =
fcp.getGlobalRequests();
- for (int requestIndex = 0, requestCount =
clientRequests.length; requestIndex < requestCount; requestIndex++) {
- ClientRequest clientRequest =
clientRequests[requestIndex];
- if
(clientRequest.getIdentifier().equals(identifier)) {
- clientRequest.restartAsync();
- }
- }
- fcp.forceStorePersistentRequests();
+ fcp.restartBlocking(identifier);
writePermanentRedirect(ctx, "Done", "/queue/");
return;
} else if(request.isPartSet("remove_AllRequests") &&
(request.getPartAsString("remove_AllRequests", 32).length() > 0)) {
- ClientRequest[] reqs = fcp.getGlobalRequests();
- if(logMINOR) Logger.minor(this, "Request count:
"+reqs.length);
+ // FIXME panic button should just dump the
entire database ???
+ // FIXME what about non-global requests ???
- StringBuilder failedIdentifiers = new
StringBuilder();
+ boolean success =
fcp.removeAllGlobalRequestsBlocking();
- for(int i=0; i<reqs.length ; i++){
- String identifier =
reqs[i].getIdentifier();
- if(logMINOR) Logger.minor(this,
"Removing "+identifier);
- try {
-
fcp.removeGlobalRequest(identifier);
- } catch (MessageInvalidException e) {
-
failedIdentifiers.append(identifier + ' ' + e.getMessage() + ';');
- Logger.error(this, "Failed to
remove " + identifier + ':' + e.getMessage());
- continue;
- }
- }
-
- if(failedIdentifiers.length() > 0)
+ if(!success)
this.sendErrorPage(ctx, 200,
L10n.getString("QueueToadlet.failedToRemoveRequest"),
-
L10n.getString("QueueToadlet.failedToRemoveId",
- new
String[]{ "id" },
- new
String[]{ failedIdentifiers.toString() }
- ));
+
L10n.getString("QueueToadlet.failedToRemoveAll"));
else
writePermanentRedirect(ctx, "Done",
"/queue/");
- fcp.forceStorePersistentRequests();
return;
}else if(request.isPartSet("download")) {
// Queue a download
@@ -220,7 +206,7 @@
String persistence = request.getPartAsString("persistence", 32);
String returnType = request.getPartAsString("return-type", 32);
try {
- fcp.makePersistentGlobalRequest(fetchURI, expectedMIMEType, persistence, returnType);
+ fcp.makePersistentGlobalRequestBlocking(fetchURI, expectedMIMEType, persistence, returnType);
} catch (NotAllowedException e) {
this.writeError(L10n.getString("QueueToadlet.errorDToDisk"), L10n.getString("QueueToadlet.errorDToDiskConfig"), ctx);
return;
@@ -234,13 +220,13 @@
writePermanentRedirect(ctx, "Done",
"/queue/");
return;
}
- LinkedList<String> success = new
LinkedList<String>(), failure = new LinkedList<String>();
+ LinkedList<String> success = new LinkedList(),
failure = new LinkedList();
for(int i=0; i<keys.length; i++) {
String currentKey = keys[i];
try {
FreenetURI fetchURI = new
FreenetURI(currentKey);
-
fcp.makePersistentGlobalRequest(fetchURI, null, "forever", "disk");
+
fcp.makePersistentGlobalRequestBlocking(fetchURI, null, "forever", "disk");
success.add(currentKey);
} catch (Exception e) {
failure.add(currentKey);
@@ -284,16 +270,8 @@
} else if (request.isPartSet("change_priority")) {
String identifier = request.getPartAsString("identifier", MAX_IDENTIFIER_LENGTH);
short newPriority = Short.parseShort(request.getPartAsString("priority", 32));
- ClientRequest[] clientRequests = fcp.getGlobalRequests();
-loop: for (int requestIndex = 0, requestCount = clientRequests.length; requestIndex < requestCount; requestIndex++) {
- ClientRequest clientRequest = clientRequests[requestIndex];
- if (clientRequest.getIdentifier().equals(identifier)) {
- clientRequest.modifyRequest(null, newPriority); // no new ClientToken
- break loop;
- }
- }
+ fcp.modifyGlobalRequestBlocking(identifier, null, newPriority);
writePermanentRedirect(ctx, "Done", "/queue/");
- fcp.forceStorePersistentRequests();
return;
} else if (request.getPartAsString("insert", 128).length() > 0) {
FreenetURI insertURI;
@@ -326,12 +304,13 @@
/* copy bucket data */
Bucket copiedBucket = core.persistentTempBucketFactory.makeBucket(file.getData().size());
BucketTools.copy(file.getData(), copiedBucket);
+ final ClientPut clientPut;
try {
- ClientPut clientPut = new ClientPut(fcp.getGlobalClient(), insertURI, identifier, Integer.MAX_VALUE, RequestStarter.BULK_SPLITFILE_PRIORITY_CLASS, ClientRequest.PERSIST_FOREVER, null, false, !compress, -1, ClientPutMessage.UPLOAD_FROM_DIRECT, null, file.getContentType(), copiedBucket, null, fnam, false);
- clientPut.start();
- fcp.forceStorePersistentRequests();
+ clientPut = new ClientPut(fcp.getGlobalForeverClient(), insertURI, identifier, Integer.MAX_VALUE, RequestStarter.BULK_SPLITFILE_PRIORITY_CLASS, ClientRequest.PERSIST_FOREVER, null, false, !compress, -1, ClientPutMessage.UPLOAD_FROM_DIRECT, null, file.getContentType(), copiedBucket, null, fnam, false, fcp);
} catch (IdentifierCollisionException e) {
- e.printStackTrace();
+ Logger.error(this, "Cannot put same file twice in same millisecond");
+ writePermanentRedirect(ctx, "Done", "/queue/");
+ return;
} catch (NotAllowedException e) {
this.writeError(L10n.getString("QueueToadlet.errorAccessDenied"), L10n.getString("QueueToadlet.errorAccessDeniedFile", new String[]{ "file" }, new String[]{ file.getFilename() }), ctx);
return;
@@ -341,7 +320,20 @@
} catch (MalformedURLException mue1) {
writeError(L10n.getString("QueueToadlet.errorInvalidURI"),
L10n.getString("QueueToadlet.errorInvalidURIToU"), ctx);
return;
+ } catch (MetadataUnresolvedException e) {
+ Logger.error(this, "Unresolved metadata
in starting insert from data uploaded from browser: "+e, e);
+ writePermanentRedirect(ctx, "Done",
"/queue/");
+ return;
+ // FIXME should this be a proper
localised message? It shouldn't happen... but we'd like to get reports if it
does.
}
+ if(clientPut != null)
+ try {
+ fcp.startBlocking(clientPut);
+ } catch (IdentifierCollisionException
e) {
+ Logger.error(this, "Cannot put
same file twice in same millisecond");
+ writePermanentRedirect(ctx,
"Done", "/queue/");
+ return;
+ }
writePermanentRedirect(ctx, "Done", "/queue/");
return;
} else if (request.isPartSet("insert-local-file")) {
@@ -363,13 +355,14 @@
String target = file.getName();
if(!furi.getKeyType().equals("CHK"))
target = null;
+ final ClientPut clientPut;
try {
- ClientPut clientPut = new ClientPut(fcp.getGlobalClient(), furi, identifier, Integer.MAX_VALUE, RequestStarter.BULK_SPLITFILE_PRIORITY_CLASS, ClientRequest.PERSIST_FOREVER, null, false, false, -1, ClientPutMessage.UPLOAD_FROM_DISK, file, contentType, new FileBucket(file, true, false, false, false, false), null, target, false);
+ clientPut = new ClientPut(fcp.getGlobalForeverClient(), furi, identifier, Integer.MAX_VALUE, RequestStarter.BULK_SPLITFILE_PRIORITY_CLASS, ClientRequest.PERSIST_FOREVER, null, false, false, -1, ClientPutMessage.UPLOAD_FROM_DISK, file, contentType, new FileBucket(file, true, false, false, false, false), null, target, false, fcp);
if(logMINOR) Logger.minor(this, "Started global request to insert "+file+" to CHK@ as "+identifier);
- clientPut.start();
- fcp.forceStorePersistentRequests();
} catch (IdentifierCollisionException e) {
- e.printStackTrace();
+ Logger.error(this, "Cannot put same file twice in same millisecond");
+ writePermanentRedirect(ctx, "Done", "/queue/");
+ return;
} catch (MalformedURLException e) {
writeError(L10n.getString("QueueToadlet.errorInvalidURI"), L10n.getString("QueueToadlet.errorInvalidURIToU"), ctx);
return;
@@ -379,7 +372,20 @@
} catch (NotAllowedException e) {
this.writeError(L10n.getString("QueueToadlet.errorAccessDenied"),
L10n.getString("QueueToadlet.errorAccessDeniedFile", new String[]{ "file" },
new String[]{ file.getName() }), ctx);
return;
+ } catch (MetadataUnresolvedException e) {
+ Logger.error(this, "Unresolved metadata
in starting insert from data from file: "+e, e);
+ writePermanentRedirect(ctx, "Done",
"/queue/");
+ return;
+ // FIXME should this be a proper
localised message? It shouldn't happen... but we'd like to get reports if it
does.
}
+ if(clientPut != null)
+ try {
+ fcp.startBlocking(clientPut);
+ } catch (IdentifierCollisionException
e) {
+ Logger.error(this, "Cannot put
same file twice in same millisecond");
+ writePermanentRedirect(ctx,
"Done", "/queue/");
+ return;
+ }
writePermanentRedirect(ctx, "Done", "/queue/");
return;
} else if (request.isPartSet("insert-local-dir")) {
@@ -397,13 +403,14 @@
return;
}
}
+ ClientPutDir clientPutDir;
try {
- ClientPutDir clientPutDir = new ClientPutDir(fcp.getGlobalClient(), furi, identifier, Integer.MAX_VALUE, RequestStarter.BULK_SPLITFILE_PRIORITY_CLASS, ClientRequest.PERSIST_FOREVER, null, false, false, -1, file, null, false, true, false);
+ clientPutDir = new ClientPutDir(fcp.getGlobalForeverClient(), furi, identifier, Integer.MAX_VALUE, RequestStarter.BULK_SPLITFILE_PRIORITY_CLASS, ClientRequest.PERSIST_FOREVER, null, false, false, -1, file, null, false, true, false, fcp);
if(logMINOR) Logger.minor(this, "Started global request to insert dir "+file+" to "+furi+" as "+identifier);
- clientPutDir.start();
- fcp.forceStorePersistentRequests();
} catch (IdentifierCollisionException e) {
- e.printStackTrace();
+ Logger.error(this, "Cannot put same directory twice in same millisecond");
+ writePermanentRedirect(ctx, "Done", "/queue/");
+ return;
} catch (MalformedURLException e) {
writeError(L10n.getString("QueueToadlet.errorInvalidURI"), L10n.getString("QueueToadlet.errorInvalidURIToU"), ctx);
return;
@@ -411,52 +418,16 @@
this.writeError(L10n.getString("QueueToadlet.errorNoFileOrCannotRead"), L10n.getString("QueueToadlet.errorAccessDeniedFile", new String[]{ "file" }, new String[]{ file.toString() }), ctx);
return;
}
+ if(clientPutDir != null)
+ try {
+ fcp.startBlocking(clientPutDir);
+ } catch (IdentifierCollisionException e) {
+ Logger.error(this, "Cannot put same file twice in same millisecond");
+ writePermanentRedirect(ctx, "Done", "/queue/");
+ return;
+ }
writePermanentRedirect(ctx, "Done", "/queue/");
return;
- } else if (request.isPartSet("get")) {
- String identifier =
request.getPartAsString("identifier", MAX_IDENTIFIER_LENGTH);
- ClientRequest[] clientRequests =
fcp.getGlobalRequests();
-loop: for (int requestIndex = 0, requestCount =
clientRequests.length; requestIndex < requestCount; requestIndex++) {
- ClientRequest clientRequest =
clientRequests[requestIndex];
- if
(clientRequest.getIdentifier().equals(identifier)) {
- if (clientRequest instanceof
ClientGet) {
- ClientGet clientGet =
(ClientGet) clientRequest;
- if
(clientGet.hasSucceeded()) {
- Bucket
dataBucket = clientGet.getBucket();
- if (dataBucket
!= null) {
- String
forceDownload = request.getPartAsString("forceDownload", 32);
- if
(forceDownload.length() > 0) {
-
long forceDownloadTime = Long.parseLong(forceDownload);
-
if ((System.currentTimeMillis() - forceDownloadTime) > 60 * 1000) {
-
break loop;
-
}
-
MultiValueTable<String, String> responseHeaders = new MultiValueTable<String,
String>();
-
responseHeaders.put("Content-Disposition", "attachment; filename=\"" +
clientGet.getURI().getPreferredFilename() + '"');
-
writeReply(ctx, 200, "application/x-msdownload", "OK", responseHeaders,
dataBucket);
-
return;
- }
-
HTMLNode pageNode =
ctx.getPageMaker().getPageNode(L10n.getString("QueueToadlet.warningUnsafeContent"),
ctx);
-
HTMLNode contentNode = ctx.getPageMaker().getContentNode(pageNode);
-
HTMLNode alertNode =
contentNode.addChild(ctx.getPageMaker().getInfobox("infobox-alert",
L10n.getString("QueueToadlet.warningUnsafeContent")));
-
HTMLNode alertContent = ctx.getPageMaker().getContentNode(alertNode);
-
alertContent.addChild("#",
L10n.getString("QueueToadlet.warningUnsafeContentExplanation"));
-
HTMLNode optionListNode = alertContent.addChild("ul");
-
HTMLNode optionForm = ctx.addFormChild(optionListNode, "/queue/",
"queueDownloadNotFilteredConfirmForm-" + identifier.hashCode());
-
optionForm.addChild("input", new String[] { "type", "name", "value" }, new
String[] { "hidden", "identifier", identifier });
-
optionForm.addChild("input", new String[] { "type", "name", "value" }, new
String[] { "hidden", "forceDownload",
String.valueOf(System.currentTimeMillis()) });
-
optionForm.addChild("input", new String[] { "type", "name", "value" }, new
String[] { "submit", "get", "Download anyway" });
-
optionForm.addChild("input", new String[] { "type", "name", "value" }, new
String[] { "submit", "return", "Return to queue page" });
-
writeHTMLReply(ctx, 200, "OK", pageNode.generate());
- return;
- }
- }
-
writeError(L10n.getString("QueueToadlet.errorDownloadNotCompleted"),
L10n.getString("QueueToadlet.errorDownloadNotCompleted"), ctx);
- return;
- }
- }
- }
-
writeError(L10n.getString("QueueToadlet.errorDownloadNotFound"),
L10n.getString("QueueToadlet.errorDownloadNotFoundExplanation"), ctx);
- return;
}
} finally {
request.freeParts();
@@ -483,7 +454,7 @@
}
@Override
- public void handleGet(URI uri, final HTTPRequest request, ToadletContext ctx)
+ public void handleGet(URI uri, final HTTPRequest request, final ToadletContext ctx)
throws ToadletContextClosedException, IOException, RedirectException {
// We ensure that we have a FCP server running
@@ -504,49 +475,118 @@
final String requestPath = request.getPath().substring("/queue/".length());
+ boolean countRequests = false;
+
if (requestPath.length() > 0) {
- /* okay, there is something in the path, check it. */
- try {
- FreenetURI key = new FreenetURI(requestPath);
-
- /* locate request */
- ClientRequest[] clientRequests = fcp.getGlobalRequests();
- for (int requestIndex = 0, requestCount = clientRequests.length; requestIndex < requestCount; requestIndex++) {
- ClientRequest clientRequest = clientRequests[requestIndex];
- if (clientRequest.hasFinished() && (clientRequest instanceof ClientGet)) {
- ClientGet clientGet = (ClientGet) clientRequest;
- if (clientGet.getURI().equals(key)) {
- Bucket data = clientGet.getBucket();
- String mimeType = clientGet.getMIMEType();
- String requestedMimeType = request.getParam("type", null);
- String forceString = request.getParam("force");
- FProxyToadlet.handleDownload(ctx, data, ctx.getBucketFactory(), mimeType, requestedMimeType, forceString, request.isParameterSet("forcedownload"), "/queue/", key, "", "/queue/", false, ctx, core);
- return;
- }
+ if(requestPath.equals("countRequests.html") || requestPath.equals("/countRequests.html")) {
+ countRequests = true;
+ } else {
+ /* okay, there is something in the path, check it. */
+ try {
+ FreenetURI key = new FreenetURI(requestPath);
+
+ /* locate request */
+ TempFetchResult result = fcp.getCompletedRequestBlocking(key);
+ if(result != null) {
+ Bucket data = result.asBucket();
+ String mimeType = result.getMimeType();
+ String requestedMimeType = request.getParam("type", null);
+ String forceString = request.getParam("force");
+ FProxyToadlet.handleDownload(ctx, data, ctx.getBucketFactory(), mimeType, requestedMimeType, forceString, request.isParameterSet("forcedownload"), "/queue/", key, "", "/queue/", false, ctx, core);
+ if(result.freeWhenDone)
+ data.free();
+ return;
}
+ } catch (MalformedURLException mue1) {
}
- } catch (MalformedURLException mue1) {
}
- return;
}
- PageMaker pageMaker = ctx.getPageMaker();
+ class OutputWrapper {
+ boolean done;
+ HTMLNode pageNode;
+ }
+ final OutputWrapper ow = new OutputWrapper();
+
+ final PageMaker pageMaker = ctx.getPageMaker();
+
+ final boolean count = countRequests;
+
+ core.clientContext.jobRunner.queue(new DBJob() {
+
+ public void run(ObjectContainer container, ClientContext context) {
+ HTMLNode pageNode = null;
+ try {
+ if(count) {
+ long queued = core.requestStarters.chkFetchScheduler.countPersistentWaitingKeys(container);
+ System.err.println("Total waiting CHKs: "+queued);
+ long reallyQueued = core.requestStarters.chkFetchScheduler.countPersistentQueuedRequests(container);
+ System.err.println("Total queued CHK requests: "+reallyQueued);
+ pageNode = pageMaker.getPageNode(L10n.getString("QueueToadlet.title", new String[]{ "nodeName" }, new String[]{ core.getMyName() }), ctx);
+ HTMLNode contentNode = pageMaker.getContentNode(pageNode);
+ /* add alert summary box */
+ if(ctx.isAllowedFullAccess())
+ contentNode.addChild(core.alerts.createSummary());
+ HTMLNode infobox = contentNode.addChild(pageMaker.getInfobox("infobox-information", "Queued requests status"));
+ HTMLNode infoboxContent = pageMaker.getContentNode(infobox);
+ infoboxContent.addChild("p", "Total awaiting CHKs: "+queued);
+ infoboxContent.addChild("p", "Total queued CHK requests: "+reallyQueued);
+ return;
+ } else {
+ pageNode = handleGetInner(pageMaker, container, context, request, ctx);
+ }
+ }
+ } finally {
+ synchronized(ow) {
+ ow.done = true;
+ ow.pageNode = pageNode;
+ ow.notifyAll();
+ }
+ }
+ }
+
+ }, NativeThread.HIGH_PRIORITY, false);
+
+ HTMLNode pageNode;
+ synchronized(ow) {
+ while(true) {
+ if(ow.done) {
+ pageNode = ow.pageNode;
+ break;
+ }
+ try {
+ ow.wait();
+ } catch (InterruptedException e) {
+ // Ignore
+ }
+ }
+ }
+
+		MultiValueTable<String, String> pageHeaders = new MultiValueTable<String, String>();
+		if(pageNode != null)
+			writeHTMLReply(ctx, 200, "OK", pageHeaders, pageNode.generate());
+		else
+			this.writeError("Internal error", "Internal error", ctx);
+
+	}
+
+	private HTMLNode handleGetInner(PageMaker pageMaker, final ObjectContainer container, ClientContext context, final HTTPRequest request, ToadletContext ctx) {
+
		// First, get the queued requests, and separate them into different types.
-		LinkedList<ClientRequest> completedDownloadToDisk = new LinkedList<ClientRequest>();
-		LinkedList<ClientRequest> completedDownloadToTemp = new LinkedList<ClientRequest>();
-		LinkedList<ClientRequest> completedUpload = new LinkedList<ClientRequest>();
-		LinkedList<ClientRequest> completedDirUpload = new LinkedList<ClientRequest>();
+		LinkedList<ClientRequest> completedDownloadToDisk = new LinkedList();
+		LinkedList<ClientRequest> completedDownloadToTemp = new LinkedList();
+		LinkedList<ClientRequest> completedUpload = new LinkedList();
+		LinkedList<ClientRequest> completedDirUpload = new LinkedList();
-		LinkedList<ClientRequest> failedDownload = new LinkedList<ClientRequest>();
-		LinkedList<ClientRequest> failedUpload = new LinkedList<ClientRequest>();
-		LinkedList<ClientRequest> failedDirUpload = new LinkedList<ClientRequest>();
+		LinkedList<ClientRequest> failedDownload = new LinkedList();
+		LinkedList<ClientRequest> failedUpload = new LinkedList();
+		LinkedList<ClientRequest> failedDirUpload = new LinkedList();
-		LinkedList<ClientRequest> uncompletedDownload = new LinkedList<ClientRequest>();
-		LinkedList<ClientRequest> uncompletedUpload = new LinkedList<ClientRequest>();
-		LinkedList<ClientRequest> uncompletedDirUpload = new LinkedList<ClientRequest>();
+		LinkedList<ClientRequest> uncompletedDownload = new LinkedList();
+		LinkedList<ClientRequest> uncompletedUpload = new LinkedList();
+		LinkedList<ClientRequest> uncompletedDirUpload = new LinkedList();
-		ClientRequest[] reqs = fcp.getGlobalRequests();
+		ClientRequest[] reqs = fcp.getGlobalRequests(container);
if(Logger.shouldLog(Logger.MINOR, this))
Logger.minor(this, "Request count: "+reqs.length);
@@ -561,12 +601,14 @@
infoboxContent.addChild("#",
L10n.getString("QueueToadlet.noTaskOnGlobalQueue"));
contentNode.addChild(createInsertBox(pageMaker, ctx,
core.isAdvancedModeEnabled()));
contentNode.addChild(createBulkDownloadForm(ctx,
pageMaker));
- writeHTMLReply(ctx, 200, "OK", pageNode.generate());
- return;
+ return pageNode;
}
short lowestQueuedPrio = RequestStarter.MINIMUM_PRIORITY_CLASS;
+ long totalQueuedDownloadSize = 0;
+ long totalQueuedUploadSize = 0;
+
for(int i=0;i<reqs.length;i++) {
ClientRequest req = reqs[i];
if(req instanceof ClientGet) {
@@ -586,6 +628,9 @@
if(prio < lowestQueuedPrio)
lowestQueuedPrio = prio;
uncompletedDownload.add(cg);
+ long size = cg.getDataSize(container);
+ if(size > 0)
+ totalQueuedDownloadSize += size;
}
} else if(req instanceof ClientPut) {
ClientPut cp = (ClientPut) req;
@@ -599,6 +644,9 @@
lowestQueuedPrio = prio;
uncompletedUpload.add(cp);
}
+ long size = cp.getDataSize(container);
+ if(size > 0)
+ totalQueuedUploadSize += size;
} else if(req instanceof ClientPutDir) {
ClientPutDir cp = (ClientPutDir) req;
if(cp.hasSucceeded()) {
@@ -611,8 +659,13 @@
lowestQueuedPrio = prio;
uncompletedDirUpload.add(cp);
}
+ long size = cp.getTotalDataSize();
+ if(size > 0)
+ totalQueuedUploadSize += size;
}
}
+ System.err.println("Total queued downloads:
"+SizeUtil.formatSize(totalQueuedDownloadSize));
+ System.err.println("Total queued uploads:
"+SizeUtil.formatSize(totalQueuedUploadSize));
		Comparator<ClientRequest> jobComparator = new Comparator<ClientRequest>() {
			public int compare(ClientRequest firstRequest, ClientRequest secondRequest) {
@@ -625,9 +678,9 @@
					if(sortBy.equals("id")){
						result = firstRequest.getIdentifier().compareToIgnoreCase(secondRequest.getIdentifier());
					}else if(sortBy.equals("size")){
-						result = (firstRequest.getTotalBlocks() - secondRequest.getTotalBlocks()) < 0 ? -1 : 1;
+						result = (firstRequest.getTotalBlocks(container) - secondRequest.getTotalBlocks(container)) < 0 ? -1 : 1;
					}else if(sortBy.equals("progress")){
-						result = (firstRequest.getFetchedBlocks() / firstRequest.getMinBlocks() - secondRequest.getFetchedBlocks() / secondRequest.getMinBlocks()) < 0 ? -1 : 1;
+						result = (firstRequest.getFetchedBlocks(container) / firstRequest.getMinBlocks(container) - secondRequest.getFetchedBlocks(container) / secondRequest.getMinBlocks(container)) < 0 ? -1 : 1;
}else
isSet=false;
}else
@@ -722,6 +775,14 @@
navigationContent.addChild("li").addChild("a", "href",
"#uncompletedDirUpload", L10n.getString("QueueToadlet.DUinProgress", new
String[]{ "size" }, new String[]{ String.valueOf(uncompletedDirUpload.size())
}));
includeNavigationBar = true;
}
+ if (totalQueuedDownloadSize > 0) {
+ navigationContent.addChild("li",
L10n.getString("QueueToadlet.totalQueuedDownloads", "size",
SizeUtil.formatSize(totalQueuedDownloadSize)));
+ includeNavigationBar = true;
+ }
+ if (totalQueuedUploadSize > 0) {
+ navigationContent.addChild("li",
L10n.getString("QueueToadlet.totalQueuedUploads", "size",
SizeUtil.formatSize(totalQueuedUploadSize)));
+ includeNavigationBar = true;
+ }
if (includeNavigationBar) {
contentNode.addChild(navigationBar);
@@ -757,9 +818,9 @@
			HTMLNode completedDownloadsTempInfobox = contentNode.addChild(pageMaker.getInfobox("completed_requests", L10n.getString("QueueToadlet.completedDinTempDirectory", new String[]{ "size" }, new String[]{ String.valueOf(completedDownloadToTemp.size()) })));
			HTMLNode completedDownloadsToTempContent = pageMaker.getContentNode(completedDownloadsTempInfobox);
			if (advancedModeEnabled) {
-				completedDownloadsToTempContent.addChild(createRequestTable(pageMaker, ctx, completedDownloadToTemp, new int[] { LIST_IDENTIFIER, LIST_SIZE, LIST_MIME_TYPE, LIST_DOWNLOAD, LIST_PERSISTENCE, LIST_KEY }, priorityClasses, advancedModeEnabled, false));
+				completedDownloadsToTempContent.addChild(createRequestTable(pageMaker, ctx, completedDownloadToTemp, new int[] { LIST_IDENTIFIER, LIST_SIZE, LIST_MIME_TYPE, LIST_DOWNLOAD, LIST_PERSISTENCE, LIST_KEY }, priorityClasses, advancedModeEnabled, false, container));
			} else {
-				completedDownloadsToTempContent.addChild(createRequestTable(pageMaker, ctx, completedDownloadToTemp, new int[] { LIST_SIZE, LIST_MIME_TYPE, LIST_DOWNLOAD, LIST_PERSISTENCE, LIST_KEY }, priorityClasses, advancedModeEnabled, false));
+				completedDownloadsToTempContent.addChild(createRequestTable(pageMaker, ctx, completedDownloadToTemp, new int[] { LIST_SIZE, LIST_MIME_TYPE, LIST_DOWNLOAD, LIST_PERSISTENCE, LIST_KEY }, priorityClasses, advancedModeEnabled, false, container));
}
}
@@ -768,9 +829,9 @@
			HTMLNode completedToDiskInfobox = contentNode.addChild(pageMaker.getInfobox("completed_requests", L10n.getString("QueueToadlet.completedDinDownloadDirectory", new String[]{ "size" }, new String[]{ String.valueOf(completedDownloadToDisk.size()) })));
			HTMLNode completedToDiskInfoboxContent = pageMaker.getContentNode(completedToDiskInfobox);
			if (advancedModeEnabled) {
-				completedToDiskInfoboxContent.addChild(createRequestTable(pageMaker, ctx, completedDownloadToDisk, new int[] { LIST_IDENTIFIER, LIST_FILENAME, LIST_SIZE, LIST_MIME_TYPE, LIST_DOWNLOAD, LIST_PERSISTENCE, LIST_KEY }, priorityClasses, advancedModeEnabled, false));
+				completedToDiskInfoboxContent.addChild(createRequestTable(pageMaker, ctx, completedDownloadToDisk, new int[] { LIST_IDENTIFIER, LIST_FILENAME, LIST_SIZE, LIST_MIME_TYPE, LIST_DOWNLOAD, LIST_PERSISTENCE, LIST_KEY }, priorityClasses, advancedModeEnabled, false, container));
			} else {
-				completedToDiskInfoboxContent.addChild(createRequestTable(pageMaker, ctx, completedDownloadToDisk, new int[] { LIST_FILENAME, LIST_SIZE, LIST_MIME_TYPE, LIST_DOWNLOAD, LIST_PERSISTENCE, LIST_KEY }, priorityClasses, advancedModeEnabled, false));
+				completedToDiskInfoboxContent.addChild(createRequestTable(pageMaker, ctx, completedDownloadToDisk, new int[] { LIST_FILENAME, LIST_SIZE, LIST_MIME_TYPE, LIST_DOWNLOAD, LIST_PERSISTENCE, LIST_KEY }, priorityClasses, advancedModeEnabled, false, container));
}
}
@@ -779,9 +840,9 @@
			HTMLNode completedUploadInfobox = contentNode.addChild(pageMaker.getInfobox("completed_requests", L10n.getString("QueueToadlet.completedU", new String[]{ "size" }, new String[]{ String.valueOf(completedUpload.size()) })));
			HTMLNode completedUploadInfoboxContent = pageMaker.getContentNode(completedUploadInfobox);
			if (advancedModeEnabled) {
-				completedUploadInfoboxContent.addChild(createRequestTable(pageMaker, ctx, completedUpload, new int[] { LIST_IDENTIFIER, LIST_FILENAME, LIST_SIZE, LIST_MIME_TYPE, LIST_PERSISTENCE, LIST_KEY }, priorityClasses, advancedModeEnabled, true));
+				completedUploadInfoboxContent.addChild(createRequestTable(pageMaker, ctx, completedUpload, new int[] { LIST_IDENTIFIER, LIST_FILENAME, LIST_SIZE, LIST_MIME_TYPE, LIST_PERSISTENCE, LIST_KEY }, priorityClasses, advancedModeEnabled, true, container));
			} else {
-				completedUploadInfoboxContent.addChild(createRequestTable(pageMaker, ctx, completedUpload, new int[] { LIST_FILENAME, LIST_SIZE, LIST_MIME_TYPE, LIST_PERSISTENCE, LIST_KEY }, priorityClasses, advancedModeEnabled, true));
+				completedUploadInfoboxContent.addChild(createRequestTable(pageMaker, ctx, completedUpload, new int[] { LIST_FILENAME, LIST_SIZE, LIST_MIME_TYPE, LIST_PERSISTENCE, LIST_KEY }, priorityClasses, advancedModeEnabled, true, container));
}
}
@@ -790,9 +851,9 @@
			HTMLNode completedUploadDirInfobox = contentNode.addChild(pageMaker.getInfobox("completed_requests", L10n.getString("QueueToadlet.completedUDirectory", new String[]{ "size" }, new String[]{ String.valueOf(completedDirUpload.size()) })));
			HTMLNode completedUploadDirContent = pageMaker.getContentNode(completedUploadDirInfobox);
			if (advancedModeEnabled) {
-				completedUploadDirContent.addChild(createRequestTable(pageMaker, ctx, completedDirUpload, new int[] { LIST_IDENTIFIER, LIST_FILES, LIST_TOTAL_SIZE, LIST_PERSISTENCE, LIST_KEY }, priorityClasses, advancedModeEnabled, true));
+				completedUploadDirContent.addChild(createRequestTable(pageMaker, ctx, completedDirUpload, new int[] { LIST_IDENTIFIER, LIST_FILES, LIST_TOTAL_SIZE, LIST_PERSISTENCE, LIST_KEY }, priorityClasses, advancedModeEnabled, true, container));
			} else {
-				completedUploadDirContent.addChild(createRequestTable(pageMaker, ctx, completedDirUpload, new int[] { LIST_FILES, LIST_TOTAL_SIZE, LIST_PERSISTENCE, LIST_KEY }, priorityClasses, advancedModeEnabled, true));
+				completedUploadDirContent.addChild(createRequestTable(pageMaker, ctx, completedDirUpload, new int[] { LIST_FILES, LIST_TOTAL_SIZE, LIST_PERSISTENCE, LIST_KEY }, priorityClasses, advancedModeEnabled, true, container));
}
}
@@ -801,9 +862,9 @@
			HTMLNode failedInfobox = contentNode.addChild(pageMaker.getInfobox("failed_requests", L10n.getString("QueueToadlet.failedD", new String[]{ "size" }, new String[]{ String.valueOf(failedDownload.size()) })));
			HTMLNode failedContent = pageMaker.getContentNode(failedInfobox);
			if (advancedModeEnabled) {
-				failedContent.addChild(createRequestTable(pageMaker, ctx, failedDownload, new int[] { LIST_IDENTIFIER, LIST_FILENAME, LIST_SIZE, LIST_MIME_TYPE, LIST_PROGRESS, LIST_REASON, LIST_PERSISTENCE, LIST_KEY }, priorityClasses, advancedModeEnabled, false));
+				failedContent.addChild(createRequestTable(pageMaker, ctx, failedDownload, new int[] { LIST_IDENTIFIER, LIST_FILENAME, LIST_SIZE, LIST_MIME_TYPE, LIST_PROGRESS, LIST_REASON, LIST_PERSISTENCE, LIST_KEY }, priorityClasses, advancedModeEnabled, false, container));
			} else {
-				failedContent.addChild(createRequestTable(pageMaker, ctx, failedDownload, new int[] { LIST_FILENAME, LIST_SIZE, LIST_MIME_TYPE, LIST_PROGRESS, LIST_REASON, LIST_PERSISTENCE, LIST_KEY }, priorityClasses, advancedModeEnabled, false));
+				failedContent.addChild(createRequestTable(pageMaker, ctx, failedDownload, new int[] { LIST_FILENAME, LIST_SIZE, LIST_MIME_TYPE, LIST_PROGRESS, LIST_REASON, LIST_PERSISTENCE, LIST_KEY }, priorityClasses, advancedModeEnabled, false, container));
}
}
@@ -812,9 +873,9 @@
			HTMLNode failedInfobox = contentNode.addChild(pageMaker.getInfobox("failed_requests", L10n.getString("QueueToadlet.failedU", new String[]{ "size" }, new String[]{ String.valueOf(failedUpload.size()) })));
			HTMLNode failedContent = pageMaker.getContentNode(failedInfobox);
			if (advancedModeEnabled) {
-				failedContent.addChild(createRequestTable(pageMaker, ctx, failedUpload, new int[] { LIST_IDENTIFIER, LIST_FILENAME, LIST_SIZE, LIST_MIME_TYPE, LIST_PROGRESS, LIST_REASON, LIST_PERSISTENCE, LIST_KEY }, priorityClasses, advancedModeEnabled, true));
+				failedContent.addChild(createRequestTable(pageMaker, ctx, failedUpload, new int[] { LIST_IDENTIFIER, LIST_FILENAME, LIST_SIZE, LIST_MIME_TYPE, LIST_PROGRESS, LIST_REASON, LIST_PERSISTENCE, LIST_KEY }, priorityClasses, advancedModeEnabled, true, container));
			} else {
-				failedContent.addChild(createRequestTable(pageMaker, ctx, failedUpload, new int[] { LIST_FILENAME, LIST_SIZE, LIST_MIME_TYPE, LIST_PROGRESS, LIST_REASON, LIST_PERSISTENCE, LIST_KEY }, priorityClasses, advancedModeEnabled, true));
+				failedContent.addChild(createRequestTable(pageMaker, ctx, failedUpload, new int[] { LIST_FILENAME, LIST_SIZE, LIST_MIME_TYPE, LIST_PROGRESS, LIST_REASON, LIST_PERSISTENCE, LIST_KEY }, priorityClasses, advancedModeEnabled, true, container));
}
}
@@ -823,9 +884,9 @@
			HTMLNode failedInfobox = contentNode.addChild(pageMaker.getInfobox("failed_requests", L10n.getString("QueueToadlet.failedU", new String[]{ "size" }, new String[]{ String.valueOf(failedDirUpload.size()) })));
			HTMLNode failedContent = pageMaker.getContentNode(failedInfobox);
			if (advancedModeEnabled) {
-				failedContent.addChild(createRequestTable(pageMaker, ctx, failedDirUpload, new int[] { LIST_IDENTIFIER, LIST_FILES, LIST_TOTAL_SIZE, LIST_PROGRESS, LIST_REASON, LIST_PERSISTENCE, LIST_KEY }, priorityClasses, advancedModeEnabled, true));
+				failedContent.addChild(createRequestTable(pageMaker, ctx, failedDirUpload, new int[] { LIST_IDENTIFIER, LIST_FILES, LIST_TOTAL_SIZE, LIST_PROGRESS, LIST_REASON, LIST_PERSISTENCE, LIST_KEY }, priorityClasses, advancedModeEnabled, true, container));
			} else {
-				failedContent.addChild(createRequestTable(pageMaker, ctx, failedDirUpload, new int[] { LIST_FILES, LIST_TOTAL_SIZE, LIST_PROGRESS, LIST_REASON, LIST_PERSISTENCE, LIST_KEY }, priorityClasses, advancedModeEnabled, true));
+				failedContent.addChild(createRequestTable(pageMaker, ctx, failedDirUpload, new int[] { LIST_FILES, LIST_TOTAL_SIZE, LIST_PROGRESS, LIST_REASON, LIST_PERSISTENCE, LIST_KEY }, priorityClasses, advancedModeEnabled, true, container));
}
}
@@ -834,9 +895,9 @@
			HTMLNode uncompletedInfobox = contentNode.addChild(pageMaker.getInfobox("requests_in_progress", L10n.getString("QueueToadlet.wipD", new String[]{ "size" }, new String[]{ String.valueOf(uncompletedDownload.size()) })));
			HTMLNode uncompletedContent = pageMaker.getContentNode(uncompletedInfobox);
			if (advancedModeEnabled) {
-				uncompletedContent.addChild(createRequestTable(pageMaker, ctx, uncompletedDownload, new int[] { LIST_IDENTIFIER, LIST_PRIORITY, LIST_SIZE, LIST_MIME_TYPE, LIST_PROGRESS, LIST_PERSISTENCE, LIST_FILENAME, LIST_KEY }, priorityClasses, advancedModeEnabled, false));
+				uncompletedContent.addChild(createRequestTable(pageMaker, ctx, uncompletedDownload, new int[] { LIST_IDENTIFIER, LIST_PRIORITY, LIST_SIZE, LIST_MIME_TYPE, LIST_PROGRESS, LIST_PERSISTENCE, LIST_FILENAME, LIST_KEY }, priorityClasses, advancedModeEnabled, false, container));
			} else {
-				uncompletedContent.addChild(createRequestTable(pageMaker, ctx, uncompletedDownload, new int[] { LIST_FILENAME, LIST_SIZE, LIST_MIME_TYPE, LIST_PROGRESS, LIST_PRIORITY, LIST_KEY, LIST_PERSISTENCE }, priorityClasses, advancedModeEnabled, false));
+				uncompletedContent.addChild(createRequestTable(pageMaker, ctx, uncompletedDownload, new int[] { LIST_FILENAME, LIST_SIZE, LIST_MIME_TYPE, LIST_PROGRESS, LIST_PRIORITY, LIST_KEY, LIST_PERSISTENCE }, priorityClasses, advancedModeEnabled, false, container));
}
}
@@ -845,9 +906,9 @@
			HTMLNode uncompletedInfobox = contentNode.addChild(pageMaker.getInfobox("requests_in_progress", L10n.getString("QueueToadlet.wipU", new String[]{ "size" }, new String[]{ String.valueOf(uncompletedUpload.size()) })));
			HTMLNode uncompletedContent = pageMaker.getContentNode(uncompletedInfobox);
			if (advancedModeEnabled) {
-				uncompletedContent.addChild(createRequestTable(pageMaker, ctx, uncompletedUpload, new int[] { LIST_IDENTIFIER, LIST_PRIORITY, LIST_SIZE, LIST_MIME_TYPE, LIST_PROGRESS, LIST_PERSISTENCE, LIST_FILENAME, LIST_KEY }, priorityClasses, advancedModeEnabled, true));
+				uncompletedContent.addChild(createRequestTable(pageMaker, ctx, uncompletedUpload, new int[] { LIST_IDENTIFIER, LIST_PRIORITY, LIST_SIZE, LIST_MIME_TYPE, LIST_PROGRESS, LIST_PERSISTENCE, LIST_FILENAME, LIST_KEY }, priorityClasses, advancedModeEnabled, true, container));
			} else {
-				uncompletedContent.addChild(createRequestTable(pageMaker, ctx, uncompletedUpload, new int[] { LIST_FILENAME, LIST_SIZE, LIST_MIME_TYPE, LIST_PROGRESS, LIST_PRIORITY, LIST_KEY, LIST_PRIORITY, LIST_PERSISTENCE }, priorityClasses, advancedModeEnabled, true));
+				uncompletedContent.addChild(createRequestTable(pageMaker, ctx, uncompletedUpload, new int[] { LIST_FILENAME, LIST_SIZE, LIST_MIME_TYPE, LIST_PROGRESS, LIST_PRIORITY, LIST_KEY, LIST_PRIORITY, LIST_PERSISTENCE }, priorityClasses, advancedModeEnabled, true, container));
}
}
@@ -856,16 +917,15 @@
			HTMLNode uncompletedInfobox = contentNode.addChild(pageMaker.getInfobox("requests_in_progress", L10n.getString("QueueToadlet.wipDU", new String[]{ "size" }, new String[]{ String.valueOf(uncompletedDirUpload.size()) })));
			HTMLNode uncompletedContent = pageMaker.getContentNode(uncompletedInfobox);
			if (advancedModeEnabled) {
-				uncompletedContent.addChild(createRequestTable(pageMaker, ctx, uncompletedDirUpload, new int[] { LIST_IDENTIFIER, LIST_FILES, LIST_PRIORITY, LIST_TOTAL_SIZE, LIST_PROGRESS, LIST_PERSISTENCE, LIST_KEY }, priorityClasses, advancedModeEnabled, true));
+				uncompletedContent.addChild(createRequestTable(pageMaker, ctx, uncompletedDirUpload, new int[] { LIST_IDENTIFIER, LIST_FILES, LIST_PRIORITY, LIST_TOTAL_SIZE, LIST_PROGRESS, LIST_PERSISTENCE, LIST_KEY }, priorityClasses, advancedModeEnabled, true, container));
			} else {
-				uncompletedContent.addChild(createRequestTable(pageMaker, ctx, uncompletedDirUpload, new int[] { LIST_FILES, LIST_TOTAL_SIZE, LIST_PROGRESS, LIST_PRIORITY, LIST_KEY, LIST_PERSISTENCE }, priorityClasses, advancedModeEnabled, true));
+				uncompletedContent.addChild(createRequestTable(pageMaker, ctx, uncompletedDirUpload, new int[] { LIST_FILES, LIST_TOTAL_SIZE, LIST_PROGRESS, LIST_PRIORITY, LIST_KEY, LIST_PERSISTENCE }, priorityClasses, advancedModeEnabled, true, container));
}
}
contentNode.addChild(createBulkDownloadForm(ctx, pageMaker));
-
-		MultiValueTable<String, String> pageHeaders = new MultiValueTable<String, String>();
-		writeHTMLReply(ctx, 200, "OK", pageHeaders, pageNode.generate());
+
+ return pageNode;
}
@@ -1005,9 +1065,9 @@
return persistenceCell;
}
-	private HTMLNode createDownloadCell(PageMaker pageMaker, ClientGet p) {
+	private HTMLNode createDownloadCell(PageMaker pageMaker, ClientGet p, ObjectContainer container) {
		HTMLNode downloadCell = new HTMLNode("td", "class", "request-download");
-		downloadCell.addChild("a", "href", p.getURI().toString(), L10n.getString("QueueToadlet.download"));
+		downloadCell.addChild("a", "href", p.getURI(container).toString(), L10n.getString("QueueToadlet.download"));
return downloadCell;
}
@@ -1085,8 +1145,7 @@
return downloadBox;
}
-	private HTMLNode createRequestTable(PageMaker pageMaker, ToadletContext ctx, List<ClientRequest> requests,
-			int[] columns, String[] priorityClasses, boolean advancedModeEnabled, boolean isUpload) {
+	private HTMLNode createRequestTable(PageMaker pageMaker, ToadletContext ctx, List<ClientRequest> requests, int[] columns, String[] priorityClasses, boolean advancedModeEnabled, boolean isUpload, ObjectContainer container) {
		HTMLNode table = new HTMLNode("table", "class", "requests");
		HTMLNode headerRow = table.addChild("tr", "class", "table-header");
headerRow.addChild("th");
@@ -1127,23 +1186,23 @@
				int column = columns[columnIndex];
				if (column == LIST_IDENTIFIER) {
					if (clientRequest instanceof ClientGet) {
-						requestRow.addChild(createIdentifierCell(((ClientGet) clientRequest).getURI(), clientRequest.getIdentifier(), false));
+						requestRow.addChild(createIdentifierCell(((ClientGet) clientRequest).getURI(container), clientRequest.getIdentifier(), false));
					} else if (clientRequest instanceof ClientPutDir) {
-						requestRow.addChild(createIdentifierCell(((ClientPutDir) clientRequest).getFinalURI(), clientRequest.getIdentifier(), true));
+						requestRow.addChild(createIdentifierCell(((ClientPutDir) clientRequest).getFinalURI(container), clientRequest.getIdentifier(), true));
					} else if (clientRequest instanceof ClientPut) {
-						requestRow.addChild(createIdentifierCell(((ClientPut) clientRequest).getFinalURI(), clientRequest.getIdentifier(), false));
+						requestRow.addChild(createIdentifierCell(((ClientPut) clientRequest).getFinalURI(container), clientRequest.getIdentifier(), false));
					}
				} else if (column == LIST_SIZE) {
					if (clientRequest instanceof ClientGet) {
-						requestRow.addChild(createSizeCell(((ClientGet) clientRequest).getDataSize(), ((ClientGet) clientRequest).isTotalFinalized(), advancedModeEnabled));
+						requestRow.addChild(createSizeCell(((ClientGet) clientRequest).getDataSize(container), ((ClientGet) clientRequest).isTotalFinalized(container), advancedModeEnabled));
					} else if (clientRequest instanceof ClientPut) {
-						requestRow.addChild(createSizeCell(((ClientPut) clientRequest).getDataSize(), true, advancedModeEnabled));
+						requestRow.addChild(createSizeCell(((ClientPut) clientRequest).getDataSize(container), true, advancedModeEnabled));
					}
				} else if (column == LIST_DOWNLOAD) {
-					requestRow.addChild(createDownloadCell(pageMaker, (ClientGet) clientRequest));
+					requestRow.addChild(createDownloadCell(pageMaker, (ClientGet) clientRequest, container));
				} else if (column == LIST_MIME_TYPE) {
					if (clientRequest instanceof ClientGet) {
-						requestRow.addChild(createTypeCell(((ClientGet) clientRequest).getMIMEType()));
+						requestRow.addChild(createTypeCell(((ClientGet) clientRequest).getMIMEType(container)));
					} else if (clientRequest instanceof ClientPut) {
						requestRow.addChild(createTypeCell(((ClientPut) clientRequest).getMIMEType()));
					}
@@ -1151,11 +1210,11 @@
					requestRow.addChild(createPersistenceCell(clientRequest.isPersistent(), clientRequest.isPersistentForever()));
				} else if (column == LIST_KEY) {
					if (clientRequest instanceof ClientGet) {
-						requestRow.addChild(createKeyCell(((ClientGet) clientRequest).getURI(), false));
+						requestRow.addChild(createKeyCell(((ClientGet) clientRequest).getURI(container), false));
					} else if (clientRequest instanceof ClientPut) {
-						requestRow.addChild(createKeyCell(((ClientPut) clientRequest).getFinalURI(), false));
+						requestRow.addChild(createKeyCell(((ClientPut) clientRequest).getFinalURI(container), false));
					}else {
-						requestRow.addChild(createKeyCell(((ClientPutDir) clientRequest).getFinalURI(), true));
+						requestRow.addChild(createKeyCell(((ClientPutDir) clientRequest).getFinalURI(container), true));
					}
				} else if (column == LIST_FILENAME) {
					if (clientRequest instanceof ClientGet) {
@@ -1170,9 +1229,9 @@
				} else if (column == LIST_TOTAL_SIZE) {
					requestRow.addChild(createSizeCell(((ClientPutDir) clientRequest).getTotalDataSize(), true, advancedModeEnabled));
				} else if (column == LIST_PROGRESS) {
-					requestRow.addChild(createProgressCell(clientRequest.isStarted(), (int) clientRequest.getFetchedBlocks(), (int) clientRequest.getFailedBlocks(), (int) clientRequest.getFatalyFailedBlocks(), (int) clientRequest.getMinBlocks(), (int) clientRequest.getTotalBlocks(), clientRequest.isTotalFinalized() || clientRequest instanceof ClientPut, isUpload));
+					requestRow.addChild(createProgressCell(clientRequest.isStarted(), (int) clientRequest.getFetchedBlocks(container), (int) clientRequest.getFailedBlocks(container), (int) clientRequest.getFatalyFailedBlocks(container), (int) clientRequest.getMinBlocks(container), (int) clientRequest.getTotalBlocks(container), clientRequest.isTotalFinalized(container) || clientRequest instanceof ClientPut, isUpload));
				} else if (column == LIST_REASON) {
-					requestRow.addChild(createReasonCell(clientRequest.getFailureReason()));
+					requestRow.addChild(createReasonCell(clientRequest.getFailureReason(container)));
				}
			}
		}
@@ -1187,19 +1246,19 @@
/**
	 * List of completed request identifiers which the user hasn't acknowledged yet.
	 */
-	private final HashSet<String> completedRequestIdentifiers = new HashSet<String>();
+	private final HashSet<String> completedRequestIdentifiers = new HashSet();
-	private final HashMap<String, UserAlert> alertsByIdentifier = new HashMap<String, UserAlert>();
+	private final HashMap<String, UserAlert> alertsByIdentifier = new HashMap();
-	public void notifyFailure(ClientRequest req) {
+	public void notifyFailure(ClientRequest req, ObjectContainer container) {
		// FIXME do something???
	}
-	public void notifySuccess(ClientRequest req) {
+	public void notifySuccess(ClientRequest req, ObjectContainer container) {
		synchronized(completedRequestIdentifiers) {
			completedRequestIdentifiers.add(req.getIdentifier());
		}
-		registerAlert(req);
+		registerAlert(req, container); // should be safe here
		saveCompletedIdentifiersOffThread();
	}
@@ -1217,20 +1276,26 @@
if(!readCompletedIdentifiers(completedIdentifiersList)) {
readCompletedIdentifiers(completedIdentifiersListNew);
}
-		String[] identifiers;
-		synchronized(completedRequestIdentifiers) {
-			identifiers = completedRequestIdentifiers.toArray(new String[completedRequestIdentifiers.size()]);
-		}
-		for(int i=0;i<identifiers.length;i++) {
-			ClientRequest req = fcp.getGlobalClient().getRequest(identifiers[i]);
-			if(req == null) {
+		core.clientContext.jobRunner.queue(new DBJob() {
+
+			public void run(ObjectContainer container, ClientContext context) {
+				String[] identifiers;
				synchronized(completedRequestIdentifiers) {
-					completedRequestIdentifiers.remove(identifiers[i]);
+					identifiers = completedRequestIdentifiers.toArray(new String[completedRequestIdentifiers.size()]);
				}
-				continue;
+				for(int i=0;i<identifiers.length;i++) {
+					ClientRequest req = fcp.getGlobalRequest(identifiers[i], container);
+					if(req == null) {
+						synchronized(completedRequestIdentifiers) {
+							completedRequestIdentifiers.remove(identifiers[i]);
+						}
+						continue;
+					}
+					registerAlert(req, container);
+				}
			}
-			registerAlert(req);
-		}
+
+		}, NativeThread.HIGH_PRIORITY, false);
}
private boolean readCompletedIdentifiers(File file) {
@@ -1317,7 +1382,7 @@
}
}
-	private void registerAlert(ClientRequest req) {
+	private void registerAlert(ClientRequest req, ObjectContainer container) {
final String identifier = req.getIdentifier();
boolean logMINOR = Logger.shouldLog(Logger.MINOR, this);
if(logMINOR)
@@ -1328,8 +1393,10 @@
return;
}
if(req instanceof ClientGet) {
- FreenetURI uri = ((ClientGet)req).getURI();
- long size = ((ClientGet)req).getDataSize();
+ FreenetURI uri = ((ClientGet)req).getURI(container);
+ if(req.isPersistentForever() && uri != null)
+ container.activate(uri, 5);
+ long size = ((ClientGet)req).getDataSize(container);
String name = uri.getPreferredFilename();
String title = l10n("downloadSucceededTitle",
"filename", name);
HTMLNode text = new HTMLNode("div");
@@ -1358,19 +1425,21 @@
alertsByIdentifier.put(identifier, alert);
}
} else if(req instanceof ClientPut) {
- FreenetURI uri = ((ClientPut)req).getFinalURI();
+			FreenetURI uri = ((ClientPut)req).getFinalURI(container);
+			if(req.isPersistentForever() && uri != null)
+				container.activate(uri, 5);
			if(uri == null) {
				Logger.error(this, "No URI for supposedly finished request "+req);
				return;
			}
-			long size = ((ClientPut)req).getDataSize();
+			long size = ((ClientPut)req).getDataSize(container);
			String name = uri.getPreferredFilename();
			String title = l10n("uploadSucceededTitle", "filename", name);
			HTMLNode text = new HTMLNode("div");
			L10n.addL10nSubstitution(text, "QueueToadlet.uploadSucceeded",
					new String[] { "link", "/link", "filename", "size" },
					new String[] { "<a href=\"/"+uri.toASCIIString()+"\">", "</a>", name, SizeUtil.formatSize(size) });
-			UserAlert alert =
+			UserAlert alert =
				new SimpleHTMLUserAlert(true, title, title, text, UserAlert.MINOR) {
@Override
public void onDismiss() {
@@ -1392,7 +1461,13 @@
alertsByIdentifier.put(identifier, alert);
}
} else if(req instanceof ClientPutDir) {
-			FreenetURI uri = ((ClientPutDir)req).getFinalURI();
+			FreenetURI uri = ((ClientPutDir)req).getFinalURI(container);
+			if(req.isPersistentForever() && uri != null)
+				container.activate(uri, 5);
+			if(uri == null) {
+				Logger.error(this, "No URI for supposedly finished request "+req);
+				return;
+			}
long size = ((ClientPutDir)req).getTotalDataSize();
int files = ((ClientPutDir)req).getNumberOfFiles();
String name = uri.getPreferredFilename();
@@ -1429,7 +1504,7 @@
return L10n.getString("QueueToadlet."+key, pattern, value);
}
- public void onRemove(ClientRequest req) {
+ public void onRemove(ClientRequest req, ObjectContainer container) {
String identifier = req.getIdentifier();
synchronized(completedRequestIdentifiers) {
completedRequestIdentifiers.remove(identifier);
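Note on the handleGet() rewrite above: all page generation now runs on the database thread, while the HTTP thread queues a DBJob and blocks on a shared holder object until the job publishes its result via notifyAll(). A minimal, self-contained sketch of that hand-off idiom in plain Java (ResultHolder is illustrative, not part of the Freenet API):

	final class ResultHolder<T> {
		private boolean done;
		private T value;

		// Called by the worker thread when the job finishes.
		synchronized void complete(T v) {
			value = v;
			done = true;
			notifyAll();
		}

		// Called by the requesting thread; loops to survive spurious wakeups
		// and stray interrupts, exactly as the OutputWrapper loop above does.
		synchronized T await() {
			while(!done) {
				try {
					wait();
				} catch (InterruptedException e) {
					// Ignore and keep waiting.
				}
			}
			return value;
		}
	}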
Modified: trunk/freenet/src/freenet/clients/http/StatisticsToadlet.java
===================================================================
--- trunk/freenet/src/freenet/clients/http/StatisticsToadlet.java
2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/clients/http/StatisticsToadlet.java
2009-04-01 20:34:09 UTC (rev 26322)
@@ -295,8 +295,13 @@
// rejection reasons box
drawRejectReasonsBox(nextTableCell, false);
- drawRejectReasonsBox(nextTableCell, true);
-
+ drawRejectReasonsBox(nextTableCell, true);
+
+ // database thread jobs box
+
+			HTMLNode databaseJobsInfobox = nextTableCell.addChild("div", "class", "infobox");
+			drawDatabaseJobsBox(databaseJobsInfobox);
+
			// peer distribution box
			overviewTableRow = overviewTable.addChild("tr");
			nextTableCell = overviewTableRow.addChild("td", "class", "first");
@@ -430,6 +435,24 @@
}
}
+	private void drawDatabaseJobsBox(HTMLNode node) {
+		node.addChild("div", "class", "infobox-header", l10n("databaseJobsByPriority"));
+		HTMLNode threadsInfoboxContent = node.addChild("div", "class", "infobox-content");
+		int[] jobsByPriority = core.clientDatabaseExecutor.runningJobs();
+
+		HTMLNode threadsByPriorityTable = threadsInfoboxContent.addChild("table", "border", "0");
+		HTMLNode row = threadsByPriorityTable.addChild("tr");
+
+		row.addChild("th", l10n("priority"));
+		row.addChild("th", l10n("waiting"));
+
+		for(int i=0; i<jobsByPriority.length; i++) {
+			row = threadsByPriorityTable.addChild("tr");
+			row.addChild("td", String.valueOf(i));
+			row.addChild("td", String.valueOf(jobsByPriority[i]));
+		}
+	}
+
	private void drawStoreSizeBox(HTMLNode storeSizeInfobox, double loc, long nodeUptimeSeconds) {
		storeSizeInfobox.addChild("div", "class", "infobox-header", "Datastore");
Modified: trunk/freenet/src/freenet/clients/http/Toadlet.java
===================================================================
--- trunk/freenet/src/freenet/clients/http/Toadlet.java 2009-04-01 20:12:14 UTC
(rev 26321)
+++ trunk/freenet/src/freenet/clients/http/Toadlet.java 2009-04-01 20:34:09 UTC
(rev 26322)
@@ -16,6 +16,7 @@
import freenet.client.InsertException;
import freenet.keys.FreenetURI;
import freenet.l10n.L10n;
+import freenet.node.RequestClient;
import freenet.support.HTMLEncoder;
import freenet.support.HTMLNode;
import freenet.support.Logger;
@@ -108,7 +109,7 @@
	 * for any two unrelated requests. Request selection round-robins over these, within any priority and retry count class,
	 * and above the level of individual block fetches.
*/
-	FetchResult fetch(FreenetURI uri, long maxSize, Object clientContext) throws FetchException {
+	FetchResult fetch(FreenetURI uri, long maxSize, RequestClient clientContext) throws FetchException {
// For now, just run it blocking.
return client.fetch(uri, maxSize, clientContext);
}
Modified: trunk/freenet/src/freenet/clients/http/bookmark/BookmarkManager.java
===================================================================
--- trunk/freenet/src/freenet/clients/http/bookmark/BookmarkManager.java
2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/clients/http/bookmark/BookmarkManager.java
2009-04-01 20:34:09 UTC (rev 26322)
@@ -10,21 +10,26 @@
import java.net.MalformedURLException;
import java.util.Date;
import java.util.HashMap;
+
+import com.db4o.ObjectContainer;
+
import java.util.List;
+import freenet.client.async.ClientContext;
import freenet.client.async.USKCallback;
import freenet.keys.FreenetURI;
import freenet.keys.USK;
import freenet.l10n.L10n;
import freenet.node.FSParseException;
import freenet.node.NodeClientCore;
+import freenet.node.RequestClient;
import freenet.node.RequestStarter;
import freenet.support.Logger;
import freenet.support.SimpleFieldSet;
import freenet.support.io.Closer;
import freenet.support.io.FileUtil;
-public class BookmarkManager {
+public class BookmarkManager implements RequestClient {
public static final SimpleFieldSet DEFAULT_BOOKMARKS;
private final NodeClientCore node;
@@ -92,7 +97,7 @@
private class USKUpdatedCallback implements USKCallback {
-		public void onFoundEdition(long edition, USK key) {
+		public void onFoundEdition(long edition, USK key, ObjectContainer container, ClientContext context, boolean wasMetadata, short codec, byte[] data) {
List<BookmarkItem> items = MAIN_CATEGORY.getAllItems();
for(int i = 0; i < items.size(); i++) {
if(!"USK".equals(items.get(i).getKeyType()))
@@ -368,4 +373,12 @@
return sfs;
}
+
+ public boolean persistent() {
+ return false;
+ }
+
+ public void removeFrom(ObjectContainer container) {
+ throw new UnsupportedOperationException();
+ }
}
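BookmarkManager now doubles as the RequestClient for its USK subscriptions: bookmarks are never stored in the database, so persistent() is false and removeFrom() can never legitimately be reached. A hedged sketch of the same minimal transient implementation, for any client that only makes in-memory requests (the class name is illustrative):

	import com.db4o.ObjectContainer;

	import freenet.node.RequestClient;

	public class TransientRequestClient implements RequestClient {

		public boolean persistent() {
			return false; // requests from this client are never written to db4o
		}

		public void removeFrom(ObjectContainer container) {
			// Only persistent clients live in the database; reaching this is a bug.
			throw new UnsupportedOperationException();
		}
	}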
Modified: trunk/freenet/src/freenet/clients/http/filter/CSSReadFilter.java
===================================================================
--- trunk/freenet/src/freenet/clients/http/filter/CSSReadFilter.java
2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/clients/http/filter/CSSReadFilter.java
2009-04-01 20:34:09 UTC (rev 26322)
@@ -35,7 +35,7 @@
+ ','
+ charset);
InputStream strm = bucket.getInputStream();
- Bucket temp = bf.makeBucket(bucket.size());
+ Bucket temp = bf.makeBucket(-1);
OutputStream os = temp.getOutputStream();
Reader r = null;
Writer w = null;
Modified: trunk/freenet/src/freenet/clients/http/filter/HTMLFilter.java
===================================================================
--- trunk/freenet/src/freenet/clients/http/filter/HTMLFilter.java
2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/clients/http/filter/HTMLFilter.java
2009-04-01 20:34:09 UTC (rev 26322)
@@ -50,7 +50,7 @@
if(logMINOR) Logger.minor(this, "readFilter():
charset="+charset);
InputStream strm = bucket.getInputStream();
BufferedInputStream bis = new BufferedInputStream(strm, 4096);
- Bucket temp = bf.makeBucket(bucket.size());
+ Bucket temp = bf.makeBucket(-1);
OutputStream os = temp.getOutputStream();
BufferedOutputStream bos = new BufferedOutputStream(os, 4096);
Reader r = null;
Modified: trunk/freenet/src/freenet/clients/http/filter/JPEGFilter.java
===================================================================
--- trunk/freenet/src/freenet/clients/http/filter/JPEGFilter.java
2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/clients/http/filter/JPEGFilter.java
2009-04-01 20:34:09 UTC (rev 26322)
@@ -68,7 +68,7 @@
return output;
if(Logger.shouldLog(Logger.MINOR, this))
Logger.minor(this, "Need to modify JPEG...");
- Bucket filtered = bf.makeBucket(data.size());
+ Bucket filtered = bf.makeBucket(-1);
		OutputStream os = new BufferedOutputStream(filtered.getOutputStream());
		try {
			readFilter(data, bf, charset, otherParams, cb, deleteComments, deleteExif, os);
Modified: trunk/freenet/src/freenet/clients/http/filter/PNGFilter.java
===================================================================
--- trunk/freenet/src/freenet/clients/http/filter/PNGFilter.java
2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/clients/http/filter/PNGFilter.java
2009-04-01 20:34:09 UTC (rev 26322)
@@ -74,7 +74,7 @@
return output;
if (Logger.shouldLog(Logger.MINOR, this))
Logger.minor(this, "Need to modify PNG...");
- Bucket filtered = bf.makeBucket(data.size());
+ Bucket filtered = bf.makeBucket(-1);
		OutputStream os = new BufferedOutputStream(filtered.getOutputStream());
		try {
			readFilter(data, bf, charset, otherParams, cb, deleteText, deleteTimestamp, checkCRCs, os);
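The four filter changes above share one idea: the filtered output length cannot be predicted from the input size, so the temporary bucket is now requested with -1, which the diff implies means "size unknown" to the BucketFactory. A rough sketch of the copy-through-temp-bucket shape these filters use (package locations for Bucket and BucketFactory are assumed; the pass-through loop stands in for the real rewriting logic):

	import java.io.InputStream;
	import java.io.OutputStream;

	import freenet.support.api.Bucket;
	import freenet.support.api.BucketFactory;

	class FilterSketch {
		static Bucket filterToTemp(Bucket input, BucketFactory bf) throws Exception {
			Bucket temp = bf.makeBucket(-1); // -1: output size not known in advance
			InputStream in = input.getInputStream();
			OutputStream out = temp.getOutputStream();
			try {
				byte[] buf = new byte[4096];
				int n;
				while((n = in.read(buf)) > 0)
					out.write(buf, 0, n); // a real filter rewrites bytes here
			} finally {
				in.close();
				out.close();
			}
			return temp;
		}
	}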
Copied: trunk/freenet/src/freenet/config/NullBooleanCallback.java (from rev
26320, branches/db4o/freenet/src/freenet/config/NullBooleanCallback.java)
===================================================================
--- trunk/freenet/src/freenet/config/NullBooleanCallback.java
(rev 0)
+++ trunk/freenet/src/freenet/config/NullBooleanCallback.java 2009-04-01
20:34:09 UTC (rev 26322)
@@ -0,0 +1,15 @@
+package freenet.config;
+
+import freenet.support.api.BooleanCallback;
+
+public class NullBooleanCallback extends BooleanCallback {
+
+ public Boolean get() {
+ return false;
+ }
+
+ public void set(Boolean val) throws InvalidConfigValueException {
+ // Ignore
+ }
+
+}
Copied: trunk/freenet/src/freenet/config/NullIntCallback.java (from rev 26320,
branches/db4o/freenet/src/freenet/config/NullIntCallback.java)
===================================================================
--- trunk/freenet/src/freenet/config/NullIntCallback.java
(rev 0)
+++ trunk/freenet/src/freenet/config/NullIntCallback.java 2009-04-01
20:34:09 UTC (rev 26322)
@@ -0,0 +1,15 @@
+package freenet.config;
+
+import freenet.support.api.IntCallback;
+
+public class NullIntCallback extends IntCallback {
+
+ public Integer get() {
+ return 0;
+ }
+
+ public void set(Integer val) throws InvalidConfigValueException {
+ // Ignore
+ }
+
+}
Copied: trunk/freenet/src/freenet/config/NullLongCallback.java (from rev 26320,
branches/db4o/freenet/src/freenet/config/NullLongCallback.java)
===================================================================
--- trunk/freenet/src/freenet/config/NullLongCallback.java
(rev 0)
+++ trunk/freenet/src/freenet/config/NullLongCallback.java 2009-04-01
20:34:09 UTC (rev 26322)
@@ -0,0 +1,15 @@
+package freenet.config;
+
+import freenet.support.api.LongCallback;
+
+public class NullLongCallback extends LongCallback {
+
+ public Long get() {
+ return 0L;
+ }
+
+ public void set(Long val) throws InvalidConfigValueException {
+ // Ignore
+ }
+
+}
Copied: trunk/freenet/src/freenet/config/NullShortCallback.java (from rev
26320, branches/db4o/freenet/src/freenet/config/NullShortCallback.java)
===================================================================
--- trunk/freenet/src/freenet/config/NullShortCallback.java
(rev 0)
+++ trunk/freenet/src/freenet/config/NullShortCallback.java 2009-04-01
20:34:09 UTC (rev 26322)
@@ -0,0 +1,15 @@
+package freenet.config;
+
+import freenet.support.api.ShortCallback;
+
+public class NullShortCallback extends ShortCallback {
+
+ public Short get() {
+ return 0;
+ }
+
+ public void set(Short val) throws InvalidConfigValueException {
+ // Ignore
+ }
+
+}
Copied: trunk/freenet/src/freenet/config/NullStringCallback.java (from rev
26320, branches/db4o/freenet/src/freenet/config/NullStringCallback.java)
===================================================================
--- trunk/freenet/src/freenet/config/NullStringCallback.java
(rev 0)
+++ trunk/freenet/src/freenet/config/NullStringCallback.java 2009-04-01
20:34:09 UTC (rev 26322)
@@ -0,0 +1,15 @@
+package freenet.config;
+
+import freenet.support.api.StringCallback;
+
+public class NullStringCallback extends StringCallback {
+
+ public String get() {
+ return "";
+ }
+
+ public void set(String val) throws InvalidConfigValueException {
+ // Ignore
+ }
+
+}
Modified: trunk/freenet/src/freenet/config/SubConfig.java
===================================================================
--- trunk/freenet/src/freenet/config/SubConfig.java 2009-04-01 20:12:14 UTC
(rev 26321)
+++ trunk/freenet/src/freenet/config/SubConfig.java 2009-04-01 20:34:09 UTC
(rev 26322)
@@ -62,36 +62,43 @@
	public void register(String optionName, int defaultValue, int sortOrder,
			boolean expert, boolean forceWrite, String shortDesc, String longDesc, IntCallback cb) {
+		if(cb == null) cb = new NullIntCallback();
		register(new IntOption(this, optionName, defaultValue, sortOrder, expert, forceWrite, shortDesc, longDesc, cb));
	}
	public void register(String optionName, long defaultValue, int sortOrder,
			boolean expert, boolean forceWrite, String shortDesc, String longDesc, LongCallback cb) {
+		if(cb == null) cb = new NullLongCallback();
		register(new LongOption(this, optionName, defaultValue, sortOrder, expert, forceWrite, shortDesc, longDesc, cb));
	}
	public void register(String optionName, String defaultValueString, int sortOrder,
			boolean expert, boolean forceWrite, String shortDesc, String longDesc, IntCallback cb) {
+		if(cb == null) cb = new NullIntCallback();
		register(new IntOption(this, optionName, defaultValueString, sortOrder, expert, forceWrite, shortDesc, longDesc, cb));
	}
	public void register(String optionName, String defaultValueString, int sortOrder,
			boolean expert, boolean forceWrite, String shortDesc, String longDesc, LongCallback cb) {
+		if(cb == null) cb = new NullLongCallback();
		register(new LongOption(this, optionName, defaultValueString, sortOrder, expert, forceWrite, shortDesc, longDesc, cb));
	}
	public void register(String optionName, boolean defaultValue, int sortOrder,
			boolean expert, boolean forceWrite, String shortDesc, String longDesc, BooleanCallback cb) {
+		if(cb == null) cb = new NullBooleanCallback();
		register(new BooleanOption(this, optionName, defaultValue, sortOrder, expert, forceWrite, shortDesc, longDesc, cb));
	}
	public void register(String optionName, String defaultValue, int sortOrder,
			boolean expert, boolean forceWrite, String shortDesc, String longDesc, StringCallback cb) {
+		if(cb == null) cb = new NullStringCallback();
		register(new StringOption(this, optionName, defaultValue, sortOrder, expert, forceWrite, shortDesc, longDesc, cb));
	}
	public void register(String optionName, short defaultValue, int sortOrder,
			boolean expert, boolean forceWrite, String shortDesc, String longDesc, ShortCallback cb) {
+		if(cb == null) cb = new NullShortCallback();
		register(new ShortOption(this, optionName, defaultValue, sortOrder, expert, forceWrite, shortDesc, longDesc, cb));
}
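With the Null*Callback classes substituted inside register(), callers that have no interest in config changes may now pass null instead of hand-writing a do-nothing callback. A hedged usage sketch (the option name and description keys are made up for illustration):

	import freenet.config.SubConfig;
	import freenet.support.api.BooleanCallback;

	class NullCallbackUsage {
		static void registerExample(SubConfig config) {
			// null is replaced internally by a NullBooleanCallback no-op.
			config.register("exampleFlag", false, 10, true, false,
					"Example.exampleFlag", "Example.exampleFlagLong",
					(BooleanCallback) null);
		}
	}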
Modified: trunk/freenet/src/freenet/crypt/DSAGroup.java
===================================================================
--- trunk/freenet/src/freenet/crypt/DSAGroup.java 2009-04-01 20:12:14 UTC
(rev 26321)
+++ trunk/freenet/src/freenet/crypt/DSAGroup.java 2009-04-01 20:34:09 UTC
(rev 26322)
@@ -7,6 +7,8 @@
import java.io.InputStream;
import java.math.BigInteger;
+import com.db4o.ObjectContainer;
+
import net.i2p.util.NativeBigInteger;
import freenet.node.FSParseException;
import freenet.support.Base64;
@@ -33,7 +35,13 @@
throw new IllegalArgumentException();
}
- /**
+ private DSAGroup(DSAGroup group) {
+ this.p = new NativeBigInteger(1, group.p.toByteArray());
+ this.q = new NativeBigInteger(1, group.q.toByteArray());
+ this.g = new NativeBigInteger(1, group.g.toByteArray());
+ }
+
+ /**
	 * Parses a DSA Group from a string, where p, q, and g are in unsigned
	 * hex-strings, separated by commas
*/
@@ -150,4 +158,16 @@
return "Global.DSAgroupBigA";
return "p="+HexUtil.biToHex(p)+", q="+HexUtil.biToHex(q)+",
g="+HexUtil.biToHex(g);
}
+
+ public DSAGroup cloneKey() {
+ if(this == Global.DSAgroupBigA) return this;
+ return new DSAGroup(this);
+ }
+
+ public void removeFrom(ObjectContainer container) {
+ container.delete(p);
+ container.delete(q);
+ container.delete(g);
+ container.delete(this);
+ }
}
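cloneKey() above deliberately rebuilds each BigInteger from its byte[] form rather than aliasing the originals, so the copy shares no db4o-managed sub-objects with the stored group (and the shared Global.DSAgroupBigA singleton is returned as-is). A minimal sketch of that detach idiom, under the assumption that an unshared instance is the goal:

	import java.math.BigInteger;

	class DetachSketch {
		// Returns a fresh BigInteger with the same value but no shared state,
		// safe to keep after the source object is deleted from the container.
		static BigInteger detach(BigInteger value) {
			return new BigInteger(1, value.toByteArray());
		}
	}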
Modified: trunk/freenet/src/freenet/crypt/DSAPrivateKey.java
===================================================================
--- trunk/freenet/src/freenet/crypt/DSAPrivateKey.java 2009-04-01 20:12:14 UTC
(rev 26321)
+++ trunk/freenet/src/freenet/crypt/DSAPrivateKey.java 2009-04-01 20:34:09 UTC
(rev 26322)
@@ -7,6 +7,8 @@
import java.io.*;
import java.util.Random;
+import com.db4o.ObjectContainer;
+
import freenet.support.Base64;
import freenet.support.HexUtil;
import freenet.support.IllegalBase64Exception;
@@ -84,6 +86,11 @@
throw new IllegalBase64Exception("Probably a pubkey");
return new DSAPrivateKey(y, group);
}
+
+ public void removeFrom(ObjectContainer container) {
+ container.delete(x);
+ container.delete(this);
+ }
// public static void main(String[] args) throws Exception {
// Yarrow y=new Yarrow();
Modified: trunk/freenet/src/freenet/crypt/DSAPublicKey.java
===================================================================
--- trunk/freenet/src/freenet/crypt/DSAPublicKey.java 2009-04-01 20:12:14 UTC
(rev 26321)
+++ trunk/freenet/src/freenet/crypt/DSAPublicKey.java 2009-04-01 20:34:09 UTC
(rev 26322)
@@ -6,6 +6,8 @@
import java.io.InputStream;
import java.math.BigInteger;
+import com.db4o.ObjectContainer;
+
import net.i2p.util.NativeBigInteger;
import freenet.store.StorableBlock;
import freenet.support.Base64;
@@ -59,6 +61,12 @@
this(new ByteArrayInputStream(pubkeyBytes));
}
+ private DSAPublicKey(DSAPublicKey key) {
+ fingerprint = null; // regen when needed
+ this.y = new NativeBigInteger(1, key.y.toByteArray());
+ this.group = key.group.cloneKey();
+ }
+
	public static DSAPublicKey create(byte[] pubkeyAsBytes) throws CryptFormatException {
		try {
			return new DSAPublicKey(new ByteArrayInputStream(pubkeyAsBytes));
@@ -206,4 +214,14 @@
public byte[] getRoutingKey() {
return asBytesHash();
}
+
+ public DSAPublicKey cloneKey() {
+ return new DSAPublicKey(this);
+ }
+
+ public void removeFrom(ObjectContainer container) {
+ container.delete(y);
+ group.removeFrom(container);
+ container.delete(this);
+ }
}
Modified: trunk/freenet/src/freenet/io/comm/RetrievalException.java
===================================================================
--- trunk/freenet/src/freenet/io/comm/RetrievalException.java 2009-04-01
20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/io/comm/RetrievalException.java 2009-04-01
20:34:09 UTC (rev 26322)
@@ -97,6 +97,10 @@
return "UNKNOWN ("+reason+")";
}
}
+
+ public String getMessage() {
+ return toString();
+ }
@Override
public final synchronized Throwable fillInStackTrace() {
Modified: trunk/freenet/src/freenet/keys/CHKBlock.java
===================================================================
--- trunk/freenet/src/freenet/keys/CHKBlock.java 2009-04-01 20:12:14 UTC
(rev 26321)
+++ trunk/freenet/src/freenet/keys/CHKBlock.java 2009-04-01 20:34:09 UTC
(rev 26322)
@@ -4,8 +4,12 @@
package freenet.keys;
import java.security.MessageDigest;
+import java.util.Arrays;
+import com.db4o.ObjectContainer;
+
import freenet.crypt.SHA256;
+import freenet.support.Fields;
/**
* @author amphibian
@@ -19,6 +23,7 @@
final byte[] headers;
final short hashIdentifier;
final NodeCHK chk;
+ final int hashCode;
public static final int MAX_LENGTH_BEFORE_COMPRESSION = Integer.MAX_VALUE;
public static final int TOTAL_HEADERS_LENGTH = 36;
public static final int DATA_LENGTH = 32768;
@@ -65,6 +70,7 @@
//		Logger.debug(CHKBlock.class, "Data length: "+data.length+", header length: "+header.length);
if((key != null) && !verify) {
this.chk = key;
+			hashCode = key.hashCode() ^ Fields.hashCode(data) ^ Fields.hashCode(headers) ^ cryptoAlgorithm;
return;
}
@@ -88,6 +94,7 @@
}
// Otherwise it checks out
}
+		hashCode = chk.hashCode() ^ Fields.hashCode(data) ^ Fields.hashCode(headers) ^ cryptoAlgorithm;
}
public Key getKey() {
@@ -113,4 +120,36 @@
public byte[] getRoutingKey() {
return getKey().getRoutingKey();
}
+
+ public int hashCode() {
+ return hashCode;
+ }
+
+ public boolean equals(Object o) {
+ if(!(o instanceof CHKBlock)) return false;
+ CHKBlock block = (CHKBlock) o;
+ if(!chk.equals(block.chk)) return false;
+ if(!Arrays.equals(data, block.data)) return false;
+ if(!Arrays.equals(headers, block.headers)) return false;
+ if(hashIdentifier != block.hashIdentifier) return false;
+ return true;
+ }
+
+ public boolean objectCanNew(ObjectContainer container) {
+		/* Storing a CHKBlock (or SSKBlock) is not supported. There are some complications, so let's
+		 * not implement this since we don't actually use the functionality atm.
+		 *
+		 * The major problems are:
+		 * - In both CHKBlock and SSKBlock, who is responsible for deleting the node keys? We
+		 *   have to have them in the objects.
+		 * - In SSKBlock, who is responsible for deleting the DSAPublicKey? And the DSAGroup?
+		 *   A group might be unique or might be shared between very many SSKs...
+		 *
+		 * Especially in the second case, we don't want to just copy every time even for
+		 * transient uses ... the best solution may be to copy in objectCanNew(), but even
+		 * then callers to the relevant getter methods may be a worry.
+		 */
+		throw new UnsupportedOperationException("Block set storage in database not supported");
+ }
+
}
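CHKBlock now caches its hash in a final field computed once in the constructor, which is safe because every field that feeds equals() is itself final. A compact sketch of the idiom (BlockLike is illustrative):

	import java.util.Arrays;

	final class BlockLike {
		private final byte[] data;
		private final int hash; // computed once; the fields never change afterwards

		BlockLike(byte[] data) {
			this.data = data.clone();
			this.hash = Arrays.hashCode(this.data);
		}

		@Override public int hashCode() { return hash; }

		@Override public boolean equals(Object o) {
			return o instanceof BlockLike && Arrays.equals(data, ((BlockLike) o).data);
		}
	}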
Modified: trunk/freenet/src/freenet/keys/ClientCHK.java
===================================================================
--- trunk/freenet/src/freenet/keys/ClientCHK.java 2009-04-01 20:12:14 UTC
(rev 26321)
+++ trunk/freenet/src/freenet/keys/ClientCHK.java 2009-04-01 20:34:09 UTC
(rev 26322)
@@ -9,7 +9,10 @@
import java.net.MalformedURLException;
import java.util.Arrays;
+import com.db4o.ObjectContainer;
+
import freenet.support.Base64;
+import freenet.support.Fields;
/**
* Client level CHK. Can be converted into a FreenetURI, can be used to decrypt
@@ -18,7 +21,7 @@
public class ClientCHK extends ClientKey {
/** Lazily constructed: the NodeCHK */
- NodeCHK nodeKey;
+ transient NodeCHK nodeKey;
/** Routing key */
final byte[] routingKey;
/** Decryption key */
@@ -29,6 +32,7 @@
final byte cryptoAlgorithm;
/** Compression algorithm, negative means uncompressed */
final short compressionAlgorithm;
+ final int hashCode;
	/* We use EXTRA_LENGTH above for consistency, rather than dis.read etc. Some code depends on this
	 * being accurate. Change those uses if you like. */
@@ -37,6 +41,19 @@
/** The length of the decryption key */
static final short CRYPTO_KEY_LENGTH = 32;
+	private ClientCHK(ClientCHK key) {
+		this.routingKey = new byte[key.routingKey.length];
+		System.arraycopy(key.routingKey, 0, routingKey, 0, key.routingKey.length);
+		this.nodeKey = null;
+		this.cryptoKey = new byte[key.cryptoKey.length];
+		System.arraycopy(key.cryptoKey, 0, cryptoKey, 0, key.cryptoKey.length);
+		this.controlDocument = key.controlDocument;
+		this.cryptoAlgorithm = key.cryptoAlgorithm;
+		this.compressionAlgorithm = key.compressionAlgorithm;
+		if(routingKey == null) throw new NullPointerException();
+		hashCode = Fields.hashCode(routingKey) ^ Fields.hashCode(cryptoKey) ^ compressionAlgorithm;
+	}
+
/**
* @param routingKey The routing key. This is the overall hash of the
* header and content of the key.
@@ -56,6 +73,8 @@
this.controlDocument = isControlDocument;
this.cryptoAlgorithm = algo;
this.compressionAlgorithm = compressionAlgorithm;
+ if(routingKey == null) throw new NullPointerException();
+		hashCode = Fields.hashCode(routingKey) ^ Fields.hashCode(encKey) ^ compressionAlgorithm;
}
/**
@@ -75,6 +94,7 @@
throw new MalformedURLException("Invalid crypto
algorithm");
controlDocument = (extra[2] & 0x02) != 0;
compressionAlgorithm = (short)(((extra[3] & 0xff) << 8) + (extra[4] &
0xff));
+ hashCode = Fields.hashCode(routingKey) ^ Fields.hashCode(cryptoKey) ^
compressionAlgorithm;
}
/**
@@ -95,6 +115,7 @@
dis.readFully(routingKey);
cryptoKey = new byte[CRYPTO_KEY_LENGTH];
dis.readFully(cryptoKey);
+		hashCode = Fields.hashCode(routingKey) ^ Fields.hashCode(cryptoKey) ^ compressionAlgorithm;
}
/**
@@ -168,4 +189,39 @@
public boolean isCompressed() {
return compressionAlgorithm >= 0;
}
+
+ public ClientCHK cloneKey() {
+ return new ClientCHK(this);
+ }
+
+ public void removeFrom(ObjectContainer container) {
+ container.delete(this);
+ }
+
+ public int hashCode() {
+ return hashCode;
+ }
+
+ public boolean equals(Object o) {
+ if(!(o instanceof ClientCHK)) return false;
+ ClientCHK key = (ClientCHK) o;
+ if(controlDocument != key.controlDocument) return false;
+ if(cryptoAlgorithm != key.cryptoAlgorithm) return false;
+ if(compressionAlgorithm != key.compressionAlgorithm) return
false;
+ if(!Arrays.equals(routingKey, key.routingKey)) return false;
+ if(!Arrays.equals(cryptoKey, key.cryptoKey)) return false;
+ return true;
+ }
+
+ public byte[] getRoutingKey() {
+ return routingKey;
+ }
+
+	public boolean objectCanNew(ObjectContainer container) {
+		if(routingKey == null)
+			throw new NullPointerException("Storing a ClientCHK with no routingKey!: stored="+container.ext().isStored(this)+" active="+container.ext().isActive(this));
+		if(cryptoKey == null)
+			throw new NullPointerException("Storing a ClientCHK with no cryptoKey!");
+		return true;
+	}
}
Modified: trunk/freenet/src/freenet/keys/ClientCHKBlock.java
===================================================================
--- trunk/freenet/src/freenet/keys/ClientCHKBlock.java 2009-04-01 20:12:14 UTC
(rev 26321)
+++ trunk/freenet/src/freenet/keys/ClientCHKBlock.java 2009-04-01 20:34:09 UTC
(rev 26322)
@@ -9,6 +9,8 @@
import org.spaceroots.mantissa.random.MersenneTwister;
+import com.db4o.ObjectContainer;
+
import freenet.crypt.BlockCipher;
import freenet.crypt.PCFBMode;
import freenet.crypt.SHA256;
@@ -232,4 +234,19 @@
return key.isMetadata();
}
+ public boolean objectCanNew(ObjectContainer container) {
+		// Useful to be able to tell whether it's a CHKBlock or a ClientCHKBlock, so override here too.
+		throw new UnsupportedOperationException("ClientCHKBlock storage in database not supported");
+ }
+
+ public int hashCode() {
+ return key.hashCode;
+ }
+
+ public boolean equals(Object o) {
+ if(!(o instanceof ClientCHKBlock)) return false;
+ ClientCHKBlock block = (ClientCHKBlock) o;
+ if(!key.equals(block.key)) return false;
+ return super.equals(o);
+ }
}
Modified: trunk/freenet/src/freenet/keys/ClientKey.java
===================================================================
--- trunk/freenet/src/freenet/keys/ClientKey.java 2009-04-01 20:12:14 UTC
(rev 26321)
+++ trunk/freenet/src/freenet/keys/ClientKey.java 2009-04-01 20:34:09 UTC
(rev 26322)
@@ -1,5 +1,7 @@
package freenet.keys;
+import com.db4o.ObjectContainer;
+
/**
* Base class for client keys.
* Client keys are decodable. Node keys are not.
@@ -12,4 +14,8 @@
*/
public abstract Key getNodeKey();
+ public abstract ClientKey cloneKey();
+
+ public abstract void removeFrom(ObjectContainer container);
+
}
Modified: trunk/freenet/src/freenet/keys/ClientSSK.java
===================================================================
--- trunk/freenet/src/freenet/keys/ClientSSK.java 2009-04-01 20:12:14 UTC
(rev 26321)
+++ trunk/freenet/src/freenet/keys/ClientSSK.java 2009-04-01 20:34:09 UTC
(rev 26322)
@@ -8,10 +8,13 @@
import java.security.MessageDigest;
import java.util.Arrays;
+import com.db4o.ObjectContainer;
+
import freenet.crypt.DSAPublicKey;
import freenet.crypt.SHA256;
import freenet.crypt.UnsupportedCipherException;
import freenet.crypt.ciphers.Rijndael;
+import freenet.support.Fields;
import freenet.support.HexUtil;
import freenet.support.Logger;
@@ -22,17 +25,34 @@
/** Document name */
public final String docName;
/** Public key */
- protected DSAPublicKey pubKey;
+ protected transient DSAPublicKey pubKey;
/** Public key hash */
public final byte[] pubKeyHash;
/** Encryption key */
public final byte[] cryptoKey;
/** Encrypted hashed docname */
public final byte[] ehDocname;
+ private final int hashCode;
static final int CRYPTO_KEY_LENGTH = 32;
public static final int EXTRA_LENGTH = 5;
+ private ClientSSK(ClientSSK key) {
+ this.cryptoAlgorithm = key.cryptoAlgorithm;
+ this.docName = new String(key.docName);
+ if(key.pubKey != null)
+ this.pubKey = key.pubKey.cloneKey();
+ else
+ this.pubKey = null;
+ pubKeyHash = new byte[key.pubKeyHash.length];
+ System.arraycopy(key.pubKeyHash, 0, pubKeyHash, 0, pubKeyHash.length);
+ cryptoKey = new byte[key.cryptoKey.length];
+ System.arraycopy(key.cryptoKey, 0, cryptoKey, 0, key.cryptoKey.length);
+ ehDocname = new byte[key.ehDocname.length];
+ System.arraycopy(key.ehDocname, 0, ehDocname, 0, key.ehDocname.length);
+ hashCode = Fields.hashCode(pubKeyHash) ^ Fields.hashCode(cryptoKey) ^ Fields.hashCode(ehDocname) ^ docName.hashCode();
+ }
+
public ClientSSK(String docName, byte[] pubKeyHash, byte[] extras, DSAPublicKey pubKey, byte[] cryptoKey) throws MalformedURLException {
this.docName = docName;
this.pubKey = pubKey;
@@ -77,6 +97,9 @@
} finally {
SHA256.returnMessageDigest(md);
}
+ if(ehDocname == null)
+ throw new NullPointerException();
+ hashCode = Fields.hashCode(pubKeyHash) ^ Fields.hashCode(cryptoKey) ^ Fields.hashCode(ehDocname) ^ docName.hashCode();
}
public ClientSSK(FreenetURI origURI) throws MalformedURLException {
@@ -118,6 +141,10 @@
@Override
public Key getNodeKey() {
try {
+ if(ehDocname == null)
+ throw new NullPointerException();
+ if(pubKeyHash == null)
+ throw new NullPointerException();
return new NodeSSK(pubKeyHash, ehDocname, pubKey, cryptoAlgorithm);
} catch (SSKVerifyException e) {
IllegalStateException x = new IllegalStateException("Have already verified and yet it fails!: "+e);
@@ -135,4 +162,28 @@
public String toString() {
return "ClientSSK:"+getURI().toString();
}
+
+ public ClientKey cloneKey() {
+ return new ClientSSK(this);
+ }
+
+ @Override
+ public void removeFrom(ObjectContainer container) {
+ container.delete(this);
+ }
+
+ public int hashCode() {
+ return hashCode;
+ }
+
+ public boolean equals(Object o) {
+ if(!(o instanceof ClientSSK)) return false;
+ ClientSSK key = (ClientSSK) o;
+ if(cryptoAlgorithm != key.cryptoAlgorithm) return false;
+ if(!docName.equals(key.docName)) return false;
+ if(!Arrays.equals(pubKeyHash, key.pubKeyHash)) return false;
+ if(!Arrays.equals(cryptoKey, key.cryptoKey)) return false;
+ if(!Arrays.equals(ehDocname, key.ehDocname)) return false;
+ return true;
+ }
}
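ClientSSK's new private copy constructor exists so cloneKey() can hand the persistence layer an instance that shares no mutable state with the transient original: every array is copied, and the transient pubKey is cloned separately rather than shared. The same defensive pattern in isolation, as a sketch with a hypothetical single-field Holder class:

    public class Holder {
        private final byte[] data;

        public Holder(byte[] data) {
            this.data = data;
        }

        // Deep copy: a stored clone must not share the caller's array,
        // or later in-place mutation would silently corrupt the database copy.
        private Holder(Holder original) {
            this.data = new byte[original.data.length];
            System.arraycopy(original.data, 0, this.data, 0, original.data.length);
        }

        public Holder cloneKey() {
            return new Holder(this);
        }
    }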
Modified: trunk/freenet/src/freenet/keys/ClientSSKBlock.java
===================================================================
--- trunk/freenet/src/freenet/keys/ClientSSKBlock.java 2009-04-01 20:12:14 UTC
(rev 26321)
+++ trunk/freenet/src/freenet/keys/ClientSSKBlock.java 2009-04-01 20:34:09 UTC
(rev 26322)
@@ -39,7 +39,7 @@
public static ClientSSKBlock construct(SSKBlock block, ClientSSK key) throws SSKVerifyException {
// Constructor expects clientkey to have the pubkey.
// In the case of binary blobs, the block may have it instead.
- if(key.getPubKey() == null && block.getPubKey() != null)
+ if(key.getPubKey() == null)
key.setPublicKey(block.getPubKey());
return new ClientSSKBlock(block.data, block.headers, key, false);
}
@@ -123,4 +123,15 @@
}
}
+ public int hashCode() {
+ return super.hashCode() ^ key.hashCode();
+ }
+
+ public boolean equals(Object o) {
+ if(!(o instanceof ClientSSKBlock)) return false;
+ ClientSSKBlock block = (ClientSSKBlock) o;
+ if(!key.equals(block.key)) return false;
+ return super.equals(o);
+ }
+
}
Modified: trunk/freenet/src/freenet/keys/FreenetURI.java
===================================================================
--- trunk/freenet/src/freenet/keys/FreenetURI.java 2009-04-01 20:12:14 UTC
(rev 26321)
+++ trunk/freenet/src/freenet/keys/FreenetURI.java 2009-04-01 20:34:09 UTC
(rev 26322)
@@ -13,10 +13,12 @@
import java.net.URISyntaxException;
import java.util.ArrayList;
import java.util.Arrays;
-import java.util.LinkedList;
+import java.util.List;
import java.util.StringTokenizer;
import java.util.regex.Pattern;
+import com.db4o.ObjectContainer;
+
import freenet.client.InsertException;
import freenet.support.Base64;
import freenet.support.Fields;
@@ -81,11 +83,12 @@
private final long suggestedEdition; // for USKs
private boolean hasHashCode;
private int hashCode;
+ private final int uniqueHashCode;
static final String[] VALID_KEY_TYPES =
new String[]{"CHK", "SSK", "KSK", "USK"};
@Override
- public int hashCode() {
+ public synchronized int hashCode() {
if(hasHashCode)
return hashCode;
int x = keyType.hashCode();
@@ -150,15 +153,18 @@
}
@Override
- public final Object clone() {
+ public final FreenetURI clone() {
return new FreenetURI(this);
}
public FreenetURI(FreenetURI uri) {
+ this.uniqueHashCode = super.hashCode();
keyType = uri.keyType;
docName = uri.docName;
- metaStr = new String[uri.metaStr.length];
- System.arraycopy(uri.metaStr, 0, metaStr, 0, metaStr.length);
+ if(uri.metaStr != null) {
+ metaStr = new String[uri.metaStr.length];
+ System.arraycopy(uri.metaStr, 0, metaStr, 0, metaStr.length);
+ } else metaStr = null;
if(uri.routingKey != null) {
routingKey = new byte[uri.routingKey.length];
System.arraycopy(uri.routingKey, 0, routingKey, 0, routingKey.length);
@@ -175,6 +181,7 @@
} else
extra = null;
this.suggestedEdition = uri.suggestedEdition;
+ Logger.minor(this, "Copied: "+toString()+" from
"+uri.toString(), new Exception("debug"));
}
public FreenetURI(String keyType, String docName) {
@@ -211,6 +218,7 @@
String[] metaStr,
byte[] routingKey,
byte[] cryptoKey, byte[] extra2) {
+ this.uniqueHashCode = super.hashCode();
this.keyType = keyType.trim().toUpperCase().intern();
this.docName = docName;
this.metaStr = metaStr;
@@ -218,6 +226,8 @@
this.cryptoKey = cryptoKey;
this.extra = extra2;
this.suggestedEdition = -1;
+ Logger.minor(this, "Created from components: "+toString(), new
Exception("debug"));
+ toString();
}
public FreenetURI(
@@ -227,6 +237,7 @@
byte[] routingKey,
byte[] cryptoKey, byte[] extra2,
long suggestedEdition) {
+ this.uniqueHashCode = super.hashCode();
this.keyType = keyType.trim().toUpperCase().intern();
this.docName = docName;
this.metaStr = metaStr;
@@ -234,12 +245,14 @@
this.cryptoKey = cryptoKey;
this.extra = extra2;
this.suggestedEdition = suggestedEdition;
+ Logger.minor(this, "Created from components (B): "+toString(),
new Exception("debug"));
}
// Strip http:// and freenet: prefix
protected final static Pattern URI_PREFIX = Pattern.compile("^(http://[^/]+/+)?(freenet:)?");
public FreenetURI(String URI) throws MalformedURLException {
+ this.uniqueHashCode = super.hashCode();
if(URI == null)
throw new MalformedURLException("No URI specified");
@@ -369,10 +382,12 @@
} catch(IllegalBase64Exception e) {
throw new MalformedURLException("Invalid Base64
quantity: " + e);
}
+ Logger.minor(this, "Created from parse: "+toString()+" from
"+URI, new Exception("debug"));
}
/** USK constructor from components. */
public FreenetURI(byte[] pubKeyHash, byte[] cryptoKey, byte[] extra, String siteName, long suggestedEdition2) {
+ this.uniqueHashCode = super.hashCode();
this.keyType = "USK";
this.routingKey = pubKeyHash;
this.cryptoKey = cryptoKey;
@@ -380,6 +395,7 @@
this.docName = siteName;
this.suggestedEdition = suggestedEdition2;
metaStr = null;
+ Logger.minor(this, "Created from components (USK):
"+toString(), new Exception("debug"));
}
public void decompose() {
@@ -506,7 +522,7 @@
}
}
- public FreenetURI addMetaStrings(LinkedList<String> metaStrings) {
+ public FreenetURI addMetaStrings(List<String> metaStrings) {
return addMetaStrings(metaStrings.toArray(new String[metaStrings.size()]));
}
@@ -541,7 +557,7 @@
@Override
public String toString() {
if (toStringCache == null)
- toStringCache = toString(false, false);
+ toStringCache = toString(false, false)/* + "#"+super.toString()+"#"+uniqueHashCode*/;
return toStringCache;
}
@@ -558,6 +574,12 @@
}
public String toString(boolean prefix, boolean pureAscii) {
+ if(keyType == null) {
+ // Not activated or something...
+ if(Logger.shouldLog(Logger.MINOR, this))
+ Logger.minor(this, "Not activated?? in
toString("+prefix+","+pureAscii+")");
+ return null;
+ }
StringBuilder b;
if(prefix)
b = new StringBuilder("freenet:");
@@ -622,12 +644,13 @@
return extra;
}
- public LinkedList<String> listMetaStrings() {
- LinkedList<String> l = new LinkedList<String>();
- if(metaStr != null)
+ public ArrayList<String> listMetaStrings() {
+ if(metaStr != null) {
+ ArrayList<String> l = new
ArrayList<String>(metaStr.length);
for(int i = 0; i < metaStr.length; i++)
- l.addLast(metaStr[i]);
- return l;
+ l.add(metaStr[i]);
+ return l;
+ } else return new ArrayList<String>(0);
}
static final byte CHK = 1;
static final byte SSK = 2;
@@ -856,6 +879,30 @@
return "SSK".equals(keyType);
}
+ public void removeFrom(ObjectContainer container) {
+ // All members are inline (arrays, ints etc), treated as values, so we can happily just call delete(this).
+ container.delete(this);
+ }
+
+ public boolean objectCanNew(ObjectContainer container) {
+ if(this == FreenetURI.EMPTY_CHK_URI) {
+ throw new RuntimeException("Storing static CHK@ to
database - can't remove it!");
+ }
+ return true;
+ }
+
+ public boolean objectCanUpdate(ObjectContainer container) {
+ if(!container.ext().isActive(this)) {
+ Logger.error(this, "Updating but not active!", new
Exception("error"));
+ return false;
+ }
+ return true;
+ }
+
+ public void objectOnDelete(ObjectContainer container) {
+ if(Logger.shouldLog(Logger.DEBUG, this)) Logger.minor(this, "Deleting URI", new Exception("debug"));
+ }
+
public boolean isUSK() {
return "USK".equals(keyType);
}
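FreenetURI's new objectCanUpdate() guards against the classic db4o pitfall that motivates several checks in this patch: updating a deactivated object writes default field values (nulls and zeros) over the good data on disk. A sketch of the safe-update discipline, assuming a container configured with activationDepth(1) as in the Node.java hunk below (set() is the db4o 7.x store call of this era):

    import com.db4o.ObjectContainer;

    void updateSafely(ObjectContainer container, Object o) {
        if(!container.ext().isActive(o)) {
            // Storing now would overwrite real fields with nulls/zeros,
            // so activate first (or, as FreenetURI does, refuse the update).
            container.activate(o, 1);
        }
        container.set(o);
    }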
Modified: trunk/freenet/src/freenet/keys/InsertableClientSSK.java
===================================================================
--- trunk/freenet/src/freenet/keys/InsertableClientSSK.java 2009-04-01
20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/keys/InsertableClientSSK.java 2009-04-01
20:34:09 UTC (rev 26322)
@@ -11,6 +11,8 @@
import org.spaceroots.mantissa.random.MersenneTwister;
+import com.db4o.ObjectContainer;
+
import freenet.crypt.DSA;
import freenet.crypt.DSAGroup;
import freenet.crypt.DSAPrivateKey;
@@ -238,4 +240,10 @@
return Global.DSAgroupBigA;
}
+ @Override
+ public void removeFrom(ObjectContainer container) {
+ privKey.removeFrom(container);
+ super.removeFrom(container);
+ }
+
}
Modified: trunk/freenet/src/freenet/keys/InsertableUSK.java
===================================================================
--- trunk/freenet/src/freenet/keys/InsertableUSK.java 2009-04-01 20:12:14 UTC
(rev 26321)
+++ trunk/freenet/src/freenet/keys/InsertableUSK.java 2009-04-01 20:34:09 UTC
(rev 26322)
@@ -5,6 +5,8 @@
import java.net.MalformedURLException;
+import com.db4o.ObjectContainer;
+
import freenet.crypt.DSAGroup;
import freenet.crypt.DSAPrivateKey;
import freenet.crypt.DSAPublicKey;
@@ -24,12 +26,12 @@
public final DSAPrivateKey privKey;
public final DSAGroup group;
- public static InsertableUSK createInsertable(FreenetURI uri) throws MalformedURLException {
+ public static InsertableUSK createInsertable(FreenetURI uri, boolean persistent) throws MalformedURLException {
if(!uri.getKeyType().equalsIgnoreCase("USK"))
throw new MalformedURLException();
InsertableClientSSK ssk = InsertableClientSSK.create(uri.setKeyType("SSK"));
- return new InsertableUSK(ssk.docName, ssk.pubKeyHash, ssk.cryptoKey, ssk.privKey, ssk.getCryptoGroup(), uri.getSuggestedEdition(), ssk.cryptoAlgorithm);
+ return new InsertableUSK(ssk.docName, ssk.pubKeyHash, ssk.cryptoKey, ssk.privKey, persistent ? ssk.getCryptoGroup().cloneKey() : ssk.getCryptoGroup(), uri.getSuggestedEdition(), ssk.cryptoAlgorithm);
}
InsertableUSK(String docName, byte[] pubKeyHash, byte[] cryptoKey, DSAPrivateKey key, DSAGroup group, long suggestedEdition, byte cryptoAlgorithm) throws MalformedURLException {
@@ -68,4 +70,9 @@
}
}
+ public void removeFrom(ObjectContainer container) {
+ privKey.removeFrom(container);
+ group.removeFrom(container);
+ super.removeFrom(container);
+ }
}
Modified: trunk/freenet/src/freenet/keys/Key.java
===================================================================
--- trunk/freenet/src/freenet/keys/Key.java 2009-04-01 20:12:14 UTC (rev
26321)
+++ trunk/freenet/src/freenet/keys/Key.java 2009-04-01 20:34:09 UTC (rev
26322)
@@ -9,6 +9,9 @@
import java.security.MessageDigest;
import java.util.Arrays;
+import com.db4o.ObjectContainer;
+
+import freenet.client.async.ClientContext;
import freenet.crypt.CryptFormatException;
import freenet.crypt.DSAPublicKey;
import freenet.crypt.SHA256;
@@ -29,7 +32,7 @@
*
* Base class for node keys.
*/
-public abstract class Key implements WritableToDataOutputStream {
+public abstract class Key implements WritableToDataOutputStream, Comparable {
final int hash;
double cachedNormalizedDouble;
@@ -45,6 +48,15 @@
cachedNormalizedDouble = -1;
}
+ protected Key(Key key) {
+ this.hash = key.hash;
+ this.cachedNormalizedDouble = key.cachedNormalizedDouble;
+ this.routingKey = new byte[key.routingKey.length];
+ System.arraycopy(key.routingKey, 0, routingKey, 0, routingKey.length);
+ }
+
+ public abstract Key cloneKey();
+
/**
* Write to disk.
* Take up exactly 22 bytes.
@@ -97,6 +109,7 @@
public synchronized double toNormalizedDouble() {
if(cachedNormalizedDouble > 0) return cachedNormalizedDouble;
MessageDigest md = SHA256.getMessageDigest();
+ if(routingKey == null) throw new NullPointerException();
md.update(routingKey);
int TYPE = getType();
md.update((byte)(TYPE >> 8));
@@ -254,4 +267,8 @@
/** Get the full key, including any crypto type bytes, everything needed to construct a Key object */
public abstract byte[] getFullKey();
+
+ public void removeFrom(ObjectContainer container) {
+ container.delete(this);
+ }
}
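Key now implements Comparable so node keys can live in sorted persistent structures; the concrete orderings in NodeCHK and NodeSSK below delegate to Fields.compareBytes. An unsigned lexicographic byte-array comparison of the kind such a helper presumably performs (a sketch, not the actual freenet.support.Fields code):

    // Compare byte arrays as unsigned values, lexicographically;
    // on a common prefix, the shorter array sorts first.
    static int compareBytes(byte[] a, byte[] b) {
        int len = Math.min(a.length, b.length);
        for(int i = 0; i < len; i++) {
            int x = a[i] & 0xFF;
            int y = b[i] & 0xFF;
            if(x != y) return x < y ? -1 : 1;
        }
        if(a.length != b.length) return a.length < b.length ? -1 : 1;
        return 0;
    }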
Modified: trunk/freenet/src/freenet/keys/KeyBlock.java
===================================================================
--- trunk/freenet/src/freenet/keys/KeyBlock.java 2009-04-01 20:12:14 UTC
(rev 26321)
+++ trunk/freenet/src/freenet/keys/KeyBlock.java 2009-04-01 20:34:09 UTC
(rev 26322)
@@ -3,6 +3,9 @@
* http://www.gnu.org/ for further details of the GPL. */
package freenet.keys;
+import com.db4o.ObjectContainer;
+
+import freenet.client.async.ClientContext;
import freenet.store.StorableBlock;
/**
Modified: trunk/freenet/src/freenet/keys/NodeCHK.java
===================================================================
--- trunk/freenet/src/freenet/keys/NodeCHK.java 2009-04-01 20:12:14 UTC (rev
26321)
+++ trunk/freenet/src/freenet/keys/NodeCHK.java 2009-04-01 20:34:09 UTC (rev
26322)
@@ -7,7 +7,11 @@
import java.io.DataOutput;
import java.io.DataOutputStream;
import java.io.IOException;
+
+import com.db4o.ObjectContainer;
+
import freenet.support.Base64;
+import freenet.support.Fields;
import freenet.support.Logger;
/**
@@ -27,6 +31,15 @@
throw new IllegalArgumentException("Wrong length:
"+routingKey2.length+" should be "+KEY_LENGTH);
this.cryptoAlgorithm = cryptoAlgorithm;
}
+
+ private NodeCHK(NodeCHK key) {
+ super(key);
+ this.cryptoAlgorithm = key.cryptoAlgorithm;
+ }
+
+ public Key cloneKey() {
+ return new NodeCHK(this);
+ }
public static final int KEY_LENGTH = 32;
@@ -60,6 +73,7 @@
@Override
public boolean equals(Object key) {
+ if(key == this) return true;
if(key instanceof NodeCHK) {
NodeCHK chk = (NodeCHK) key;
return java.util.Arrays.equals(chk.routingKey, routingKey) && (cryptoAlgorithm == chk.cryptoAlgorithm);
@@ -114,4 +128,14 @@
System.arraycopy(keyBuf, 2, out, 0, KEY_LENGTH);
return out;
}
+
+ public int compareTo(Object arg0) {
+ if(arg0 instanceof NodeSSK) return 1;
+ NodeCHK key = (NodeCHK) arg0;
+ return Fields.compareBytes(routingKey, key.routingKey);
+ }
+
+ public void removeFrom(ObjectContainer container) {
+ container.delete(this);
+ }
}
Modified: trunk/freenet/src/freenet/keys/NodeSSK.java
===================================================================
--- trunk/freenet/src/freenet/keys/NodeSSK.java 2009-04-01 20:12:14 UTC (rev
26321)
+++ trunk/freenet/src/freenet/keys/NodeSSK.java 2009-04-01 20:34:09 UTC (rev
26322)
@@ -10,6 +10,8 @@
import java.security.MessageDigest;
import java.util.Arrays;
+import com.db4o.ObjectContainer;
+
import freenet.crypt.DSAPublicKey;
import freenet.crypt.SHA256;
import freenet.node.GetPubkey;
@@ -35,7 +37,7 @@
/** E(H(docname)) (E = encrypt using decrypt key, which only clients know) */
final byte[] encryptedHashedDocname;
/** The signature key, if we know it */
- DSAPublicKey pubKey;
+ transient DSAPublicKey pubKey;
final int hashCode;
static final int SSK_VERSION = 1;
@@ -69,6 +71,21 @@
hashCode = Fields.hashCode(pkHash) ^ Fields.hashCode(ehDocname);
}
+ private NodeSSK(NodeSSK key) {
+ super(key);
+ this.cryptoAlgorithm = key.cryptoAlgorithm;
+ this.pubKey = key.pubKey;
+ this.pubKeyHash = new byte[key.pubKeyHash.length];
+ System.arraycopy(key.pubKeyHash, 0, pubKeyHash, 0, key.pubKeyHash.length);
+ this.encryptedHashedDocname = new byte[key.encryptedHashedDocname.length];
+ System.arraycopy(key.encryptedHashedDocname, 0, encryptedHashedDocname, 0, key.encryptedHashedDocname.length);
+ this.hashCode = key.hashCode;
+ }
+
+ public Key cloneKey() {
+ return new NodeSSK(this);
+ }
+
// routingKey = H( E(H(docname)) + H(pubkey) )
private static byte[] makeRoutingKey(byte[] pkHash, byte[] ehDocname) {
MessageDigest md256 = SHA256.getMessageDigest();
@@ -151,6 +168,7 @@
@Override
public boolean equals(Object o) {
+ if(o == this) return true;
if(!(o instanceof NodeSSK)) return false;
NodeSSK key = (NodeSSK)o;
if(!Arrays.equals(key.encryptedHashedDocname, encryptedHashedDocname)) return false;
@@ -211,5 +229,18 @@
System.arraycopy(keyBuf, 2+E_H_DOCNAME_SIZE, pubKeyHash, 0, PUBKEY_HASH_SIZE);
return makeRoutingKey(pubKeyHash, encryptedHashedDocname);
}
+
+ public int compareTo(Object arg0) {
+ if(arg0 instanceof NodeCHK) return -1;
+ NodeSSK key = (NodeSSK) arg0;
+ int result = Fields.compareBytes(encryptedHashedDocname, key.encryptedHashedDocname);
+ if(result != 0) return result;
+ return Fields.compareBytes(pubKeyHash, key.pubKeyHash);
+ }
+ @Override
+ public void removeFrom(ObjectContainer container) {
+ super.removeFrom(container);
+ }
+
}
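Taken together, NodeCHK.compareTo() (returns 1 when given a NodeSSK) and NodeSSK.compareTo() (returns -1 when given a NodeCHK) define a total order in which every SSK sorts before every CHK, so both key types can share one sorted container. For example, assuming chk1, chk2 and ssk1 are previously constructed keys:

    import java.util.TreeSet;

    TreeSet<Key> keys = new TreeSet<Key>();
    keys.add(chk1);
    keys.add(chk2);
    keys.add(ssk1);
    // Iteration order: ssk1 first, then the CHKs by routing-key bytes.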
Modified: trunk/freenet/src/freenet/keys/SSKBlock.java
===================================================================
--- trunk/freenet/src/freenet/keys/SSKBlock.java 2009-04-01 20:12:14 UTC
(rev 26321)
+++ trunk/freenet/src/freenet/keys/SSKBlock.java 2009-04-01 20:34:09 UTC
(rev 26322)
@@ -6,11 +6,14 @@
import java.security.MessageDigest;
import java.util.Arrays;
+import com.db4o.ObjectContainer;
+
import net.i2p.util.NativeBigInteger;
import freenet.crypt.DSA;
import freenet.crypt.DSAPublicKey;
import freenet.crypt.DSASignature;
import freenet.crypt.SHA256;
+import freenet.support.Fields;
import freenet.support.HexUtil;
/**
@@ -49,6 +52,7 @@
final DSAPublicKey pubKey;
final short hashIdentifier;
final short symCipherIdentifier;
+ final int hashCode;
public static final short DATA_LENGTH = 1024;
/* Maximum length of compressed payload */
@@ -83,7 +87,7 @@
@Override
public int hashCode(){
- return super.hashCode();
+ return hashCode;
}
/**
@@ -147,6 +151,7 @@
if(!Arrays.equals(ehDocname, nodeKey.encryptedHashedDocname))
throw new SSKVerifyException("E(H(docname)) wrong - wrong key?? \nfrom headers: "+HexUtil.bytesToHex(ehDocname)+"\nfrom key: "+HexUtil.bytesToHex(nodeKey.encryptedHashedDocname));
SHA256.returnMessageDigest(md);
+ hashCode = Fields.hashCode(data) ^ Fields.hashCode(headers) ^ nodeKey.hashCode() ^ pubKey.hashCode() ^ hashIdentifier;
}
public Key getKey() {
@@ -176,5 +181,22 @@
public byte[] getRoutingKey() {
return getKey().getRoutingKey();
}
+
+ public boolean objectCanNew(ObjectContainer container) {
+ /* Storing an SSKBlock is not supported. There are some complications, so let's
+ * not implement this since we don't actually use the functionality atm.
+ *
+ * The major problems are:
+ * - In both CHKBlock and SSKBlock, who is responsible for deleting the node keys? We
+ * have to have them in the objects.
+ * - In SSKBlock, who is responsible for deleting the DSAPublicKey? And the DSAGroup?
+ * A group might be unique or might be shared between very many SSKs...
+ *
+ * Especially in the second case, we don't want to just copy every time even for
+ * transient uses ... the best solution may be to copy in objectCanNew(), but even
+ * then callers to the relevant getter methods may be a worry.
+ */
+ throw new UnsupportedOperationException("Block set storage in database not supported");
+ }
}
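The deletion worries in that comment reflect real db4o semantics: the database does not reference-count, so a cascading delete removes a shared child object out from under every other parent that still points at it. Sketched with the classes named above (illustrative only, not configuration this patch actually applies):

    // If SSKBlock storage were enabled with cascade-on-delete...
    dbConfig.objectClass(freenet.keys.SSKBlock.class).cascadeOnDelete(true);
    // ...then db.delete(blockA) would also delete blockA's DSAPublicKey and,
    // transitively, a DSAGroup possibly shared by very many other SSKs.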
Modified: trunk/freenet/src/freenet/keys/USK.java
===================================================================
--- trunk/freenet/src/freenet/keys/USK.java 2009-04-01 20:12:14 UTC (rev
26321)
+++ trunk/freenet/src/freenet/keys/USK.java 2009-04-01 20:34:09 UTC (rev
26322)
@@ -6,6 +6,8 @@
import java.net.MalformedURLException;
import java.util.Arrays;
+import com.db4o.ObjectContainer;
+
import freenet.support.Fields;
import freenet.support.Logger;
@@ -85,6 +87,16 @@
siteName.hashCode() ^ (int)suggestedEdition ^ (int)(suggestedEdition >> 32);
}
+ public USK(USK usk) {
+ this.pubKeyHash = usk.pubKeyHash;
+ this.cryptoAlgorithm = usk.cryptoAlgorithm;
+ this.cryptoKey = usk.cryptoKey;
+ this.siteName = usk.siteName;
+ this.suggestedEdition = usk.suggestedEdition;
+ hashCode = Fields.hashCode(pubKeyHash) ^ Fields.hashCode(cryptoKey) ^
+ siteName.hashCode() ^ (int)suggestedEdition ^ (int)(suggestedEdition >> 32);
+ }
+
@Override
public FreenetURI getURI() {
return new FreenetURI(pubKeyHash, cryptoKey, ClientSSK.getExtraBytes(cryptoAlgorithm), siteName, suggestedEdition);
@@ -112,6 +124,10 @@
return copy(0);
}
+ public USK clone() {
+ return new USK(this);
+ }
+
@Override
public boolean equals(Object o) {
if(o == null || !(o instanceof USK)) return false;
@@ -167,4 +183,8 @@
}
return uri;
}
+
+ public void removeFrom(ObjectContainer container) {
+ container.delete(this);
+ }
}
Modified: trunk/freenet/src/freenet/l10n/freenet.l10n.en.properties
===================================================================
--- trunk/freenet/src/freenet/l10n/freenet.l10n.en.properties 2009-04-01
20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/l10n/freenet.l10n.en.properties 2009-04-01
20:34:09 UTC (rev 26322)
@@ -1006,6 +1006,7 @@
QueueToadlet.failedDU=Failed directory uploads (${size})
QueueToadlet.failedToRemove=Failed to remove ${id}: ${message}
QueueToadlet.failedToRemoveId=Failed to remove: ${id}
+QueueToadlet.failedToRemoveAll=Failed to remove all requests. Either delete them manually, or shut down and delete the client database file (node.db4o) manually.
QueueToadlet.failedToRemoveRequest=Failed to remove request
QueueToadlet.failedToRestart=Failed to restart: ${id}
QueueToadlet.failedToRestartRequest=Failed to restart request
@@ -1052,13 +1053,15 @@
QueueToadlet.uploadProgressbarNotAccurate=This progress value is likely to change as the upload encodes more blocks
QueueToadlet.reason=Reason
QueueToadlet.remove=Remove
-QueueToadlet.requestNavigation=Request Navigation
+QueueToadlet.requestNavigation=Request Navigation and Totals
QueueToadlet.restart=Restart
QueueToadlet.siteUploadSucceededTitle=Freesite insert succeeded: ${filename}
QueueToadlet.siteUploadSucceeded=Your freesite ${filename} (${files} files, ${size} total size) has been successfully uploaded to Freenet. ${link}Click here${/link} to open the site homepage.
QueueToadlet.size=Size
QueueToadlet.starting=STARTING
QueueToadlet.title=Downloads and uploads for ${nodeName}
+QueueToadlet.totalQueuedDownloads=Total queued downloads: ${size}
+QueueToadlet.totalQueuedUploads=Total queued uploads: ${size}
QueueToadlet.totalSize=Total Size
QueueToadlet.unknown=Unknown
QueueToadlet.uploadSucceededTitle=Insert succeeded: ${filename}
@@ -1208,6 +1211,7 @@
StatisticsToadlet.authBytes=Connection setup: ${total} output
StatisticsToadlet.bandwidthTitle=Bandwidth
StatisticsToadlet.cpus=Available CPUs: ${count}
+StatisticsToadlet.databaseJobsByPriority=Database jobs
StatisticsToadlet.debuggingBytes=Debugging bytes: ${netColoring} network coloring, ${ping} ping, ${probe} probe requests, ${routed} routed test messages.
StatisticsToadlet.fullTitle=Statistics for ${name}
StatisticsToadlet.getLogs=Get latest node's logfile
@@ -1232,6 +1236,7 @@
StatisticsToadlet.routingDisabled=Not routing traffic (we are currently connected to the node but we or it refuse to route traffic)
StatisticsToadlet.routingDisabledShort=Not routing traffic
StatisticsToadlet.statisticGatheringTitle=Statistics Gathering
+StatisticsToadlet.storeJobsByPriority=Store checker jobs by priority
StatisticsToadlet.swapOutput=Swapping Output: ${total}.
StatisticsToadlet.threadDumpButton=Generate a Thread Dump
StatisticsToadlet.threads=Running threads: ${running}/${max}
Modified: trunk/freenet/src/freenet/node/BaseSendableGet.java
===================================================================
--- trunk/freenet/src/freenet/node/BaseSendableGet.java 2009-04-01 20:12:14 UTC
(rev 26321)
+++ trunk/freenet/src/freenet/node/BaseSendableGet.java 2009-04-01 20:34:09 UTC
(rev 26322)
@@ -1,12 +1,19 @@
package freenet.node;
+import com.db4o.ObjectContainer;
+
+import freenet.client.async.ClientContext;
import freenet.keys.Key;
public abstract class BaseSendableGet extends SendableRequest {
+ protected BaseSendableGet(boolean persistent) {
+ super(persistent);
+ }
+
/** Get a numbered key to fetch. */
- public abstract Key getNodeKey(Object token);
+ public abstract Key getNodeKey(SendableRequestItem token, ObjectContainer container);
- public abstract boolean hasValidKeys(KeysFetchingLocally fetching);
+ public abstract boolean hasValidKeys(KeysFetchingLocally fetching, ObjectContainer container, ClientContext context);
}
Copied: trunk/freenet/src/freenet/node/BulkCallFailureItem.java (from rev
26320, branches/db4o/freenet/src/freenet/node/BulkCallFailureItem.java)
===================================================================
--- trunk/freenet/src/freenet/node/BulkCallFailureItem.java
(rev 0)
+++ trunk/freenet/src/freenet/node/BulkCallFailureItem.java 2009-04-01
20:34:09 UTC (rev 26322)
@@ -0,0 +1,13 @@
+package freenet.node;
+
+public class BulkCallFailureItem {
+
+ public final LowLevelGetException e;
+ public final Object token;
+
+ public BulkCallFailureItem(LowLevelGetException e, Object token) {
+ this.e = e;
+ this.token = token;
+ }
+
+}
Modified: trunk/freenet/src/freenet/node/FNPPacketMangler.java
===================================================================
--- trunk/freenet/src/freenet/node/FNPPacketMangler.java 2009-04-01
20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/node/FNPPacketMangler.java 2009-04-01
20:34:09 UTC (rev 26322)
@@ -579,7 +579,8 @@
// Phase 3
processJFKMessage3(payload, 4, null, replyTo, false, true, setupType, negType);
} else {
- Logger.error(this, "Invalid phase "+packetType+" for
anonymous-initiator (we are the responder) from "+replyTo);
+ // FIXME fix this if anything to fix
+ Logger.normal(this, "Invalid phase "+packetType+" for
anonymous-initiator (we are the responder) from "+replyTo);
}
}
@@ -626,7 +627,8 @@
// Phase 4
processJFKMessage4(payload, 4, pn, replyTo, false, true, setupType, negType);
} else {
- Logger.error(this, "Invalid phase "+packetType+" for
anonymous-initiator (we are the initiator) from "+replyTo);
+ // FIXME fix if necessary, make error??
+ Logger.normal(this, "Invalid phase "+packetType+" for
anonymous-initiator (we are the initiator) from "+replyTo);
}
}
Modified: trunk/freenet/src/freenet/node/FailureTable.java
===================================================================
--- trunk/freenet/src/freenet/node/FailureTable.java 2009-04-01 20:12:14 UTC
(rev 26321)
+++ trunk/freenet/src/freenet/node/FailureTable.java 2009-04-01 20:34:09 UTC
(rev 26322)
@@ -275,6 +275,7 @@
* serialise it, as high latencies can otherwise result.
*/
protected void innerOnOffer(Key key, PeerNode peer, byte[] authenticator) {
+ if(key.getRoutingKey() == null) throw new NullPointerException();
//NB: node.hasKey() executes a datastore fetch
if(node.hasKey(key)) {
Logger.minor(this, "Already have key");
Copied: trunk/freenet/src/freenet/node/HandlePortTuple.java (from rev 26320,
branches/db4o/freenet/src/freenet/node/HandlePortTuple.java)
===================================================================
--- trunk/freenet/src/freenet/node/HandlePortTuple.java
(rev 0)
+++ trunk/freenet/src/freenet/node/HandlePortTuple.java 2009-04-01 20:34:09 UTC
(rev 26322)
@@ -0,0 +1,10 @@
+/* This code is part of Freenet. It is distributed under the GNU General
+ * Public License, version 2 (or at your option any later version). See
+ * http://www.gnu.org/ for further details of the GPL. */
+package freenet.node;
+
+/** Used to associate a port with a node database handle */
+class HandlePortTuple {
+ long handle;
+ int portNumber;
+}
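HandlePortTuple is a bare two-field record persisted so a node can re-find its own top-level objects by listening port. With db4o such a tuple is naturally fetched by query-by-example, where default-valued fields act as wildcards; an illustrative sketch of a lookup-or-create (the real logic lives in NodeCrypto.getNodeHandle(), whose details are not shown in this diff):

    import java.util.Random;
    import com.db4o.ObjectContainer;
    import com.db4o.ObjectSet;

    static long getHandleForPort(ObjectContainer db, int port, Random random) {
        HandlePortTuple template = new HandlePortTuple();
        template.portNumber = port; // zero/null fields are ignored by the query
        ObjectSet<?> results = db.queryByExample(template);
        if(results.hasNext())
            return ((HandlePortTuple) results.next()).handle;
        HandlePortTuple tuple = new HandlePortTuple();
        tuple.portNumber = port;
        tuple.handle = random.nextLong(); // assumption: handles are just random longs
        db.set(tuple);
        db.commit();
        return tuple.handle;
    }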
Modified: trunk/freenet/src/freenet/node/KeysFetchingLocally.java
===================================================================
--- trunk/freenet/src/freenet/node/KeysFetchingLocally.java 2009-04-01
20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/node/KeysFetchingLocally.java 2009-04-01
20:34:09 UTC (rev 26322)
@@ -9,5 +9,13 @@
* LOCKING: This should be safe just about anywhere, the lock protecting it is always taken last.
*/
public boolean hasKey(Key key);
+
+ /**
+ * Is this request:token pair being executed? This applies only to
+ * non-persistent inserts, because persistent requests are selected on
+ * a request level, and requests use hasKey(). Also, activation issues
+ * with SendableRequest mean that getting a hash code would be problematic!
+ */
+ public boolean hasTransientInsert(SendableInsert insert, Object token);
}
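A minimal transient implementation of the new method could track running (insert, token) pairs under identity semantics, which sidesteps exactly the activation/hashCode issue the javadoc warns about (a sketch of one plausible approach, not the scheduler's actual bookkeeping):

    import java.util.HashSet;
    import java.util.Set;

    final class TransientInsertTag {
        private final SendableInsert insert;
        private final Object token;
        TransientInsertTag(SendableInsert insert, Object token) {
            this.insert = insert;
            this.token = token;
        }
        @Override
        public int hashCode() {
            // Identity hashes: never calls insert.hashCode(), so a
            // deactivated persistent object cannot blow up here.
            return System.identityHashCode(insert) ^ System.identityHashCode(token);
        }
        @Override
        public boolean equals(Object o) {
            if(!(o instanceof TransientInsertTag)) return false;
            TransientInsertTag t = (TransientInsertTag) o;
            return t.insert == insert && t.token == token;
        }
    }

    private final Set<TransientInsertTag> runningInserts = new HashSet<TransientInsertTag>();

    public synchronized boolean hasTransientInsert(SendableInsert insert, Object token) {
        return runningInserts.contains(new TransientInsertTag(insert, token));
    }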
Modified: trunk/freenet/src/freenet/node/LowLevelGetException.java
===================================================================
--- trunk/freenet/src/freenet/node/LowLevelGetException.java 2009-04-01
20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/node/LowLevelGetException.java 2009-04-01
20:34:09 UTC (rev 26322)
@@ -76,7 +76,7 @@
/** Failure code */
public final int code;
- LowLevelGetException(int code, String message, Throwable t) {
+ public LowLevelGetException(int code, String message, Throwable t) {
super(message, t);
this.code = code;
}
Modified: trunk/freenet/src/freenet/node/LowLevelPutException.java
===================================================================
--- trunk/freenet/src/freenet/node/LowLevelPutException.java 2009-04-01
20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/node/LowLevelPutException.java 2009-04-01
20:34:09 UTC (rev 26322)
@@ -38,7 +38,7 @@
}
- LowLevelPutException(int code, String message, Throwable t) {
+ public LowLevelPutException(int code, String message, Throwable t) {
super(message, t);
this.code = code;
}
Modified: trunk/freenet/src/freenet/node/Node.java
===================================================================
--- trunk/freenet/src/freenet/node/Node.java 2009-04-01 20:12:14 UTC (rev
26321)
+++ trunk/freenet/src/freenet/node/Node.java 2009-04-01 20:34:09 UTC (rev
26322)
@@ -22,11 +22,22 @@
import java.util.MissingResourceException;
import java.util.Random;
import java.util.Set;
+import java.util.TreeMap;
import java.util.Vector;
import org.spaceroots.mantissa.random.MersenneTwister;
import org.tanukisoftware.wrapper.WrapperManager;
+import com.db4o.Db4o;
+import com.db4o.ObjectContainer;
+import com.db4o.ObjectServer;
+import com.db4o.ObjectSet;
+import com.db4o.config.Configuration;
+import com.db4o.config.QueryEvaluationMode;
+import com.db4o.diagnostic.ClassHasNoFields;
+import com.db4o.diagnostic.Diagnostic;
+import com.db4o.diagnostic.DiagnosticBase;
+import com.db4o.diagnostic.DiagnosticListener;
import com.sleepycat.je.DatabaseException;
import com.sleepycat.je.Environment;
import com.sleepycat.je.EnvironmentConfig;
@@ -110,6 +121,7 @@
import freenet.support.LRUQueue;
import freenet.support.LogThresholdCallback;
import freenet.support.Logger;
+import freenet.support.NullObject;
import freenet.support.OOMHandler;
import freenet.support.PooledExecutor;
import freenet.support.ShortBuffer;
@@ -235,6 +247,16 @@
}
}
+ /** db4o database for node and client layer.
+ * Other databases can be created for the datastore (since its usage
+ * patterns and content are completely different), or for plugins (for
+ * security reasons). */
+ public final ObjectContainer db;
+ /** A fixed random number which identifies the top-level objects belonging to
+ * this node, as opposed to any others that might be stored in the same database
+ * (e.g. because of many-nodes-in-one-VM). */
+ public final long nodeDBHandle;
+
/** Stats */
public final NodeStats nodeStats;
public final NetworkIDManager netid;
@@ -795,6 +817,140 @@
throw new NodeInitException(NodeInitException.EXIT_BAD_NODE_DIR, msg);
}
+ // init shutdown hook
+ shutdownHook = new SemiOrderedShutdownHook();
+ Runtime.getRuntime().addShutdownHook(shutdownHook);
+
+ /* FIXME: Backup the database! */
+ /* On my db4o test node with lots of downloads, and several days old, com.db4o.internal.freespace.FreeSlotNode
+ * used 73MB out of the 128MB limit (117MB used). This memory was not reclaimed despite constant garbage collection.
+ * This is unacceptable! */
+ Configuration dbConfig = Db4o.newConfiguration();
+ dbConfig.freespace().useBTreeSystem();
+ dbConfig.objectClass(freenet.client.async.PersistentCooldownQueueItem.class).objectField("key").indexed(true);
+ dbConfig.objectClass(freenet.client.async.PersistentCooldownQueueItem.class).objectField("keyAsBytes").indexed(true);
+ dbConfig.objectClass(freenet.client.async.PersistentCooldownQueueItem.class).objectField("time").indexed(true);
+ dbConfig.objectClass(freenet.client.async.RegisterMe.class).objectField("core").indexed(true);
+ dbConfig.objectClass(freenet.client.async.RegisterMe.class).objectField("priority").indexed(true);
+ dbConfig.objectClass(freenet.client.async.PersistentCooldownQueueItem.class).objectField("time").indexed(true);
+ dbConfig.objectClass(freenet.client.FECJob.class).objectField("priority").indexed(true);
+ dbConfig.objectClass(freenet.client.FECJob.class).objectField("addedTime").indexed(true);
+ dbConfig.objectClass(freenet.client.FECJob.class).objectField("queue").indexed(true);
+ dbConfig.objectClass(freenet.client.async.InsertCompressor.class).objectField("nodeDBHandle").indexed(true);
+ dbConfig.objectClass(freenet.node.fcp.FCPClient.class).objectField("name").indexed(true);
+ dbConfig.objectClass(freenet.client.async.DatastoreCheckerItem.class).objectField("prio").indexed(true);
+ dbConfig.objectClass(freenet.support.io.PersistentBlobTempBucketTag.class).objectField("index").indexed(true);
+ dbConfig.objectClass(freenet.support.io.PersistentBlobTempBucketTag.class).objectField("bucket").indexed(true);
+ dbConfig.objectClass(freenet.support.io.PersistentBlobTempBucketTag.class).objectField("factory").indexed(true);
+ dbConfig.objectClass(freenet.support.io.PersistentBlobTempBucketTag.class).objectField("isFree").indexed(true);
+ dbConfig.objectClass(freenet.client.FetchException.class).cascadeOnDelete(true);
+ /*
+ * HashMap: don't enable cascade on update/delete/activate, db4o handles this
+ * internally through the TMap translator.
+ */
+ /** Maybe we want a different query evaluation mode?
+ * At the moment, a big splitfile insert will result in one SingleBlockInserter
+ * for every key, which means one RegisterMe for each ... this results in a long pause
+ * when we run the RegisterMe query, plus a lot of RAM usage for all the UIDs.
+ *
+ * Having said that, if we only run it once, and especially if we make splitfile
+ * inserts work like splitfile requests, it may not be a big problem.
+ */
+ // LAZY appears to cause ClassCastExceptions relating to db4o objects inside db4o code. :(
+ // Also it causes duplicates if we activate immediately.
+ // And the performance gain for e.g. RegisterMeRunner isn't that great.
+// dbConfig.queries().evaluationMode(QueryEvaluationMode.LAZY);
+ dbConfig.messageLevel(1);
+ dbConfig.activationDepth(1);
+ /* TURN OFF SHUTDOWN HOOK.
+ * The shutdown hook does auto-commit. We do NOT want auto-commit: if a
+ * transaction hasn't commit()ed, it's not safe to commit it. For example,
+ * a splitfile is started, gets half way through, then we shut down.
+ * The shutdown hook commits the half-finished transaction. When we start
+ * back up, we assume the whole transaction has been committed, and end
+ * up only registering the proportion of segments for which a RegisterMe
+ * has already been created. Yes, this has happened, yes, it sucks.
+ * Add our own hook to rollback and close... */
+ dbConfig.automaticShutDown(false);
+ /* Block size 8 should have minimal impact since pointers are this
+ * long, and allows databases of up to 16GB.
+ * FIXME make configurable by user. */
+ dbConfig.blockSize(8);
+ dbConfig.diagnostic().addListener(new DiagnosticListener() {
+
+ public void onDiagnostic(Diagnostic arg0) {
+ if(arg0 instanceof ClassHasNoFields)
+ return; // Ignore
+ if(arg0 instanceof DiagnosticBase) {
+ DiagnosticBase d = (DiagnosticBase) arg0;
+ Logger.error(this, "Diagnostic: "+d.getClass()+" : "+d.problem()+" : "+d.solution()+" : "+d.reason(), new Exception("debug"));
+ } else
+ Logger.error(this, "Diagnostic: "+arg0+" : "+arg0.getClass(), new Exception("debug"));
+ }
+ });
+
+ shutdownHook.addEarlyJob(new Thread() {
+
+ public void run() {
+ System.err.println("Stopping database jobs...");
+ clientCore.killDatabase();
+ }
+
+ });
+
+ shutdownHook.addLateJob(new Thread() {
+
+ public void run() {
+ System.err.println("Rolling back unfinished
transactions...");
+ db.rollback();
+ System.err.println("Closing database...");
+ db.close();
+ }
+
+ });
+
+ System.err.println("Optimise native queries:
"+dbConfig.optimizeNativeQueries());
+ System.err.println("Query activation depth:
"+dbConfig.activationDepth());
+ db = Db4o.openFile(dbConfig, new File(nodeDir,
"node.db4o").toString());
+
+ System.err.println("Opened database");
+
+ // DUMP DATABASE CONTENTS
+ System.err.println("DUMPING DATABASE CONTENTS:");
+ ObjectSet<Object> contents = db.queryByExample(new Object());
+ Map<String,Integer> map = new HashMap<String, Integer>();
+ Iterator i = contents.iterator();
+ while(i.hasNext()) {
+ Object o = i.next();
+ String name = o.getClass().getName();
+ if((map.get(name)) != null) {
+ map.put(name, map.get(name)+1);
+ } else {
+ map.put(name, 1);
+ }
+ // Activated to depth 1
+ try {
+ Logger.minor(this, "DATABASE:
"+o.getClass()+":"+o+":"+db.ext().getID(o));
+ } catch (Throwable t) {
+ Logger.minor(this, "CAUGHT "+t+" FOR CLASS
"+o.getClass());
+ }
+ db.deactivate(o, 1);
+ }
+ int total = 0;
+ for(Map.Entry<String,Integer> entry : map.entrySet()) {
System.err.println(entry.getKey()+" : "+entry.getValue());
+ total += entry.getValue();
+ }
+ // Some structures e.g. collections are sensitive to the activation depth.
+ // If they are activated to depth 1, they are broken, and activating them to
+ // depth 2 does NOT un-break them! Hence we need to deactivate (above) and
+ // GC here...
+ System.gc();
+ System.runFinalization();
+ System.gc();
+ System.runFinalization();
+ System.err.println("END DATABASE DUMP: "+total+" objects");
+
// Boot ID
bootID = random.nextLong();
// Fixed length file containing boot ID. Accessed with random access file. So hopefully it will always be
@@ -1064,6 +1220,11 @@
sortOrder += NodeCryptoConfig.OPTION_COUNT;
darknetCrypto = new NodeCrypto(this, false, darknetConfig, startupTime, enableARKs);
+
+ nodeDBHandle = darknetCrypto.getNodeHandle(db);
+
+ db.commit();
+ if(Logger.shouldLog(Logger.MINOR, this)) Logger.minor(this, "COMMITTED");
// Must be created after darknetCrypto
dnsr = new DNSRequester(this);
@@ -1073,10 +1234,6 @@
Logger.normal(Node.class, "Creating node...");
- // init shutdown hook
- shutdownHook = new SemiOrderedShutdownHook();
- Runtime.getRuntime().addShutdownHook(shutdownHook);
-
shutdownHook.addEarlyJob(new Thread() {
@Override
public void run() {
@@ -1733,7 +1890,7 @@
nodeStats = new NodeStats(this, sortOrder, new SubConfig("node.load", config), obwLimit, ibwLimit, nodeDir);
- clientCore = new NodeClientCore(this, config, nodeConfig, nodeDir, getDarknetPortNumber(), sortOrder, oldConfig, fproxyConfig, toadlets);
+ clientCore = new NodeClientCore(this, config, nodeConfig, nodeDir, getDarknetPortNumber(), sortOrder, oldConfig, fproxyConfig, toadlets, db);
netid = new NetworkIDManager(this);
@@ -2413,7 +2570,17 @@
}
} else
throw new IllegalStateException("Unknown key
type: "+key.getClass());
- if(chk != null) return chk;
+ if(chk != null) {
+ // Probably somebody waiting for it. Trip it.
+ if(clientCore != null &&
clientCore.requestStarters != null) {
+ if(chk instanceof CHKBlock)
+
clientCore.requestStarters.chkFetchScheduler.tripPendingKey(chk);
+ else
+
clientCore.requestStarters.sskFetchScheduler.tripPendingKey(chk);
+ }
+ failureTable.onFound(chk);
+ return chk;
+ }
}
if(localOnly) return null;
if(logMINOR) Logger.minor(this, "Not in store locally");
@@ -3948,6 +4115,15 @@
private SimpleUserAlert alertMTUTooSmall;
+ public final RequestClient nonPersistentClient = new RequestClient() {
+ public boolean persistent() {
+ return false;
+ }
+ public void removeFrom(ObjectContainer container) {
+ throw new UnsupportedOperationException();
+ }
+ };
+
public void onTooLowMTU(int minAdvertisedMTU, int minAcceptableMTU) {
if(alertMTUTooSmall == null) {
alertMTUTooSmall = new SimpleUserAlert(false, l10n("tooSmallMTU"), l10n("tooSmallMTULong", new String[] { "mtu", "minMTU" }, new String[] { Integer.toString(minAdvertisedMTU), Integer.toString(minAcceptableMTU) }), l10n("tooSmallMTUShort"), UserAlert.ERROR);
@@ -3966,7 +4142,12 @@
public boolean shallWeRouteAccordingToOurPeersLocation() {
return routeAccordingToOurPeersLocation && Version.lastGoodBuild() >= 1160;
}
-
+
+ public boolean objectCanNew(ObjectContainer container) {
+ Logger.error(this, "Not storing Node in database", new
Exception("error"));
+ return false;
+ }
+
private volatile long turtleCount;
/**
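Stripped of the Freenet-specific indexes and logging, the database bring-up in the Node.java hunk above reduces to a handful of db4o calls, all of which appear verbatim in the patch (sketch only; error handling omitted):

    import java.io.File;
    import com.db4o.Db4o;
    import com.db4o.ObjectContainer;
    import com.db4o.config.Configuration;

    static ObjectContainer openNodeDatabase(File nodeDir) {
        Configuration cfg = Db4o.newConfiguration();
        cfg.freespace().useBTreeSystem(); // avoid the FreeSlotNode RAM blowup
        cfg.activationDepth(1);           // shallow activation, managed by hand
        cfg.blockSize(8);                 // 8-byte blocks => databases up to 16GB
        cfg.automaticShutDown(false);     // never auto-commit a half-done transaction
        return Db4o.openFile(cfg, new File(nodeDir, "node.db4o").toString());
    }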
Modified: trunk/freenet/src/freenet/node/NodeARKInserter.java
===================================================================
--- trunk/freenet/src/freenet/node/NodeARKInserter.java 2009-04-01 20:12:14 UTC
(rev 26321)
+++ trunk/freenet/src/freenet/node/NodeARKInserter.java 2009-04-01 20:34:09 UTC
(rev 26322)
@@ -6,6 +6,8 @@
import java.io.UnsupportedEncodingException;
import java.net.UnknownHostException;
+import com.db4o.ObjectContainer;
+
import freenet.client.ClientMetadata;
import freenet.client.FetchException;
import freenet.client.FetchResult;
@@ -23,7 +25,7 @@
import freenet.support.SimpleReadOnlyArrayBucket;
import freenet.support.api.Bucket;
-public class NodeARKInserter implements ClientCallback {
+public class NodeARKInserter implements ClientCallback, RequestClient {
/**
*
@@ -158,12 +160,11 @@
inserter = new ClientPutter(this, b, uri,
new ClientMetadata("text/plain") /* it won't quite fit in an SSK anyway */,
node.clientCore.makeClient((short)0, true).getInsertContext(true),
- node.clientCore.requestStarters.chkPutScheduler, node.clientCore.requestStarters.sskPutScheduler,
RequestStarter.INTERACTIVE_PRIORITY_CLASS, false, false, this, null, null, false);
try {
- inserter.start(false);
+ node.clientCore.clientContext.start(inserter, false);
synchronized (this) {
if(fs.get("physical.udp") == null)
@@ -183,19 +184,19 @@
}
}
} catch (InsertException e) {
- onFailure(e, inserter);
+ onFailure(e, inserter, null);
}
}
- public void onSuccess(FetchResult result, ClientGetter state) {
+ public void onSuccess(FetchResult result, ClientGetter state, ObjectContainer container) {
// Impossible
}
- public void onFailure(FetchException e, ClientGetter state) {
+ public void onFailure(FetchException e, ClientGetter state, ObjectContainer container) {
// Impossible
}
- public void onSuccess(BaseClientPutter state) {
+ public void onSuccess(BaseClientPutter state, ObjectContainer container) {
FreenetURI uri = state.getURI();
if(logMINOR) Logger.minor(this, darknetOpennetString + " ARK insert succeeded: " + uri);
synchronized (this) {
@@ -206,7 +207,7 @@
startInserter();
}
- public void onFailure(InsertException e, BaseClientPutter state) {
+ public void onFailure(InsertException e, BaseClientPutter state, ObjectContainer container) {
if(logMINOR) Logger.minor(this, darknetOpennetString + " ARK insert failed: "+e);
synchronized(this) {
lastInsertedPeers = null;
@@ -222,7 +223,7 @@
startInserter();
}
- public void onGeneratedURI(FreenetURI uri, BaseClientPutter state) {
+ public void onGeneratedURI(FreenetURI uri, BaseClientPutter state, ObjectContainer container) {
if(logMINOR) Logger.minor(this, "Generated URI for " + darknetOpennetString + " ARK: "+uri);
long l = uri.getSuggestedEdition();
if(l < crypto.myARKNumber) {
@@ -253,12 +254,20 @@
startInserter();
}
- public void onMajorProgress() {
+ public void onMajorProgress(ObjectContainer container) {
// Ignore
}
- public void onFetchable(BaseClientPutter state) {
+ public void onFetchable(BaseClientPutter state, ObjectContainer container) {
// Ignore, we don't care
}
+ public boolean persistent() {
+ return false;
+ }
+
+ public void removeFrom(ObjectContainer container) {
+ throw new UnsupportedOperationException();
+ }
+
}
\ No newline at end of file
Modified: trunk/freenet/src/freenet/node/NodeClientCore.java
===================================================================
--- trunk/freenet/src/freenet/node/NodeClientCore.java 2009-04-01 20:12:14 UTC
(rev 26321)
+++ trunk/freenet/src/freenet/node/NodeClientCore.java 2009-04-01 20:34:09 UTC
(rev 26322)
@@ -3,14 +3,26 @@
import java.io.File;
import java.io.IOException;
import java.net.URI;
+import java.util.Iterator;
+import java.util.LinkedList;
+import org.tanukisoftware.wrapper.WrapperManager;
+
+import com.db4o.ObjectContainer;
+
import freenet.client.ArchiveManager;
+import freenet.client.FECQueue;
import freenet.client.HighLevelSimpleClient;
import freenet.client.HighLevelSimpleClientImpl;
import freenet.client.InsertContext;
import freenet.client.async.BackgroundBlockEncoder;
+import freenet.client.async.ClientContext;
import freenet.client.async.ClientRequestScheduler;
+import freenet.client.async.DBJob;
+import freenet.client.async.DBJobRunner;
+import freenet.client.async.DatastoreChecker;
import freenet.client.async.HealingQueue;
+import freenet.client.async.InsertCompressor;
import freenet.client.async.SimpleHealingQueue;
import freenet.client.async.USKManager;
import freenet.client.events.SimpleEventProducer;
@@ -41,6 +53,7 @@
import freenet.keys.SSKBlock;
import freenet.keys.SSKVerifyException;
import freenet.l10n.L10n;
+import freenet.node.NodeRestartJobsQueue.RestartDBJob;
import freenet.node.SecurityLevels.PHYSICAL_THREAT_LEVEL;
import freenet.node.fcp.FCPServer;
import freenet.node.useralerts.SimpleUserAlert;
@@ -50,13 +63,17 @@
import freenet.support.Base64;
import freenet.support.Executor;
import freenet.support.Logger;
-import freenet.support.SerialExecutor;
+import freenet.support.MutableBoolean;
+import freenet.support.OOMHandler;
+import freenet.support.OOMHook;
+import freenet.support.PrioritizedSerialExecutor;
import freenet.support.SimpleFieldSet;
import freenet.support.api.BooleanCallback;
import freenet.support.api.IntCallback;
import freenet.support.api.LongCallback;
import freenet.support.api.StringArrCallback;
import freenet.support.api.StringCallback;
+import freenet.support.io.DelayedFreeBucket;
import freenet.support.compress.RealCompressor;
import freenet.support.io.FileUtil;
import freenet.support.io.FilenameGenerator;
@@ -67,13 +84,14 @@
/**
* The connection between the node and the client layer.
*/
-public class NodeClientCore implements Persistable {
+public class NodeClientCore implements Persistable, DBJobRunner, OOMHook {
private static boolean logMINOR;
public final USKManager uskManager;
- final ArchiveManager archiveManager;
+ public final ArchiveManager archiveManager;
public final RequestStarterGroup requestStarters;
private final HealingQueue healingQueue;
+ public final NodeRestartJobsQueue restartJobsQueue;
/** Must be included as a hidden field in order for any dangerous HTTP operation to complete successfully. */
public final String formPassword;
File downloadDir;
@@ -82,13 +100,15 @@
private boolean downloadAllowedEverywhere;
private File[] uploadAllowedDirs;
private boolean uploadAllowedEverywhere;
- final FilenameGenerator tempFilenameGenerator;
+ public final FilenameGenerator tempFilenameGenerator;
+ public final FilenameGenerator persistentFilenameGenerator;
public final TempBucketFactory tempBucketFactory;
public final PersistentTempBucketFactory persistentTempBucketFactory;
public final Node node;
final NodeStats nodeStats;
public final RandomSource random;
final File tempDir; // Persistent temporary buckets
+ public final FECQueue fecQueue;
public final UserAlertManager alerts;
final TextModeClientInterfaceServer tmci;
TextModeClientInterface directTMCI;
@@ -100,36 +120,51 @@
/** If true, requests are resumed lazily i.e. startup does not block waiting for them. */
private boolean lazyResume;
protected final Persister persister;
- private final SerialExecutor clientSlowSerialExecutor[];
+ /** All client-layer database access occurs on a SerialExecutor, so that we don't need
+ * to have multiple parallel transactions. Advantages:
+ * - We never have two copies of the same object in RAM, and more broadly, we don't
+ * need to worry about interactions between objects from different transactions.
+ * - Only one weak-reference cache for the database.
+ * - No need to refresh live objects.
+ * - Deactivation is simpler.
+ * Note that the priorities are thread priorities, not request priorities.
+ */
+ public transient final PrioritizedSerialExecutor clientDatabaseExecutor;
+ public final DatastoreChecker storeChecker;
+
+ public transient final ClientContext clientContext;
+
public static int maxBackgroundUSKFetchers; // Client stuff that needs to be configged - FIXME
static final int MAX_ARCHIVE_HANDLERS = 200; // don't take up much RAM... FIXME
static final long MAX_CACHED_ARCHIVE_DATA = 32 * 1024 * 1024; // make a fixed fraction of the store by default? FIXME
static final long MAX_ARCHIVED_FILE_SIZE = 1024 * 1024; // arbitrary... FIXME
static final int MAX_CACHED_ELEMENTS = 256 * 1024; // equally arbitrary! FIXME hopefully we can cache many of these though
+ /** Each FEC item can take a fair amount of RAM, since it's fully activated with all the buckets, potentially 256
+ * of them, so only cache a small number of them */
+ private static final int FEC_QUEUE_CACHE_SIZE = 20;
private UserAlert startingUpAlert;
-
- NodeClientCore(Node node, Config config, SubConfig nodeConfig, File nodeDir, int portNumber, int sortOrder, SimpleFieldSet oldConfig, SubConfig fproxyConfig, SimpleToadletServer toadlets) throws NodeInitException {
+ private RestartDBJob[] startupDatabaseJobs;
+
+ NodeClientCore(Node node, Config config, SubConfig nodeConfig, File nodeDir, int portNumber, int sortOrder, SimpleFieldSet oldConfig, SubConfig fproxyConfig, SimpleToadletServer toadlets, ObjectContainer container) throws NodeInitException {
this.node = node;
this.nodeStats = node.nodeStats;
this.random = node.random;
+ fecQueue = FECQueue.create(node.nodeDBHandle, container);
this.backgroundBlockEncoder = new BackgroundBlockEncoder();
- clientSlowSerialExecutor = new SerialExecutor[RequestStarter.MINIMUM_PRIORITY_CLASS - RequestStarter.MAXIMUM_PRIORITY_CLASS + 1];
- for(int i = 0; i < clientSlowSerialExecutor.length; i++) {
- int prio;
- if(i <= RequestStarter.IMMEDIATE_SPLITFILE_PRIORITY_CLASS)
- prio = NativeThread.NORM_PRIORITY;
- else if(i <= RequestStarter.UPDATE_PRIORITY_CLASS)
- prio = NativeThread.LOW_PRIORITY;
- else
- prio = NativeThread.MIN_PRIORITY;
- clientSlowSerialExecutor[i] = new SerialExecutor(prio);
- }
+ clientDatabaseExecutor = new PrioritizedSerialExecutor(NativeThread.NORM_PRIORITY, NativeThread.MAX_PRIORITY+1, NativeThread.NORM_PRIORITY, true);
+ storeChecker = new DatastoreChecker(node);
byte[] pwdBuf = new byte[16];
random.nextBytes(pwdBuf);
compressor = new RealCompressor(node.executor);
this.formPassword = Base64.encode(pwdBuf);
alerts = new UserAlertManager(this);
logMINOR = Logger.shouldLog(Logger.MINOR, this);
+ restartJobsQueue = NodeRestartJobsQueue.init(node.nodeDBHandle, container);
+ startupDatabaseJobs = restartJobsQueue.getEarlyRestartDatabaseJobs(container);
+ if(startupDatabaseJobs != null &&
+ startupDatabaseJobs.length > 0)
+ queue(startupJobRunner, NativeThread.HIGH_PRIORITY, false);
+ restartJobsQueue.addLateRestartDatabaseJobs(this, container);
persister = new ConfigurablePersister(this, nodeConfig, "clientThrottleFile", "client-throttle.dat", sortOrder++, true, false, "NodeClientCore.fileForClientStats", "NodeClientCore.fileForClientStatsLong", node.ps, nodeDir);
@@ -140,7 +175,6 @@
if(logMINOR)
Logger.minor(this, "Serializing RequestStarterGroup
from:\n" + throttleFS);
- requestStarters = new RequestStarterGroup(node, this,
portNumber, random, config, throttleFS);
// Temp files
@@ -178,6 +212,8 @@
String msg = "Could not find or create temporary
directory (filename generator)";
throw new
NodeInitException(NodeInitException.EXIT_BAD_TEMP_DIR, msg);
}
+
+ uskManager = new USKManager(this);
// Persistent temp files
nodeConfig.register("encryptPersistentTempBuckets", true,
sortOrder++, true, false, "NodeClientCore.encryptPersistentTempBuckets",
"NodeClientCore.encryptPersistentTempBucketsLong", new BooleanCallback() {
@@ -217,14 +253,19 @@
}
});
try {
- persistentTempBucketFactory = new PersistentTempBucketFactory(new File(nodeConfig.getString("persistentTempDir")), "freenet-temp-", random, node.fastWeakRandom, nodeConfig.getBoolean("encryptPersistentTempBuckets"));
+ File dir = new File(nodeConfig.getString("persistentTempDir"));
+ String prefix = "freenet-temp-";
+ persistentTempBucketFactory = PersistentTempBucketFactory.load(dir, prefix, random, node.fastWeakRandom, container, node.nodeDBHandle, nodeConfig.getBoolean("encryptPersistentTempBuckets"), this, node.getTicker());
+ persistentTempBucketFactory.init(dir, prefix, random, node.fastWeakRandom);
+ persistentFilenameGenerator = persistentTempBucketFactory.fg;
} catch(IOException e2) {
- String msg = "Could not find or create persistent
temporary directory";
+ String msg = "Could not find or create persistent
temporary directory: "+e2;
+ e2.printStackTrace();
throw new
NodeInitException(NodeInitException.EXIT_BAD_TEMP_DIR, msg);
}
nodeConfig.register("maxRAMBucketSize", "128KiB", sortOrder++,
true, false, "NodeClientCore.maxRAMBucketSize",
"NodeClientCore.maxRAMBucketSizeLong", new LongCallback() {
-
+
@Override
public Long get() {
return (tempBucketFactory == null ? 0 : tempBucketFactory.getMaxRAMBucketSize());
@@ -268,6 +309,22 @@
});
tempBucketFactory = new TempBucketFactory(node.executor, tempFilenameGenerator, nodeConfig.getLong("maxRAMBucketSize"), nodeConfig.getLong("RAMBucketPoolSize"), random, node.fastWeakRandom, nodeConfig.getBoolean("encryptTempBuckets"));
+ archiveManager = new ArchiveManager(MAX_ARCHIVE_HANDLERS, MAX_CACHED_ARCHIVE_DATA, MAX_ARCHIVED_FILE_SIZE, MAX_CACHED_ELEMENTS, tempBucketFactory);
+
+ healingQueue = new SimpleHealingQueue(
+ new InsertContext(tempBucketFactory, tempBucketFactory, persistentTempBucketFactory,
+ 0, 2, 1, 0, 0, new SimpleEventProducer(),
+ !Node.DONT_CACHE_LOCAL_REQUESTS), RequestStarter.PREFETCH_PRIORITY_CLASS, 512 /* FIXME make configurable */);
+
+ clientContext = new ClientContext(this, fecQueue, node.executor, backgroundBlockEncoder, archiveManager, persistentTempBucketFactory, tempBucketFactory, healingQueue, uskManager, random, node.fastWeakRandom, node.getTicker(), tempFilenameGenerator, persistentFilenameGenerator, compressor);
+ compressor.setClientContext(clientContext);
+ storeChecker.setContext(clientContext);
+
+ requestStarters = new RequestStarterGroup(node, this, portNumber, random, config, throttleFS, clientContext);
+ clientContext.init(requestStarters);
+ ClientRequestScheduler.loadKeyListeners(container, clientContext);
+ InsertCompressor.load(container, clientContext);
+
node.securityLevels.addPhysicalThreatLevelListener(new SecurityLevelListener<PHYSICAL_THREAT_LEVEL>() {
public void onChange(PHYSICAL_THREAT_LEVEL oldLevel, PHYSICAL_THREAT_LEVEL newLevel) {
@@ -366,16 +423,10 @@
});
setUploadAllowedDirs(nodeConfig.getStringArr("uploadAllowedDirs"));
- archiveManager = new ArchiveManager(MAX_ARCHIVE_HANDLERS, MAX_CACHED_ARCHIVE_DATA, MAX_ARCHIVED_FILE_SIZE, MAX_CACHED_ELEMENTS, tempBucketFactory);
Logger.normal(this, "Initializing USK Manager");
System.out.println("Initializing USK Manager");
- uskManager = new USKManager(this);
+ uskManager.init(container, clientContext);
- healingQueue = new SimpleHealingQueue(requestStarters.chkPutScheduler,
- new InsertContext(tempBucketFactory, tempBucketFactory, persistentTempBucketFactory,
- random, 0, 2, 1, 0, 0, new SimpleEventProducer(),
- !Node.DONT_CACHE_LOCAL_REQUESTS, uskManager, backgroundBlockEncoder, node.executor, compressor), RequestStarter.PREFETCH_PRIORITY_CLASS, 512 /* FIXME make configurable */);
-
nodeConfig.register("lazyResume", false, sortOrder++, true,
false, "NodeClientCore.lazyResume",
"NodeClientCore.lazyResumeLong", new BooleanCallback() {
@@ -426,7 +477,7 @@
 		// FCP (including persistent requests so needs to start before FProxy)
 		try {
-			fcpServer = FCPServer.maybeCreate(node, this, node.config);
+			fcpServer = FCPServer.maybeCreate(node, this, node.config, container);
 		} catch(IOException e) {
 			throw new NodeInitException(NodeInitException.EXIT_COULD_NOT_START_FCP, "Could not start FCP: " + e);
 		} catch(InvalidConfigValueException e) {
@@ -439,6 +490,8 @@
toadletContainer = toadlets;
toadletContainer.setCore(this);
toadletContainer.setBucketFactory(tempBucketFactory);
+		fecQueue.init(RequestStarter.NUMBER_OF_PRIORITY_CLASSES, FEC_QUEUE_CACHE_SIZE, clientContext.jobRunner, node.executor, clientContext);
+ OOMHandler.addOOMHook(this);
}
private static String l10n(String key) {
@@ -487,14 +540,23 @@
}
public void start(Config config) throws NodeInitException {
+ backgroundBlockEncoder.setContext(clientContext);
node.executor.execute(backgroundBlockEncoder, "Background block
encoder");
+ clientContext.jobRunner.queue(new DBJob() {
+
+ public void run(ObjectContainer container,
ClientContext context) {
+ ArchiveManager.init(container, context,
context.nodeDBHandle);
+ }
+
+ }, NativeThread.MAX_PRIORITY, false);
persister.start();
+
+ storeChecker.start(node.executor, "Datastore checker");
if(fcpServer != null)
fcpServer.maybeStart();
if(tmci != null)
tmci.start();
- for(int i = 0; i < clientSlowSerialExecutor.length; i++)
- clientSlowSerialExecutor[i].start(node.executor, "Heavy
client jobs runner (" + i + ")");
+ backgroundBlockEncoder.runPersistentQueue(clientContext);
node.executor.execute(compressor, "Compression scheduler");
node.executor.execute(new PrioRunnable() {
@@ -518,8 +580,37 @@
return NativeThread.LOW_PRIORITY;
}
}, "Startup completion thread");
+
+ clientDatabaseExecutor.start(node.executor, "Client database
access thread");
}
+
+ private int startupDatabaseJobsDone = 0;
+
+	private DBJob startupJobRunner = new DBJob() {
+		public void run(ObjectContainer container, ClientContext context) {
+			RestartDBJob job = startupDatabaseJobs[startupDatabaseJobsDone];
+			try {
+				container.activate(job.job, 1);
+				// Remove before execution, to allow it to re-add itself if it wants to
+				System.err.println("Cleaning up after restart: "+job.job);
+				restartJobsQueue.removeRestartJob(job.job, job.prio, container);
+				job.job.run(container, context);
+				container.commit();
+			} catch (Throwable t) {
+				Logger.error(this, "Caught "+t+" in startup job "+job, t);
+				// Try again next time
+				restartJobsQueue.queueRestartJob(job.job, job.prio, container, true);
+			}
+			startupDatabaseJobsDone++;
+			if(startupDatabaseJobsDone == startupDatabaseJobs.length)
+				startupDatabaseJobs = null;
+			else
+				context.jobRunner.queue(startupJobRunner, NativeThread.HIGH_PRIORITY, false);
+		}
+
+ };
+
public interface SimpleRequestSenderCompletionListener {
public void completed(boolean success);
@@ -1091,8 +1182,12 @@
}
 	public HighLevelSimpleClient makeClient(short prioClass, boolean forceDontIgnoreTooManyPathComponents) {
-		return new HighLevelSimpleClientImpl(this, archiveManager, tempBucketFactory, random, !Node.DONT_CACHE_LOCAL_REQUESTS, prioClass, forceDontIgnoreTooManyPathComponents, clientSlowSerialExecutor);
+		return new HighLevelSimpleClientImpl(this, tempBucketFactory, random, !Node.DONT_CACHE_LOCAL_REQUESTS, prioClass, forceDontIgnoreTooManyPathComponents);
}
+
+ public boolean cacheInserts() {
+ return !Node.DONT_CACHE_LOCAL_REQUESTS;
+ }
public FCPServer getFCPServer() {
return fcpServer;
@@ -1221,18 +1316,6 @@
return tempDir;
}
- /**
- * Has any client registered an interest in this particular key?
- */
- public boolean clientWantKey(Key key) {
- if(key instanceof NodeCHK)
-			return requestStarters.chkFetchScheduler.anyWantKey(key);
-		else if(key instanceof NodeSSK)
-			return requestStarters.sskFetchScheduler.anyWantKey(key);
-		else
-			throw new IllegalArgumentException("Not a CHK and not an SSK!");
- }
-
public boolean hasLoadedQueue() {
return fcpServer.hasFinishedStart();
}
@@ -1255,7 +1338,154 @@
return toadletContainer.getBookmarkURIs();
}
- public long countQueuedRequests() {
- return requestStarters.countQueuedRequests();
+ public long countTransientQueuedRequests() {
+ return requestStarters.countTransientQueuedRequests();
}
+
+ public void queue(final DBJob job, int priority, boolean checkDupes) {
+ if(checkDupes)
+			this.clientDatabaseExecutor.executeNoDupes(new DBJobWrapper(job), priority, ""+job);
+		else
+			this.clientDatabaseExecutor.execute(new DBJobWrapper(job), priority, ""+job);
+ }
+
+ private boolean killedDatabase = false;
+
+ class DBJobWrapper implements Runnable {
+
+ DBJobWrapper(DBJob job) {
+ this.job = job;
+ if(job == null) throw new NullPointerException();
+ }
+
+ final DBJob job;
+
+ public void run() {
+
+ try {
+ synchronized(NodeClientCore.this) {
+ if(killedDatabase) {
+ Logger.error(this, "Database
killed already, not running job");
+ return;
+ }
+ }
+ if(job == null) throw new
NullPointerException();
+ if(node == null) throw new
NullPointerException();
+ job.run(node.db, clientContext);
+ boolean killed;
+ synchronized(NodeClientCore.this) {
+ killed = killedDatabase;
+ }
+ if(killed) {
+ node.db.rollback();
+ return;
+ } else {
+
persistentTempBucketFactory.preCommit(node.db);
+ node.db.commit();
+ }
+ if(Logger.shouldLog(Logger.MINOR, this))
Logger.minor(this, "COMMITTED");
+ persistentTempBucketFactory.postCommit(node.db);
+ } catch (Throwable t) {
+ if(t instanceof OutOfMemoryError) {
+ synchronized(NodeClientCore.this) {
+ killedDatabase = true;
+ }
+ OOMHandler.handleOOM((OutOfMemoryError)
t);
+ } else {
+ Logger.error(this, "Failed to run
database job "+job+" : caught "+t, t);
+ }
+ boolean killed;
+ synchronized(NodeClientCore.this) {
+ killed = killedDatabase;
+ }
+ if(killed) {
+ node.db.rollback();
+ }
+ }
+ }
+
+ public boolean equals(Object o) {
+ if(!(o instanceof DBJobWrapper)) return false;
+ DBJobWrapper cmp = (DBJobWrapper) o;
+ return (cmp.job == job);
+ }
+
+ public String toString() {
+ return "DBJobWrapper:"+job;
+ }
+
+ }
+
+ public boolean onDatabaseThread() {
+ return clientDatabaseExecutor.onThread();
+ }
+
+ public int getQueueSize(int priority) {
+ return clientDatabaseExecutor.getQueueSize(priority);
+ }
+
+ public void handleLowMemory() throws Exception {
+ // Ignore
+ }
+
+ public void handleOutOfMemory() throws Exception {
+ synchronized(this) {
+ killedDatabase = true;
+ }
+ WrapperManager.requestThreadDump();
+ System.err.println("Out of memory: Emergency shutdown to
protect database integrity in progress...");
+
System.exit(NodeInitException.EXIT_OUT_OF_MEMORY_PROTECTING_DATABASE);
+ }
+
+ /**
+ * Queue a job to be run soon after startup. The job must delete itself.
+ */
+	public void queueRestartJob(DBJob job, int priority, ObjectContainer container, boolean early) {
+		restartJobsQueue.queueRestartJob(job, priority, container, early);
+	}
+
+	public void removeRestartJob(DBJob job, int priority, ObjectContainer container) {
+ restartJobsQueue.removeRestartJob(job, priority, container);
+ }
+
+ public void runBlocking(final DBJob job, int priority) {
+ if(clientDatabaseExecutor.onThread()) {
+ job.run(node.db, clientContext);
+ } else {
+ final MutableBoolean finished = new MutableBoolean();
+ queue(new DBJob() {
+
+			public void run(ObjectContainer container, ClientContext context) {
+ try {
+ job.run(container, context);
+ } finally {
+ synchronized(finished) {
+ finished.value = true;
+ finished.notifyAll();
+ }
+ }
+ }
+
+ }, priority, false);
+ synchronized(finished) {
+ while(!finished.value) {
+ try {
+ finished.wait();
+ } catch (InterruptedException e) {
+ // Ignore
+ }
+ }
+ }
+ }
+ }
+
+ public boolean objectCanNew(ObjectContainer container) {
+ Logger.error(this, "Not storing NodeClientCore in database",
new Exception("error"));
+ return false;
+ }
+
+ public synchronized void killDatabase() {
+ killedDatabase = true;
+ }
+
}
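
For illustration only (not part of this commit): the queue()/DBJobWrapper machinery above means client code never touches node.db directly; it hands the database thread a DBJob and lets the wrapper commit or roll back. A minimal sketch, where someState stands for any hypothetical persistent object:

	core.queue(new DBJob() {
		public void run(ObjectContainer container, ClientContext context) {
			// Runs on the single client-database thread; DBJobWrapper
			// commits after the job returns, or rolls back if the
			// database has been killed (e.g. by handleOutOfMemory()).
			container.store(someState);
		}
	}, NativeThread.HIGH_PRIORITY, true); // true: skip if an equal job is already queued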
Modified: trunk/freenet/src/freenet/node/NodeCrypto.java
===================================================================
--- trunk/freenet/src/freenet/node/NodeCrypto.java	2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/node/NodeCrypto.java	2009-04-01 20:34:09 UTC (rev 26322)
@@ -12,6 +12,10 @@
import java.util.ArrayList;
import java.util.zip.DeflaterOutputStream;
+import com.db4o.ObjectContainer;
+import com.db4o.ObjectSet;
+import com.db4o.query.Predicate;
+
import net.i2p.util.NativeBigInteger;
import freenet.crypt.BlockCipher;
import freenet.crypt.DSA;
@@ -542,4 +546,38 @@
public FreenetInetAddress getBindTo() {
return config.getBindTo();
}
+
+ public long getNodeHandle(ObjectContainer setupContainer) {
+ // Ignore warnings, this is db4o magic.
+		ObjectSet<HandlePortTuple> result = setupContainer.query(new Predicate<HandlePortTuple>() {
+			public boolean match(HandlePortTuple tuple) {
+				return tuple.portNumber == portNumber;
+			}
+		});
+		long handle;
+		if(result.hasNext()) {
+			handle = result.next().handle;
+			System.err.println("Retrieved database handle for node on port "+portNumber+": "+handle);
+			return handle;
+		} else {
+			while(true) {
+				handle = random.nextLong();
+				HandlePortTuple tuple = new HandlePortTuple();
+				tuple.handle = handle;
+				// Double-check with QBE, just in case the RNG is broken (similar things have happened before!)
+				ObjectSet os = setupContainer.get(tuple);
+				if(os.hasNext()) {
+					System.err.println("Generating database handle for node: already taken: "+handle);
+					continue;
+				}
+				tuple.portNumber = portNumber;
+				setupContainer.store(tuple);
+				setupContainer.commit();
+				if(Logger.shouldLog(Logger.MINOR, this)) Logger.minor(this, "COMMITTED");
+				System.err.println("Generated and stored database handle for node on port "+portNumber+": "+handle);
+ return handle;
+ }
+ }
+ }
}
+
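
For illustration only (not part of this commit): getNodeHandle() gives every persistent singleton a cheap way to scope itself to one node in a shared database. A hypothetical class MyNodeState would look itself up by handle exactly as NodeRestartJobsQueue.init() does below:

	ObjectSet<MyNodeState> results = container.query(new Predicate<MyNodeState>() {
		public boolean match(MyNodeState s) {
			return s.nodeDBHandle == nodeDBHandle; // only this node's instance
		}
	});
	MyNodeState state = results.hasNext() ? results.next() : new MyNodeState(nodeDBHandle);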
Modified: trunk/freenet/src/freenet/node/NodeInitException.java
===================================================================
--- trunk/freenet/src/freenet/node/NodeInitException.java	2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/node/NodeInitException.java	2009-04-01 20:34:09 UTC (rev 26322)
@@ -36,7 +36,9 @@
public static final int EXIT_STORE_FILE_NOT_FOUND = 1;
public static final int EXIT_NODE_UPPER_LIMIT = 1024;
public static final int EXIT_BROKE_WRAPPER_CONF = 28;
+ public static final int EXIT_OUT_OF_MEMORY_PROTECTING_DATABASE = 29;
public static final int EXIT_EXCEPTION_TO_DEBUG = 1023;
+
private static final long serialVersionUID = -1;
NodeInitException(int exitCode, String msg) {
Copied: trunk/freenet/src/freenet/node/NodeRestartJobsQueue.java (from rev 26320, branches/db4o/freenet/src/freenet/node/NodeRestartJobsQueue.java)
===================================================================
--- trunk/freenet/src/freenet/node/NodeRestartJobsQueue.java	(rev 0)
+++ trunk/freenet/src/freenet/node/NodeRestartJobsQueue.java	2009-04-01 20:34:09 UTC (rev 26322)
@@ -0,0 +1,169 @@
+package freenet.node;
+
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.Set;
+
+import com.db4o.ObjectContainer;
+import com.db4o.ObjectSet;
+import com.db4o.query.Predicate;
+
+import freenet.client.async.DBJob;
+import freenet.client.async.DBJobRunner;
+import freenet.support.Logger;
+import freenet.support.io.NativeThread;
+
+public class NodeRestartJobsQueue {
+
+ private final long nodeDBHandle;
+
+ public NodeRestartJobsQueue(long nodeDBHandle2) {
+ nodeDBHandle = nodeDBHandle2;
+ dbJobs = new Set[NativeThread.JAVA_PRIORITY_RANGE];
+ dbJobsEarly = new Set[NativeThread.JAVA_PRIORITY_RANGE];
+ for(int i=0;i<dbJobs.length;i++) {
+ dbJobs[i] = new HashSet<DBJob>();
+ dbJobsEarly[i] = new HashSet<DBJob>();
+ }
+ }
+
+	public static NodeRestartJobsQueue init(final long nodeDBHandle, ObjectContainer container) {
+		ObjectSet<NodeRestartJobsQueue> results =
+			container.query(new Predicate<NodeRestartJobsQueue>() {
+
+			@Override
+			public boolean match(NodeRestartJobsQueue arg0) {
+				return (arg0.nodeDBHandle == nodeDBHandle);
+			}
+
+		});
+		if(results.hasNext()) {
+			System.err.println("Found old restart jobs queue");
+			NodeRestartJobsQueue queue = (NodeRestartJobsQueue) results.next();
+			container.activate(queue, 1);
+			queue.onInit(container);
+			return queue;
+		}
+		NodeRestartJobsQueue queue = new NodeRestartJobsQueue(nodeDBHandle);
+		container.store(queue);
+		System.err.println("Created new restart jobs queue");
+		return queue;
+ }
+
+ private void onInit(ObjectContainer container) {
+ }
+
+ private Set<DBJob>[] dbJobs;
+ private Set<DBJob>[] dbJobsEarly;
+
+	public synchronized void queueRestartJob(DBJob job, int priority, ObjectContainer container, boolean early) {
+		if(Logger.shouldLog(Logger.MINOR, this)) Logger.minor(this, "Queueing restart job "+job+" at priority "+priority+" early="+early);
+		Set<DBJob> jobs = early ? dbJobsEarly[priority] : dbJobs[priority];
+		container.store(job);
+		container.activate(jobs, 1);
+		if(jobs.add(job)) {
+			/*
+			 * Store to 1 hop only.
+			 * Otherwise db4o will update ALL the jobs on the queue to a depth of 3,
+			 * which in practice means all the buckets inside the BucketChainBucket's
+			 * linked by the BucketChainBucketKillTag's (adding new ones). This will
+			 * take ages and is in any case not what we want.
+			 * See http://tracker.db4o.com/browse/COR-1436
+			 */
+			container.ext().store(jobs, 1);
+		}
+		container.deactivate(jobs, 1);
+ }
+
+	public synchronized void removeRestartJob(DBJob job, int priority, ObjectContainer container) {
+		boolean jobWasActive = container.ext().isActive(job);
+		if(!jobWasActive) container.activate(job, 1);
+		container.activate(dbJobs[priority], 1);
+		container.activate(dbJobsEarly[priority], 1);
+		if(!(dbJobs[priority].remove(job) || dbJobsEarly[priority].remove(job))) {
+			// Not in the given priority: search every queue, indexing by the
+			// loop counter rather than the passed-in priority.
+			int found = 0;
+			for(int i=0;i<dbJobs.length;i++) {
+				container.activate(dbJobs[i], 1);
+				container.activate(dbJobsEarly[i], 1);
+				if(dbJobs[i].remove(job)) {
+					/*
+					 * Store to 1 hop only.
+					 * Otherwise db4o will update ALL the jobs on the queue to a depth of 3,
+					 * which in practice means all the buckets inside the BucketChainBucket's
+					 * linked by the BucketChainBucketKillTag's (adding new ones). This will
+					 * take ages and is in any case not what we want.
+					 * See http://tracker.db4o.com/browse/COR-1436
+					 */
+					container.ext().store(dbJobs[i], 1);
+					found++;
+				}
+				if(dbJobsEarly[i].remove(job)) {
+					/*
+					 * Store to 1 hop only.
+					 * Otherwise db4o will update ALL the jobs on the queue to a depth of 3,
+					 * which in practice means all the buckets inside the BucketChainBucket's
+					 * linked by the BucketChainBucketKillTag's (adding new ones). This will
+					 * take ages and is in any case not what we want.
+					 * See http://tracker.db4o.com/browse/COR-1436
+					 */
+					container.ext().store(dbJobsEarly[i], 1);
+					found++;
+				}
+				container.deactivate(dbJobs[i], 1);
+				container.deactivate(dbJobsEarly[i], 1);
+			}
+			if(found > 0)
+				Logger.error(this, "Job "+job+" not in specified priority "+priority+" found in "+found+" other priorities when removing");
+			else
+				Logger.error(this, "Job "+job+" not found when removing it");
+		} else {
+			/*
+			 * Store to 1 hop only.
+			 * Otherwise db4o will update ALL the jobs on the queue to a depth of 3,
+			 * which in practice means all the buckets inside the BucketChainBucket's
+			 * linked by the BucketChainBucketKillTag's (adding new ones). This will
+			 * take ages and is in any case not what we want.
+			 * See http://tracker.db4o.com/browse/COR-1436
+			 */
+			container.ext().store(dbJobs[priority], 1);
+			container.deactivate(dbJobs[priority], 1);
+			container.ext().store(dbJobsEarly[priority], 1);
+			container.deactivate(dbJobsEarly[priority], 1);
+		}
+		if(!jobWasActive) container.deactivate(job, 1);
+ }
+
+ class RestartDBJob {
+ public RestartDBJob(DBJob job2, int i) {
+ job = job2;
+ prio = i;
+ }
+ DBJob job;
+ int prio;
+ }
+
+	synchronized RestartDBJob[] getEarlyRestartDatabaseJobs(ObjectContainer container) {
+		ArrayList<RestartDBJob> list = new ArrayList<RestartDBJob>();
+		for(int i=dbJobsEarly.length-1;i>=0;i--) {
+			container.activate(dbJobsEarly[i], 1);
+			if(!dbJobsEarly[i].isEmpty())
+				System.err.println("Adding "+dbJobsEarly[i].size()+" early restart jobs at priority "+i);
+			for(DBJob job : dbJobsEarly[i])
+				list.add(new RestartDBJob(job, i));
+			container.deactivate(dbJobsEarly[i], 1);
+		}
+		return list.toArray(new RestartDBJob[list.size()]);
+ }
+
+	void addLateRestartDatabaseJobs(DBJobRunner runner, ObjectContainer container) {
+		for(int i=dbJobsEarly.length-1;i>=0;i--) {
+			container.activate(dbJobs[i], 1);
+			if(!dbJobs[i].isEmpty())
+				System.err.println("Adding "+dbJobs[i].size()+" restart jobs at priority "+i);
+ for(DBJob job : dbJobs[i]) {
+ container.activate(job, 1);
+ runner.queue(job, i, false);
+ }
+ }
+ }
+
+}
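
For illustration only (not part of this commit): the COR-1436 comments above turn on the difference between db4o's default cascaded update and an explicit depth-limited store. With jobs standing in for one of the queue's sets, the idiom is:

	container.activate(jobs, 1);     // activate the set itself, not its members' object graphs
	jobs.add(job);
	// A plain container.store(jobs) would descend to the configured update
	// depth and rewrite every DBJob (and bucket) reachable from the set.
	container.ext().store(jobs, 1);  // shallow store: only the set object is updated
	container.deactivate(jobs, 1);   // release it again to keep the heap small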
Copied: trunk/freenet/src/freenet/node/NullSendableRequestItem.java (from rev 26320, branches/db4o/freenet/src/freenet/node/NullSendableRequestItem.java)
===================================================================
--- trunk/freenet/src/freenet/node/NullSendableRequestItem.java	(rev 0)
+++ trunk/freenet/src/freenet/node/NullSendableRequestItem.java	2009-04-01 20:34:09 UTC (rev 26322)
@@ -0,0 +1,11 @@
+package freenet.node;
+
+public class NullSendableRequestItem implements SendableRequestItem {
+
+	public static final SendableRequestItem nullItem = new NullSendableRequestItem();
+
+ public void dump() {
+ // Do nothing, we will be GC'ed.
+ }
+
+}
Modified: trunk/freenet/src/freenet/node/PeerNode.java
===================================================================
--- trunk/freenet/src/freenet/node/PeerNode.java	2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/node/PeerNode.java	2009-04-01 20:34:09 UTC (rev 26322)
@@ -2105,7 +2105,7 @@
}
 			if(arkFetcher == null) {
 				Logger.minor(this, "Starting ARK fetcher for " + this + " : " + myARK);
-				arkFetcher = node.clientCore.uskManager.subscribeContent(myARK, this, true, node.arkFetcherContext, RequestStarter.IMMEDIATE_SPLITFILE_PRIORITY_CLASS, node);
+				arkFetcher = node.clientCore.uskManager.subscribeContent(myARK, this, true, node.arkFetcherContext, RequestStarter.IMMEDIATE_SPLITFILE_PRIORITY_CLASS, node.nonPersistentClient);
}
}
}
Copied: trunk/freenet/src/freenet/node/RequestClient.java (from rev 26320, branches/db4o/freenet/src/freenet/node/RequestClient.java)
===================================================================
--- trunk/freenet/src/freenet/node/RequestClient.java	(rev 0)
+++ trunk/freenet/src/freenet/node/RequestClient.java	2009-04-01 20:34:09 UTC (rev 26322)
@@ -0,0 +1,23 @@
+/* This code is part of Freenet. It is distributed under the GNU General
+ * Public License, version 2 (or at your option any later version). See
+ * http://www.gnu.org/ for further details of the GPL. */
+package freenet.node;
+
+import com.db4o.ObjectContainer;
+
+/**
+ * Must be implemented by any client object returned by SendableRequest.getClient().
+ * Mostly this is for scheduling, but it does have one key purpose: to identify whether
+ * a request is persistent or not.
+ * @author toad
+ */
+public interface RequestClient {
+
+ /**
+ * Is this request persistent? **Must not change!**
+ */
+ public boolean persistent();
+
+ public void removeFrom(ObjectContainer container);
+
+}
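
For illustration only (not part of this commit): the simplest conforming implementation is a transient client, in the spirit of the node.nonPersistentClient that PeerNode now passes to subscribeContent():

	RequestClient transientClient = new RequestClient() {
		public boolean persistent() {
			return false; // fixed for the lifetime of this object, as the contract requires
		}
		public void removeFrom(ObjectContainer container) {
			// never stored in the database, so there is nothing to delete
			throw new UnsupportedOperationException();
		}
	};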
Modified: trunk/freenet/src/freenet/node/RequestScheduler.java
===================================================================
--- trunk/freenet/src/freenet/node/RequestScheduler.java	2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/node/RequestScheduler.java	2009-04-01 20:34:09 UTC (rev 26322)
@@ -3,20 +3,26 @@
* http://www.gnu.org/ for further details of the GPL. */
package freenet.node;
+import java.util.LinkedList;
+
+import com.db4o.ObjectContainer;
+
+import freenet.client.FECQueue;
+import freenet.client.async.ChosenBlock;
+import freenet.client.async.ClientContext;
import freenet.keys.ClientKey;
-import freenet.support.RandomGrabArray;
+import freenet.keys.Key;
public interface RequestScheduler {
- public SendableRequest removeFirst();
-
 	/**
 	 * Tell the scheduler that a request from a specific RandomGrabArray succeeded.
 	 * Definition of "succeeded" will vary, but the point is most schedulers will run another
 	 * request from the parentGrabArray in the near future on the theory that if one works,
-	 * another may also work.
+	 * another may also work. Also, delete the ChosenRequest if it is persistent.
+	 * @param req The request we ran, which must be deleted.
 	 * */
-	public void succeeded(RandomGrabArray parentGrabArray);
+	public void succeeded(BaseSendableGet get, ChosenBlock req);
/**
 	 * After a key has been requested a few times, it is added to the cooldown queue for
@@ -26,14 +32,8 @@
* @param key The key to be added.
* @return The time at which the key will leave the cooldown queue.
*/
- public long queueCooldown(ClientKey key, SendableGet getter);
+	long queueCooldown(ClientKey key, SendableGet getter, ObjectContainer container);
- /**
-	 * Remove keys from the cooldown queue who have now served their time and can be requested
-	 * again.
-	 */
-	public void moveKeysFromCooldownQueue();
-
 	/** Once a key has been requested a few times, don't request it again for 30 minutes.
* To do so would be pointless given ULPRs, and just waste bandwidth. */
public static final long COOLDOWN_PERIOD = 30*60*1000;
@@ -41,6 +41,40 @@
 	 * Note: If you don't want your requests to be subject to cooldown (e.g. in fproxy), make
* your max retry count less than this (and more than -1). */
public static final int COOLDOWN_RETRIES = 3;
- public long countQueuedRequests();
+ public long countTransientQueuedRequests();
+
+ public void queueFillRequestStarterQueue();
+
+ public KeysFetchingLocally fetchingKeys();
+
+ public void removeFetchingKey(Key key);
+	public void callFailure(SendableGet get, LowLevelGetException e, int prio, boolean persistent);
+
+	public void callFailure(SendableInsert insert, LowLevelPutException exception, int prio, boolean persistent);
+
+	public FECQueue getFECQueue();
+
+	public ClientContext getContext();
+
+	public boolean addToFetching(Key key);
+
+	public ChosenBlock grabRequest();
+
+	public void removeRunningRequest(SendableRequest request);
+
+	/**
+	 * This only works for persistent requests, because transient requests are not
+	 * selected on a SendableRequest level, they are selected on a {SendableRequest, token} level.
+	 */
+	public abstract boolean isRunningOrQueuedPersistentRequest(SendableRequest request);
+
+	public boolean hasFetchingKey(Key key);
+
+	public void start(NodeClientCore core);
+
+	public boolean addTransientInsertFetching(SendableInsert insert, Object token);
+
+	public void removeTransientInsertFetching(SendableInsert insert, Object token);
+
}
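
For illustration only (not part of this commit): ignoring the insert and cancellation branches, the fetch lifecycle this interface implies (and which RequestStarter implements below) is roughly:

	ChosenBlock req = sched.grabRequest();            // transient, or one block of a persistent request
	if(req != null && sched.addToFetching(req.key)) { // claim the key so it is not fetched twice
		try {
			req.send(core, sched);                    // blocking send via the SendableRequestSender
		} finally {
			sched.removeFetchingKey(req.key);         // always release the key, success or failure
		}
	}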
Modified: trunk/freenet/src/freenet/node/RequestSender.java
===================================================================
--- trunk/freenet/src/freenet/node/RequestSender.java	2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/node/RequestSender.java	2009-04-01 20:34:09 UTC (rev 26322)
@@ -163,6 +163,7 @@
*/
 	public RequestSender(Key key, DSAPublicKey pubKey, short htl, long uid, Node n,
 			PeerNode source, boolean offersOnly) {
+ if(key.getRoutingKey() == null) throw new NullPointerException();
startTime = System.currentTimeMillis();
this.key = key;
this.pubKey = pubKey;
Modified: trunk/freenet/src/freenet/node/RequestStarter.java
===================================================================
--- trunk/freenet/src/freenet/node/RequestStarter.java	2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/node/RequestStarter.java	2009-04-01 20:34:09 UTC (rev 26322)
@@ -3,8 +3,11 @@
* http://www.gnu.org/ for further details of the GPL. */
package freenet.node;
-import java.util.HashSet;
+import com.db4o.ObjectContainer;
+import freenet.client.async.ChosenBlock;
+import freenet.client.async.ClientContext;
+import freenet.client.async.TransientChosenBlock;
import freenet.keys.Key;
import freenet.support.Logger;
import freenet.support.OOMHandler;
@@ -20,7 +23,7 @@
* And you have to provide a RequestStarterClient. We do round robin between
* clients on the same priority level.
*/
-public class RequestStarter implements Runnable, KeysFetchingLocally, RandomGrabArrayItemExclusionList {
+public class RequestStarter implements Runnable, RandomGrabArrayItemExclusionList {
private static volatile boolean logMINOR;
static {
@@ -83,7 +86,6 @@
this.averageInputBytesPerRequest = averageInputBytesPerRequest;
this.isInsert = isInsert;
this.isSSK = isSSK;
- if(!isInsert) keysFetching = new HashSet<Key>();
}
void setScheduler(RequestScheduler sched) {
@@ -91,7 +93,9 @@
}
void start() {
+ sched.start(core);
core.getExecutor().execute(this, name);
+ sched.queueFillRequestStarterQueue();
}
final String name;
@@ -102,7 +106,7 @@
}
void realRun() {
- SendableRequest req = null;
+ ChosenBlock req = null;
sentRequestTime = System.currentTimeMillis();
// The last time at which we sent a request or decided not to
long cycleTime = sentRequestTime;
@@ -121,10 +125,11 @@
}
continue;
}
- sched.moveKeysFromCooldownQueue();
- if(req == null) req = sched.removeFirst();
+ if(req == null) {
+ req = sched.grabRequest();
+ }
if(req != null) {
- if(logMINOR) Logger.minor(this, "Running
"+req+" prio "+req.getPriorityClass()+" retries "+req.getRetryCount());
+ if(logMINOR) Logger.minor(this, "Running
"+req+" priority "+req.getPriority());
// Wait
long delay = throttle.getDelay();
if(logMINOR) Logger.minor(this,
"Delay="+delay+" from "+throttle);
@@ -161,7 +166,7 @@
 				// Always take the lock on RequestStarter first. AFAICS we don't synchronize on RequestStarter anywhere else.
 				// Nested locks here prevent extra latency when there is a race, and therefore allow us to sleep indefinitely
 				synchronized(this) {
-					req = sched.removeFirst();
+					req = sched.grabRequest();
 					if(req == null) {
 						try {
 							wait(100*1000); // as close to indefinite as I'm comfortable with! Toad
@@ -173,67 +178,34 @@
}
if(req == null) continue;
if(!startRequest(req, logMINOR)) {
-				if(!req.isCancelled())
+				// Don't log if it's a cancelled transient request.
+				if(!((!req.isPersistent()) && req.isCancelled()))
 					Logger.normal(this, "No requests to start on "+req);
 			}
 			req = null;
 			cycleTime = sentRequestTime = System.currentTimeMillis();
}
}
-
-	/**
-	 * All Key's we are currently fetching.
-	 * Locally originated requests only, avoids some complications with HTL,
-	 * and also has the benefit that we can see stuff that's been scheduled on a SenderThread
-	 * but that thread hasn't started yet. FIXME: Both issues can be avoided: first we'd get
-	 * rid of the SenderThread and start the requests directly and asynchronously, secondly
-	 * we'd move this to node but only track keys we are fetching at max HTL.
-	 * LOCKING: Always lock this LAST.
-	 */
-	private HashSet<Key> keysFetching;
-
-	private boolean startRequest(SendableRequest req, boolean logMINOR) {
-		// Create a thread to handle starting the request, and the resulting feedback
-		Object keyNum = null;
-		Key key = null;
-		while(true) {
-			try {
-				keyNum = req.chooseKey(isInsert ? null : this);
-				if(keyNum == null) return false;
-				if(!isInsert) {
-					key = ((BaseSendableGet)req).getNodeKey(keyNum);
-					if(key == null) return false;
-					synchronized(keysFetching) {
-						keysFetching.add(key);
-					}
-				}
-				core.getExecutor().execute(new SenderThread(req, keyNum, key), "RequestStarter$SenderThread for "+req);
-				if(logMINOR) Logger.minor(this, "Started "+req+" key "+keyNum);
-				return true;
-			} catch (OutOfMemoryError e) {
-				OOMHandler.handleOOM(e);
-				System.err.println("Will retry above failed operation...");
-				// Possibly out of threads
-				try {
-					Thread.sleep(5000);
-				} catch (InterruptedException e1) {
-					// Ignore
-				}
-				synchronized(keysFetching) {
-					if(key != null) keysFetching.remove(key);
-				}
-			} catch (Throwable t) {
-				if(keyNum != null) {
-					// Re-queue
-					Logger.error(this, "Caught "+t+" while trying to start request");
-					req.internalError(keyNum, t, sched);
-					return true; // Sort of ... maybe it will clear
-				}
-				synchronized(keysFetching) {
-					if(key != null) keysFetching.remove(key);
-				}
+
+	private boolean startRequest(ChosenBlock req, boolean logMINOR) {
+		if((!req.isPersistent()) && req.isCancelled()) {
+			req.onDumped();
+			return false;
+		}
+		if(req.key != null) {
+			if(!sched.addToFetching(req.key)) {
+				req.onDumped();
+				return false;
 			}
+		} else if((!req.isPersistent()) && ((TransientChosenBlock)req).request instanceof SendableInsert) {
+			if(!sched.addTransientInsertFetching((SendableInsert)(((TransientChosenBlock)req).request), req.token)) {
+				req.onDumped();
+				return false;
+			}
 		}
+		if(logMINOR) Logger.minor(this, "Running request "+req+" priority "+req.getPriority());
+		core.getExecutor().execute(new SenderThread(req, req.key), "RequestStarter$SenderThread for "+req);
+		return true;
 	}
public void run() {
@@ -251,13 +223,11 @@
private class SenderThread implements Runnable {
- private final SendableRequest req;
- private final Object keyNum;
+ private final ChosenBlock req;
private final Key key;
-		public SenderThread(SendableRequest req, Object keyNum, Key key) {
+ public SenderThread(ChosenBlock req, Key key) {
this.req = req;
- this.keyNum = keyNum;
this.key = key;
}
@@ -267,8 +237,8 @@
// FIXME ? key is not known for inserts here
 				if (key != null)
 					stats.reportOutgoingLocalRequestLocation(key.toNormalizedDouble());
-				if(!req.send(core, sched, keyNum)) {
-					if(!req.isCancelled())
+				if(!req.send(core, sched)) {
+					if(!((!req.isPersistent()) && req.isCancelled()))
 						Logger.error(this, "run() not able to send a request on "+req);
 					else
 						Logger.normal(this, "run() not able to send a request on "+req+" - request was cancelled");
@@ -276,11 +246,10 @@
if(logMINOR)
Logger.minor(this, "Finished "+req);
} finally {
- if(!isInsert) {
- synchronized(keysFetching) {
- keysFetching.remove(key);
- }
- }
+ if(key != null) sched.removeFetchingKey(key);
+				else if((!req.isPersistent()) && ((TransientChosenBlock)req).request instanceof SendableInsert)
+					sched.removeTransientInsertFetching((SendableInsert)(((TransientChosenBlock)req).request), req.token);
+
}
}
@@ -292,16 +261,18 @@
}
}
- public boolean hasKey(Key key) {
- synchronized(keysFetching) {
- return keysFetching.contains(key);
+	public boolean exclude(RandomGrabArrayItem item, ObjectContainer container, ClientContext context) {
+		if(sched.isRunningOrQueuedPersistentRequest((SendableRequest)item)) {
+			Logger.normal(this, "Excluding already-running request: "+item, new Exception("debug"));
+			return true;
 		}
-	}
-
-	public boolean exclude(RandomGrabArrayItem item) {
 		if(isInsert) return false;
+		if(!(item instanceof BaseSendableGet)) {
+			Logger.error(this, "On a request scheduler, exclude() called with "+item, new Exception("error"));
+			return false;
+		}
BaseSendableGet get = (BaseSendableGet) item;
- if(get.hasValidKeys(this))
+ if(get.hasValidKeys(sched.fetchingKeys(), container, context))
return false;
Logger.normal(this, "Excluding (no valid keys): "+get);
return true;
Modified: trunk/freenet/src/freenet/node/RequestStarterGroup.java
===================================================================
--- trunk/freenet/src/freenet/node/RequestStarterGroup.java	2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/node/RequestStarterGroup.java	2009-04-01 20:34:09 UTC (rev 26322)
@@ -3,6 +3,7 @@
* http://www.gnu.org/ for further details of the GPL. */
package freenet.node;
+import freenet.client.async.ClientContext;
import freenet.client.async.ClientRequestScheduler;
import freenet.config.Config;
import freenet.config.SubConfig;
@@ -36,7 +37,7 @@
public final ClientRequestScheduler sskPutScheduler;
private final NodeStats stats;
-	RequestStarterGroup(Node node, NodeClientCore core, int portNumber, RandomSource random, Config config, SimpleFieldSet fs) {
+	RequestStarterGroup(Node node, NodeClientCore core, int portNumber, RandomSource random, Config config, SimpleFieldSet fs, ClientContext ctx) {
 		SubConfig schedulerConfig = new SubConfig("node.scheduler", config);
this.stats = core.nodeStats;
@@ -47,27 +48,27 @@
 		throttleWindowRequest = new ThrottleWindowManager(2.0, fs == null ? null : fs.subset("ThrottleWindowRequest"), node);
 		chkRequestThrottle = new MyRequestThrottle(throttleWindow, 5000, "CHK Request", fs == null ? null : fs.subset("CHKRequestThrottle"), 32768);
 		chkRequestStarter = new RequestStarter(core, chkRequestThrottle, "CHK Request starter ("+portNumber+ ')', stats.requestOutputThrottle, stats.requestInputThrottle, stats.localChkFetchBytesSentAverage, stats.localChkFetchBytesReceivedAverage, false, false);
-		chkFetchScheduler = new ClientRequestScheduler(false, false, random, chkRequestStarter, node, core, schedulerConfig, "CHKrequester");
+		chkFetchScheduler = new ClientRequestScheduler(false, false, random, chkRequestStarter, node, core, schedulerConfig, "CHKrequester", ctx);
 		chkRequestStarter.setScheduler(chkFetchScheduler);
 		chkRequestStarter.start();
 		//insertThrottle = new ChainedRequestThrottle(10000, 2.0F, requestThrottle);
 		// FIXME reenable the above
 		chkInsertThrottle = new MyRequestThrottle(throttleWindow, 20000, "CHK Insert", fs == null ? null : fs.subset("CHKInsertThrottle"), 32768);
 		chkInsertStarter = new RequestStarter(core, chkInsertThrottle, "CHK Insert starter ("+portNumber+ ')', stats.requestOutputThrottle, stats.requestInputThrottle, stats.localChkInsertBytesSentAverage, stats.localChkInsertBytesReceivedAverage, true, false);
-		chkPutScheduler = new ClientRequestScheduler(true, false, random, chkInsertStarter, node, core, schedulerConfig, "CHKinserter");
+		chkPutScheduler = new ClientRequestScheduler(true, false, random, chkInsertStarter, node, core, schedulerConfig, "CHKinserter", ctx);
 		chkInsertStarter.setScheduler(chkPutScheduler);
 		chkInsertStarter.start();
 		sskRequestThrottle = new MyRequestThrottle(throttleWindow, 5000, "SSK Request", fs == null ? null : fs.subset("SSKRequestThrottle"), 1024);
 		sskRequestStarter = new RequestStarter(core, sskRequestThrottle, "SSK Request starter ("+portNumber+ ')', stats.requestOutputThrottle, stats.requestInputThrottle, stats.localSskFetchBytesSentAverage, stats.localSskFetchBytesReceivedAverage, false, true);
-		sskFetchScheduler = new ClientRequestScheduler(false, true, random, sskRequestStarter, node, core, schedulerConfig, "SSKrequester");
+		sskFetchScheduler = new ClientRequestScheduler(false, true, random, sskRequestStarter, node, core, schedulerConfig, "SSKrequester", ctx);
 		sskRequestStarter.setScheduler(sskFetchScheduler);
 		sskRequestStarter.start();
 		//insertThrottle = new ChainedRequestThrottle(10000, 2.0F, requestThrottle);
 		// FIXME reenable the above
 		sskInsertThrottle = new MyRequestThrottle(throttleWindow, 20000, "SSK Insert", fs == null ? null : fs.subset("SSKInsertThrottle"), 1024);
 		sskInsertStarter = new RequestStarter(core, sskInsertThrottle, "SSK Insert starter ("+portNumber+ ')', stats.requestOutputThrottle, stats.requestInputThrottle, stats.localSskInsertBytesSentAverage, stats.localSskFetchBytesReceivedAverage, true, true);
-		sskPutScheduler = new ClientRequestScheduler(true, true, random, sskInsertStarter, node, core, schedulerConfig, "SSKinserter");
+		sskPutScheduler = new ClientRequestScheduler(true, true, random, sskInsertStarter, node, core, schedulerConfig, "SSKinserter", ctx);
 		sskInsertStarter.setScheduler(sskPutScheduler);
 		sskInsertStarter.start();
@@ -228,11 +229,11 @@
return throttleWindow.realCurrentValue();
}
- public long countQueuedRequests() {
- return chkFetchScheduler.countQueuedRequests() +
- sskFetchScheduler.countQueuedRequests() +
- chkPutScheduler.countQueuedRequests() +
- sskPutScheduler.countQueuedRequests();
+ public long countTransientQueuedRequests() {
+ return chkFetchScheduler.countTransientQueuedRequests() +
+ sskFetchScheduler.countTransientQueuedRequests() +
+ chkPutScheduler.countTransientQueuedRequests() +
+ sskPutScheduler.countTransientQueuedRequests();
}
}
Property changes on: trunk/freenet/src/freenet/node/SSKInsertSender.java
___________________________________________________________________
Deleted: svn:mergeinfo
   - /branches/db4o/freenet/src/freenet/node/SSKInsertSender.java:24785,25282,25290,25332,25351-25352,25355-25356,25479,25488,25505,25540,25594,25673,25713-25714,25931,25977,26240-26243
/branches/db4o/src/freenet/node/SSKInsertSender.java:25594
Modified: trunk/freenet/src/freenet/node/SemiOrderedShutdownHook.java
===================================================================
--- trunk/freenet/src/freenet/node/SemiOrderedShutdownHook.java	2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/node/SemiOrderedShutdownHook.java	2009-04-01 20:34:09 UTC (rev 26322)
@@ -23,6 +23,7 @@
@Override
public void run() {
+ System.err.println("Shutting down...");
 		// First run early jobs, all at once, and wait for them to all complete.
for(Thread r : earlyJobs) {
Modified: trunk/freenet/src/freenet/node/SendableGet.java
===================================================================
--- trunk/freenet/src/freenet/node/SendableGet.java	2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/node/SendableGet.java	2009-04-01 20:34:09 UTC (rev 26322)
@@ -3,161 +3,113 @@
* http://www.gnu.org/ for further details of the GPL. */
package freenet.node;
+import com.db4o.ObjectContainer;
+
import freenet.client.FetchContext;
+import freenet.client.async.ClientContext;
import freenet.client.async.ClientRequestScheduler;
import freenet.client.async.ClientRequester;
import freenet.keys.ClientKey;
import freenet.keys.ClientKeyBlock;
import freenet.keys.Key;
-import freenet.keys.KeyBlock;
-import freenet.support.Logger;
-import freenet.support.LogThresholdCallback;
+import freenet.support.io.NativeThread;
/**
* A low-level key fetch which can be sent immediately. @see SendableRequest
*/
public abstract class SendableGet extends BaseSendableGet {
- private static volatile boolean logMINOR;
- static {
- Logger.registerLogThresholdCallback(new LogThresholdCallback() {
-
- @Override
- public void shouldUpdate() {
- logMINOR = Logger.shouldLog(Logger.MINOR, this);
- }
- });
- }
- /** Is this an SSK? */
- public abstract boolean isSSK();
-
/** Parent BaseClientGetter. Required for schedulers. */
public final ClientRequester parent;
/** Get a numbered key to fetch. */
- public abstract ClientKey getKey(Object token);
+	public abstract ClientKey getKey(Object token, ObjectContainer container);
@Override
- public Key getNodeKey(Object token) {
- ClientKey key = getKey(token);
+	public Key getNodeKey(SendableRequestItem token, ObjectContainer container) {
+ ClientKey key = getKey(token, container);
if(key == null) return null;
return key.getNodeKey();
}
+ /**
+	 * What keys are we interested in? For purposes of checking the datastore.
+	 * This is in SendableGet, *not* KeyListener, in order to deal with it in
+ * smaller chunks.
+ * @param container Database handle.
+ */
+ public abstract Key[] listKeys(ObjectContainer container);
+
/** Get the fetch context (settings) object. */
public abstract FetchContext getContext();
/** Called when/if the low-level request succeeds. */
-	public abstract void onSuccess(ClientKeyBlock block, boolean fromStore, Object token, RequestScheduler sched);
+	public abstract void onSuccess(ClientKeyBlock block, boolean fromStore, Object token, ObjectContainer container, ClientContext context);
 	/** Called when/if the low-level request fails. */
-	public abstract void onFailure(LowLevelGetException e, Object token, RequestScheduler sched);
+	public abstract void onFailure(LowLevelGetException e, Object token, ObjectContainer container, ClientContext context);
/** Should the request ignore the datastore? */
public abstract boolean ignoreStore();
- /** If true, don't cache local requests */
- public abstract boolean dontCache();
+ /** If true, don't cache local requests
+ * @param container */
+ public abstract boolean dontCache(ObjectContainer container);
// Implementation
public SendableGet(ClientRequester parent) {
+ super(parent.persistent());
this.parent = parent;
}
-	/** Do the request, blocking. Called by RequestStarter.
-	 * @return True if a request was executed. False if caller should try to find another request, and remove
-	 * this one from the queue. */
-	@Override
-	public boolean send(NodeClientCore core, RequestScheduler sched, Object keyNum) {
-		ClientKey key = getKey(keyNum);
-		if(key == null) {
-			Logger.error(this, "Key is null in send(): keyNum = "+keyNum+" for "+this);
-			return false;
-		}
-		if(logMINOR)
-			Logger.minor(this, "Sending get for key "+keyNum+" : "+key);
-		FetchContext ctx = getContext();
-		long now = System.currentTimeMillis();
-		if(getCooldownWakeupByKey(key.getNodeKey()) > now) {
-			Logger.error(this, "Key is still on the cooldown queue in send() for "+this+" - key = "+key, new Exception("error"));
-			return false;
-		}
-		if(isCancelled()) {
-			if(logMINOR) Logger.minor(this, "Cancelled: "+this);
-			onFailure(new LowLevelGetException(LowLevelGetException.CANCELLED), null, sched);
-			return false;
-		}
-		try {
-			try {
-				core.realGetKey(key, ctx.localRequestOnly, ctx.cacheLocalRequests, ctx.ignoreStore);
-			} catch (LowLevelGetException e) {
-				onFailure(e, keyNum, sched);
-				return true;
-			} catch (Throwable t) {
-				Logger.error(this, "Caught "+t, t);
-				onFailure(new LowLevelGetException(LowLevelGetException.INTERNAL_ERROR), keyNum, sched);
-				return true;
-			}
-			// Don't call onSuccess(), it will be called for us by backdoor coalescing.
-			sched.succeeded(this.getParentGrabArray());
-		} catch (Throwable t) {
-			Logger.error(this, "Caught "+t, t);
-			onFailure(new LowLevelGetException(LowLevelGetException.INTERNAL_ERROR), keyNum, sched);
-			return true;
-		}
-		return true;
+	static final SendableGetRequestSender sender = new SendableGetRequestSender();
+
+	public SendableRequestSender getSender(ObjectContainer container, ClientContext context) {
+		return sender;
 	}
-
- public void schedule() {
- if(logMINOR)
- Logger.minor(this, "Scheduling "+this);
- getScheduler().register(this);
- }
- public ClientRequestScheduler getScheduler() {
+ public ClientRequestScheduler getScheduler(ClientContext context) {
if(isSSK())
- return parent.sskScheduler;
+ return context.getSskFetchScheduler();
else
- return parent.chkScheduler;
+ return context.getChkFetchScheduler();
}
- public abstract void onGotKey(Key key, KeyBlock block, RequestScheduler
sched);
-
/**
 	 * Get the time at which the key specified by the given token will wake up from the
* cooldown queue.
* @param token
* @return
*/
- public abstract long getCooldownWakeup(Object token);
+	public abstract long getCooldownWakeup(Object token, ObjectContainer container);
-	public abstract long getCooldownWakeupByKey(Key key);
+	public abstract long getCooldownWakeupByKey(Key key, ObjectContainer container);
/** Reset the cooldown times when the request is reregistered. */
- public abstract void resetCooldownTimes();
+ public abstract void resetCooldownTimes(ObjectContainer container);
+ /**
+	 * An internal error occurred, affecting this SendableGet, independently of any ChosenBlocks.
+ */
@Override
- public final void unregister(boolean staySubscribed) {
- if(!staySubscribed)
- getScheduler().removePendingKeys(this, false);
- super.unregister(staySubscribed);
+	public void internalError(final Throwable t, final RequestScheduler sched, ObjectContainer container, ClientContext context, boolean persistent) {
+		sched.callFailure(this, new LowLevelGetException(LowLevelGetException.INTERNAL_ERROR, t.getMessage(), t), NativeThread.MAX_PRIORITY, persistent);
}
-
- public final void unregisterKey(Key key) {
- getScheduler().removePendingKey(this, false, key);
- }
- @Override
-	public void internalError(Object keyNum, Throwable t, RequestScheduler sched) {
-		onFailure(new LowLevelGetException(LowLevelGetException.INTERNAL_ERROR, t.getMessage(), t), keyNum, sched);
- }
-
/**
* Requeue a key after it has been on the cooldown queue for a while.
 	 * Only requeue if our requeue time is less than or equal to the given time.
 	 * @param key
 	 */
-	public abstract void requeueAfterCooldown(Key key, long time);
+	public abstract void requeueAfterCooldown(Key key, long time, ObjectContainer container, ClientContext context);
+ public final boolean isInsert() {
+ return false;
+ }
+
+	public void removeFrom(ObjectContainer container, ClientContext context) {
+ container.delete(this);
+ }
+
}
Copied: trunk/freenet/src/freenet/node/SendableGetRequestSender.java (from rev 26320, branches/db4o/freenet/src/freenet/node/SendableGetRequestSender.java)
===================================================================
--- trunk/freenet/src/freenet/node/SendableGetRequestSender.java	(rev 0)
+++ trunk/freenet/src/freenet/node/SendableGetRequestSender.java	2009-04-01 20:34:09 UTC (rev 26322)
@@ -0,0 +1,61 @@
+package freenet.node;
+
+import freenet.client.async.ChosenBlock;
+import freenet.client.async.ClientContext;
+import freenet.keys.ClientKey;
+import freenet.support.LogThresholdCallback;
+import freenet.support.Logger;
+
+public class SendableGetRequestSender implements SendableRequestSender {
+
+ private static volatile boolean logMINOR;
+
+ static {
+ Logger.registerLogThresholdCallback(new LogThresholdCallback() {
+
+ @Override
+ public void shouldUpdate() {
+ logMINOR = Logger.shouldLog(Logger.MINOR, this);
+ }
+ });
+ }
+
+ /** Do the request, blocking. Called by RequestStarter.
+ * Also responsible for deleting it.
+	 * @return True if a request was executed. False if caller should try to find another request, and remove
+	 * this one from the queue. */
+	public boolean send(NodeClientCore core, final RequestScheduler sched, ClientContext context, ChosenBlock req) {
+		Object keyNum = req.token;
+		ClientKey key = req.ckey;
+		if(key == null) {
+			Logger.error(SendableGet.class, "Key is null in send(): keyNum = "+keyNum+" for "+req);
+			return false;
+		}
+		if(logMINOR)
+			Logger.minor(SendableGet.class, "Sending get for key "+keyNum+" : "+key);
+		if(req.isCancelled()) {
+			if(logMINOR) Logger.minor(SendableGet.class, "Cancelled: "+req);
+			req.onFailure(new LowLevelGetException(LowLevelGetException.CANCELLED), context);
+			return false;
+		}
+		try {
+			try {
+				core.realGetKey(key, req.localRequestOnly, req.cacheLocalRequests, req.ignoreStore);
+			} catch (final LowLevelGetException e) {
+				req.onFailure(e, context);
+				return true;
+			} catch (Throwable t) {
+				Logger.error(this, "Caught "+t, t);
+				req.onFailure(new LowLevelGetException(LowLevelGetException.INTERNAL_ERROR), context);
+				return true;
+			}
+			req.onFetchSuccess(context);
+		} catch (Throwable t) {
+			Logger.error(this, "Caught "+t, t);
+			req.onFailure(new LowLevelGetException(LowLevelGetException.INTERNAL_ERROR), context);
+ return true;
+ }
+ return true;
+ }
+
+}
Modified: trunk/freenet/src/freenet/node/SendableInsert.java
===================================================================
--- trunk/freenet/src/freenet/node/SendableInsert.java	2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/node/SendableInsert.java	2009-04-01 20:34:09 UTC (rev 26322)
@@ -3,6 +3,12 @@
* http://www.gnu.org/ for further details of the GPL. */
package freenet.node;
+import com.db4o.ObjectContainer;
+
+import freenet.client.async.ClientContext;
+import freenet.client.async.ClientRequestScheduler;
+import freenet.support.io.NativeThread;
+
/**
 * Callback interface for a low level insert, which is immediately sendable. These
 * should be registered on the ClientRequestScheduler when we want to send them. It will
@@ -11,15 +17,32 @@
*/
public abstract class SendableInsert extends SendableRequest {
+ public SendableInsert(boolean persistent) {
+ super(persistent);
+ }
+
/** Called when we successfully insert the data */
- public abstract void onSuccess(Object keyNum);
+	public abstract void onSuccess(Object keyNum, ObjectContainer container, ClientContext context);
 	/** Called when we don't! */
-	public abstract void onFailure(LowLevelPutException e, Object keyNum);
+	public abstract void onFailure(LowLevelPutException e, Object keyNum, ObjectContainer container, ClientContext context);
 	@Override
-	public void internalError(Object keyNum, Throwable t, RequestScheduler sched) {
-		onFailure(new LowLevelPutException(LowLevelPutException.INTERNAL_ERROR, t.getMessage(), t), keyNum);
+	public void internalError(Throwable t, RequestScheduler sched, ObjectContainer container, ClientContext context, boolean persistent) {
+		sched.callFailure(this, new LowLevelPutException(LowLevelPutException.INTERNAL_ERROR, t.getMessage(), t), NativeThread.MAX_PRIORITY, persistent);
}
+ public final boolean isInsert() {
+ return true;
+ }
+
+ public ClientRequestScheduler getScheduler(ClientContext context) {
+ if(isSSK())
+ return context.getSskInsertScheduler();
+ else
+ return context.getChkInsertScheduler();
+ }
+
+ public abstract boolean cacheInserts(ObjectContainer container);
+
}
Modified: trunk/freenet/src/freenet/node/SendableRequest.java
===================================================================
--- trunk/freenet/src/freenet/node/SendableRequest.java	2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/node/SendableRequest.java	2009-04-01 20:34:09 UTC (rev 26322)
@@ -1,6 +1,15 @@
package freenet.node;
+import java.util.List;
+
+import com.db4o.ObjectContainer;
+
+import freenet.client.async.ChosenBlock;
+import freenet.client.async.ClientContext;
+import freenet.client.async.ClientRequestScheduler;
import freenet.client.async.ClientRequester;
+import freenet.client.async.PersistentChosenBlock;
+import freenet.client.async.PersistentChosenRequest;
import freenet.support.Logger;
import freenet.support.RandomGrabArray;
import freenet.support.RandomGrabArrayItem;
@@ -14,46 +23,66 @@
*/
public abstract class SendableRequest implements RandomGrabArrayItem {
+ // Since we put these into Set's etc, hashCode must be persistent.
+ private final int hashCode;
+
+ SendableRequest(boolean persistent) {
+ this.persistent = persistent;
+ this.hashCode = super.hashCode();
+ }
+
+ public final int hashCode() {
+ return hashCode;
+ }
+
protected RandomGrabArray parentGrabArray;
+ /** Member because must be accessible when only marginally activated */
+ protected final boolean persistent;
/** Get the priority class of the request. */
- public abstract short getPriorityClass();
+ public abstract short getPriorityClass(ObjectContainer container);
public abstract int getRetryCount();
 	/** Choose a key to fetch. Removes the block number from any internal queues
 	 * (but not the key itself, implementors must have a separate queue of block
 	 * numbers and mapping of block numbers to keys).
-	 * @return An object identifying a specific key. -1 indicates no keys available. */
-	public abstract Object chooseKey(KeysFetchingLocally keys);
+	 * @return An object identifying a specific key. null indicates no keys available. */
+	public abstract SendableRequestItem chooseKey(KeysFetchingLocally keys, ObjectContainer container, ClientContext context);
 	/** All key identifiers. Including those not currently eligible to be sent because
 	 * they are on a cooldown queue, requests for them are in progress, etc. */
-	public abstract Object[] allKeys();
+	public abstract SendableRequestItem[] allKeys(ObjectContainer container, ClientContext context);
 	/** All key identifiers currently eligible to be sent. Does not include those
 	 * currently running, on the cooldown queue etc. */
-	public abstract Object[] sendableKeys();
+	public abstract SendableRequestItem[] sendableKeys(ObjectContainer container, ClientContext context);
-	/** ONLY called by RequestStarter. Start the actual request using the NodeClientCore
-	 * provided, and the key and key number earlier got from chooseKey().
-	 * The request itself may have been removed from the overall queue already.
-	 * @param sched The scheduler this request has just been grabbed from.
-	 * @param keyNum The key number that was fed into getKeyObject().
-	 * @param key The key returned from grabKey().
-	 * @return True if a request was sent, false otherwise (in which case the request will
-	 * be removed if it hasn't already been). */
-	public abstract boolean send(NodeClientCore node, RequestScheduler sched, Object keyNum);
+	/**
+	 * Get or create a SendableRequestSender for this object. This is a non-persistent
+	 * object used to send the requests. @see SendableGet.getSender().
+	 * @param container A database handle may be necessary for creating it.
+	 * @param context A client context may also be necessary.
+	 * @return
+	 */
+	public abstract SendableRequestSender getSender(ObjectContainer container, ClientContext context);
 	/** If true, the request has been cancelled, or has completed, either way it need not
 	 * be registered any more. isEmpty() on the other hand means there are no queued blocks.
 	 */
-	public abstract boolean isCancelled();
+	public abstract boolean isCancelled(ObjectContainer container);
-	/** Get client context object */
-	public abstract Object getClient();
+	/** Get client context object. This isn't called as frequently as you might expect
+	 * - once on registration, and then when there is an error. So it doesn't need to be
+	 * stored on the request itself, hence we pass in a container. */
+	public abstract RequestClient getClient(ObjectContainer container);
-	/** Get the ClientRequest */
+	/** Is this request persistent? MUST NOT CHANGE. */
+	public final boolean persistent() {
+		return persistent;
+	}
+
+	/** Get the ClientRequest. This DOES need to be cached on the request itself. */
public abstract ClientRequester getClientRequest();
public synchronized RandomGrabArray getParentGrabArray() {
@@ -64,22 +93,44 @@
return true;
}
- public synchronized void setParentGrabArray(RandomGrabArray parent) {
+	public synchronized void setParentGrabArray(RandomGrabArray parent, ObjectContainer container) {
 		parentGrabArray = parent;
+		if(persistent())
+			container.store(this);
 	}
-	public void unregister(boolean staySubscribed) {
+	public void unregister(ObjectContainer container, ClientContext context) {
 		RandomGrabArray arr = getParentGrabArray();
 		if(arr != null) {
-			arr.remove(this);
+			if(persistent)
+				container.activate(arr, 1);
+			arr.remove(this, container);
 		} else {
 			// Should this be a higher priority?
 			if(Logger.shouldLog(Logger.MINOR, this))
 				Logger.minor(this, "Cannot unregister "+this+" : not registered", new Exception("debug"));
 		}
+		ClientRequester cr = getClientRequest();
+		if(persistent)
+			container.activate(cr, 1);
+		getScheduler(context).removeFromAllRequestsByClientRequest(cr, this, true, container);
+ // FIXME should we deactivate??
+ //if(persistent) container.deactivate(cr, 1);
}
+
+	public abstract ClientRequestScheduler getScheduler(ClientContext context);
+	/** Is this an SSK? For purposes of determining which scheduler to use. */
+	public abstract boolean isSSK();
+
+	/** Is this an insert? For purposes of determining which scheduler to use. */
+	public abstract boolean isInsert();
+
 	/** Requeue after an internal error */
-	public abstract void internalError(Object keyNum, Throwable t, RequestScheduler sched);
+	public abstract void internalError(Throwable t, RequestScheduler sched, ObjectContainer container, ClientContext context, boolean persistent);
+	/** Construct a full set of ChosenBlock's for a persistent request. These are transient, so we will need to clone keys
+	 * etc. */
+	public abstract List<PersistentChosenBlock> makeBlocks(PersistentChosenRequest request, RequestScheduler sched, ObjectContainer container, ClientContext context);
+
}
Copied: trunk/freenet/src/freenet/node/SendableRequestItem.java (from rev 26320, branches/db4o/freenet/src/freenet/node/SendableRequestItem.java)
===================================================================
--- trunk/freenet/src/freenet/node/SendableRequestItem.java	(rev 0)
+++ trunk/freenet/src/freenet/node/SendableRequestItem.java	2009-04-01 20:34:09 UTC (rev 26322)
@@ -0,0 +1,16 @@
+package freenet.node;
+
+/**
+ * A SendableRequest may include many SendableRequestItem's.
+ * Typically for requests, these are just an integer indicating which key
+ * to fetch. But for inserts, these will often include the actual data to
+ * insert, or some means of getting it without access to the database.
+ * @author Matthew Toseland <toad at amphibian.dyndns.org> (0xE43DA450)
+ */
+public interface SendableRequestItem {
+
+ /** Called when a request is abandoned. Whether this is called on
+ * a successful request is up to the SendableRequestSender. */
+ public void dump();
+
+}
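
For illustration only (not part of this commit): a request that identifies its blocks by index needs nothing more than a wrapper around the block number. BlockNumberItem is hypothetical:

	public class BlockNumberItem implements SendableRequestItem {
		public final int blockNum;
		public BlockNumberItem(int blockNum) {
			this.blockNum = blockNum;
		}
		public void dump() {
			// holds no buckets or keys, so there is nothing to free
		}
	}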
Copied: trunk/freenet/src/freenet/node/SendableRequestSender.java (from rev 26320, branches/db4o/freenet/src/freenet/node/SendableRequestSender.java)
===================================================================
--- trunk/freenet/src/freenet/node/SendableRequestSender.java	(rev 0)
+++ trunk/freenet/src/freenet/node/SendableRequestSender.java	2009-04-01 20:34:09 UTC (rev 26322)
@@ -0,0 +1,26 @@
+package freenet.node;
+
+import freenet.client.async.ChosenBlock;
+import freenet.client.async.ClientContext;
+
+/**
+ * Interface for class responsible for doing the actual sending of requests.
+ * Strictly non-persistent.
+ * @author Matthew Toseland <toad at amphibian.dyndns.org> (0xE43DA450)
+ */
+public interface SendableRequestSender {
+
+	/** ONLY called by RequestStarter. Start the actual request using the NodeClientCore
+	 * provided, and the key and key number obtained earlier from chooseKey().
+	 * The request itself may already have been removed from the overall queue. For
+	 * persistent requests, the callbacks will be called on the database thread, and we
+	 * will delete the PersistentChosenRequest from there before committing.
+	 * @param sched The scheduler this request has just been grabbed from.
+	 * @param keyNum The key number that was fed into getKeyObject().
+	 * @param key The key returned from grabKey().
+	 * @param ckey The client key for decoding, if available (mandatory for SendableGet, null otherwise).
+	 * @return True if a request was sent, false otherwise (in which case the request will
+	 * be removed if it hasn't already been). */
+	public abstract boolean send(NodeClientCore node, RequestScheduler sched, ClientContext context, ChosenBlock request);
+
+}
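
A real implementation appears in SimpleSendableInsert further down in this diff; purely as a stripped-down sketch of the contract (hypothetical class, not part of this commit):

    // Hypothetical no-op sender: performs no network I/O and reports that
    // nothing was sent, so the scheduler removes the chosen block.
    public class NoOpSender implements SendableRequestSender {
        public boolean send(NodeClientCore node, RequestScheduler sched,
                ClientContext context, ChosenBlock request) {
            // A real sender would route the request here, then call the
            // appropriate onSuccess()/onFailure() callbacks on the block.
            return false; // false => the block is removed if it hasn't been already
        }
    }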
Deleted: trunk/freenet/src/freenet/node/SessionKey.java
===================================================================
--- trunk/freenet/src/freenet/node/SessionKey.java	2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/node/SessionKey.java	2009-04-01 20:34:09 UTC (rev 26322)
@@ -1,40 +0,0 @@
-/* This code is part of Freenet. It is distributed under the GNU General
- * Public License, version 2 (or at your option any later version). See
- * http://www.gnu.org/ for further details of the GPL. */
-package freenet.node;
-
-import freenet.crypt.BlockCipher;
-
-/**
- * Class representing a single session key.
- * @author Matthew Toseland <toad at amphibian.dyndns.org> (0xE43DA450)
- */
-public class SessionKey {
-
- /** A PacketTracker may have more than one SessionKey, but a SessionKey
- * may only have one PacketTracker. In other words, in some cases it is
- * possible to change the session key without invalidating the packet
- * sequence, but it is never possible to invalidate the packet sequence
- * without changing the session key. */
- final PacketTracker packets;
-
- /** Parent PeerNode */
- public final PeerNode pn;
- /** Cipher to both encrypt outgoing packets with and decrypt
- * incoming ones. */
- public final BlockCipher sessionCipher;
- /** Key for above cipher, so far for debugging */
- public final byte[] sessionKey;
-
-	SessionKey(PeerNode parent, PacketTracker tracker, BlockCipher cipher, byte[] sessionKey) {
- this.pn = parent;
- this.packets = tracker;
- this.sessionCipher = cipher;
- this.sessionKey = sessionKey;
- }
-
- public String toString() {
- return super.toString()+":"+packets.toString();
- }
-
-}
Copied: trunk/freenet/src/freenet/node/SessionKey.java (from rev 26320, branches/db4o/freenet/src/freenet/node/SessionKey.java)
===================================================================
--- trunk/freenet/src/freenet/node/SessionKey.java	(rev 0)
+++ trunk/freenet/src/freenet/node/SessionKey.java	2009-04-01 20:34:09 UTC (rev 26322)
@@ -0,0 +1,40 @@
+/* This code is part of Freenet. It is distributed under the GNU General
+ * Public License, version 2 (or at your option any later version). See
+ * http://www.gnu.org/ for further details of the GPL. */
+package freenet.node;
+
+import freenet.crypt.BlockCipher;
+
+/**
+ * Class representing a single session key.
+ * @author Matthew Toseland <toad at amphibian.dyndns.org> (0xE43DA450)
+ */
+public class SessionKey {
+
+ /** A PacketTracker may have more than one SessionKey, but a SessionKey
+ * may only have one PacketTracker. In other words, in some cases it is
+ * possible to change the session key without invalidating the packet
+ * sequence, but it is never possible to invalidate the packet sequence
+ * without changing the session key. */
+ final PacketTracker packets;
+
+ /** Parent PeerNode */
+ public final PeerNode pn;
+ /** Cipher to both encrypt outgoing packets with and decrypt
+ * incoming ones. */
+ public final BlockCipher sessionCipher;
+ /** Key for above cipher, so far for debugging */
+ public final byte[] sessionKey;
+
+	SessionKey(PeerNode parent, PacketTracker tracker, BlockCipher cipher, byte[] sessionKey) {
+ this.pn = parent;
+ this.packets = tracker;
+ this.sessionCipher = cipher;
+ this.sessionKey = sessionKey;
+ }
+
+ public String toString() {
+ return super.toString()+":"+packets.toString();
+ }
+
+}
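
The comment in this class pins down the invariant: rekeying may preserve the packet sequence, but the sequence cannot change under a live key. A hypothetical package-local helper (a sketch, not part of this commit) makes that concrete:

    // Rekey: same PeerNode and same PacketTracker, fresh cipher and key
    // bytes, so packet sequence numbers survive the session-key change.
    static SessionKey rekey(SessionKey old, BlockCipher newCipher, byte[] newKey) {
        return new SessionKey(old.pn, old.packets, newCipher, newKey);
    }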
Modified: trunk/freenet/src/freenet/node/SimpleSendableInsert.java
===================================================================
--- trunk/freenet/src/freenet/node/SimpleSendableInsert.java	2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/node/SimpleSendableInsert.java	2009-04-01 20:34:09 UTC (rev 26322)
@@ -3,12 +3,22 @@
* http://www.gnu.org/ for further details of the GPL. */
package freenet.node;
+import java.util.List;
+
+import com.db4o.ObjectContainer;
+
+import freenet.client.async.ChosenBlock;
+import freenet.client.async.ClientContext;
import freenet.client.async.ClientRequestScheduler;
import freenet.client.async.ClientRequester;
+import freenet.client.async.PersistentChosenBlock;
+import freenet.client.async.PersistentChosenRequest;
import freenet.keys.CHKBlock;
+import freenet.keys.ClientKey;
import freenet.keys.KeyBlock;
import freenet.keys.SSKBlock;
import freenet.support.Logger;
+import freenet.support.io.NativeThread;
/**
 * Simple SendableInsert implementation. No feedback, no retries, just insert the
@@ -20,22 +30,26 @@
public final KeyBlock block;
public final short prioClass;
private boolean finished;
- public final Object client;
+ public final RequestClient client;
public final ClientRequestScheduler scheduler;
	public SimpleSendableInsert(NodeClientCore core, KeyBlock block, short prioClass) {
+ super(false);
this.block = block;
this.prioClass = prioClass;
- this.client = core;
+ this.client = core.node.nonPersistentClient;
if(block instanceof CHKBlock)
scheduler = core.requestStarters.chkPutScheduler;
else if(block instanceof SSKBlock)
scheduler = core.requestStarters.sskPutScheduler;
else
			throw new IllegalArgumentException("Don't know what to do with "+block);
+		if(!scheduler.isInsertScheduler())
+			throw new IllegalStateException("Scheduler "+scheduler+" is not an insert scheduler!");
}
-	public SimpleSendableInsert(KeyBlock block, short prioClass, Object client, ClientRequestScheduler scheduler) {
+	public SimpleSendableInsert(KeyBlock block, short prioClass, RequestClient client, ClientRequestScheduler scheduler) {
+ super(false);
this.block = block;
this.prioClass = prioClass;
this.client = client;
@@ -43,20 +57,20 @@
}
@Override
-	public void onSuccess(Object keyNum) {
+	public void onSuccess(Object keyNum, ObjectContainer container, ClientContext context) {
// Yay!
if(Logger.shouldLog(Logger.MINOR, this))
Logger.minor(this, "Finished insert of "+block);
}
@Override
-	public void onFailure(LowLevelPutException e, Object keyNum) {
+	public void onFailure(LowLevelPutException e, Object keyNum, ObjectContainer container, ClientContext context) {
if(Logger.shouldLog(Logger.MINOR, this))
Logger.minor(this, "Failed insert of "+block+": "+e);
}
@Override
-	public short getPriorityClass() {
+	public short getPriorityClass(ObjectContainer container) {
return prioClass;
}
@@ -67,26 +81,31 @@
}
@Override
-	public boolean send(NodeClientCore core, RequestScheduler sched, Object keyNum) {
-		// Ignore keyNum, key, since this is a single block
-		boolean logMINOR = Logger.shouldLog(Logger.MINOR, this);
-		try {
-			if(logMINOR) Logger.minor(this, "Starting request: "+this);
-			core.realPut(block, shouldCache());
-		} catch (LowLevelPutException e) {
-			onFailure(e, keyNum);
-			if(logMINOR) Logger.minor(this, "Request failed: "+this+" for "+e);
-			return true;
-		} finally {
-			finished = true;
-		}
-		if(logMINOR) Logger.minor(this, "Request succeeded: "+this);
-		onSuccess(keyNum);
-		return true;
+	public SendableRequestSender getSender(ObjectContainer container, ClientContext context) {
+		return new SendableRequestSender() {
+
+			public boolean send(NodeClientCore core, RequestScheduler sched, ClientContext context, ChosenBlock req) {
+				// Ignore keyNum, key, since this is a single block
+				boolean logMINOR = Logger.shouldLog(Logger.MINOR, this);
+				try {
+					if(logMINOR) Logger.minor(this, "Starting request: "+this);
+					core.realPut(block, shouldCache());
+				} catch (LowLevelPutException e) {
+					onFailure(e, req.token, null, context);
+					if(logMINOR) Logger.minor(this, "Request failed: "+this+" for "+e);
+					return true;
+				} finally {
+					finished = true;
+				}
+				if(logMINOR) Logger.minor(this, "Request succeeded: "+this);
+				onSuccess(req.token, null, context);
+				return true;
+			}
+		};
}
@Override
-	public Object getClient() {
+	public RequestClient getClient(ObjectContainer container) {
return client;
}
@@ -96,29 +115,25 @@
}
@Override
-	public boolean isCancelled() {
+	public boolean isCancelled(ObjectContainer container) {
return finished;
}
-	public boolean isEmpty() {
+	public boolean isEmpty(ObjectContainer container) {
return finished;
}
- public boolean canRemove() {
- return true;
- }
-
public void schedule() {
finished = false; // can reschedule
- scheduler.register(this);
+ scheduler.registerInsert(this, false, false, null);
}
- public void cancel() {
+ public void cancel(ObjectContainer container, ClientContext context) {
synchronized(this) {
if(finished) return;
finished = true;
}
- super.unregister(false);
+ super.unregister(container, context);
}
public boolean shouldCache() {
@@ -126,22 +141,47 @@
return false;
}
+	private static SendableRequestItem nullItem = new SendableRequestItem() {
+
+ public void dump() {
+ // No problem
+ }
+
+ };
+
@Override
-	public synchronized Object[] allKeys() {
-		if(finished) return new Object[] {};
-		return new Object[] { 0 };
+	public synchronized SendableRequestItem[] allKeys(ObjectContainer container, ClientContext context) {
+		if(finished) return new SendableRequestItem[] {};
+		return new SendableRequestItem[] { NullSendableRequestItem.nullItem };
}
@Override
-	public synchronized Object[] sendableKeys() {
-		if(finished) return new Object[] {};
-		return new Object[] { 0 };
+	public synchronized SendableRequestItem[] sendableKeys(ObjectContainer container, ClientContext context) {
+		if(finished) return new SendableRequestItem[] {};
+		return new SendableRequestItem[] { NullSendableRequestItem.nullItem };
}
@Override
-	public synchronized Object chooseKey(KeysFetchingLocally keys) {
+	public synchronized SendableRequestItem chooseKey(KeysFetchingLocally keys, ObjectContainer container, ClientContext context) {
+		if(keys.hasTransientInsert(this, NullSendableRequestItem.nullItem))
+			return null;
if(finished) return null;
else
- return 0;
+ return NullSendableRequestItem.nullItem;
}
+
+ public boolean isSSK() {
+ return block instanceof SSKBlock;
+ }
+
+ @Override
+	public List<PersistentChosenBlock> makeBlocks(PersistentChosenRequest request, RequestScheduler sched, ObjectContainer container, ClientContext context) {
+ // Transient-only so no makeBlocks().
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public boolean cacheInserts(ObjectContainer container) {
+ return scheduler.cacheInserts();
+ }
}
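
Taken together: the insert hands the scheduler a SendableRequestSender that performs the blocking store. A usage sketch (hypothetical variables; the priority constant is assumed from RequestStarter, not shown in this diff):

    // Insert one already-encoded block and let the scheduler drive it;
    // schedule() registers with the CHK or SSK insert scheduler as appropriate.
    SimpleSendableInsert insert = new SimpleSendableInsert(core, keyBlock,
            RequestStarter.BULK_SPLITFILE_PRIORITY_CLASS);
    insert.schedule();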
Copied: trunk/freenet/src/freenet/node/SupportsBulkCallFailure.java (from rev 26320, branches/db4o/freenet/src/freenet/node/SupportsBulkCallFailure.java)
===================================================================
--- trunk/freenet/src/freenet/node/SupportsBulkCallFailure.java	(rev 0)
+++ trunk/freenet/src/freenet/node/SupportsBulkCallFailure.java	2009-04-01 20:34:09 UTC (rev 26322)
@@ -0,0 +1,16 @@
+package freenet.node;
+
+import com.db4o.ObjectContainer;
+
+import freenet.client.async.ClientContext;
+
+/**
+ * Normally only implemented by SendableGets. YOU MUST ALSO IMPLEMENT equals() and a hashCode() consistent with it!
+ * @author toad
+ *
+ */
+public interface SupportsBulkCallFailure {
+
+ /** Process a whole batch of failures at once. */
+	public abstract void onFailure(BulkCallFailureItem[] items, ObjectContainer container, ClientContext context);
+}
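
Since the javadoc insists on equals()/hashCode(), a minimal hypothetical implementer (a sketch using the imports at the top of this file, not part of this commit) might look like:

    // Identity is the request URI, so duplicate gets compare equal and the
    // batch-failure path can coalesce them.
    public class BatchFailingGet implements SupportsBulkCallFailure {

        private final String uri;

        public BatchFailingGet(String uri) {
            this.uri = uri;
        }

        public void onFailure(BulkCallFailureItem[] items, ObjectContainer container, ClientContext context) {
            // Handle every failed block in one pass: one callback, and for
            // persistent requests one database transaction, instead of one per item.
            for(BulkCallFailureItem item : items) {
                // record the failure for this item
            }
        }

        public boolean equals(Object o) {
            return (o instanceof BatchFailingGet) && ((BatchFailingGet) o).uri.equals(uri);
        }

        public int hashCode() {
            return uri.hashCode();
        }
    }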
Modified: trunk/freenet/src/freenet/node/TextModeClientInterface.java
===================================================================
--- trunk/freenet/src/freenet/node/TextModeClientInterface.java	2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/node/TextModeClientInterface.java	2009-04-01 20:34:09 UTC (rev 26322)
@@ -77,7 +77,7 @@
this.downloadsDir = server.downloadsDir;
this.in = in;
this.out = out;
- client.addGlobalHook(new EventDumper(new PrintWriter(out, true)));
+		client.addGlobalHook(new EventDumper(new PrintWriter(out, true), false));
}
	public TextModeClientInterface(Node n, HighLevelSimpleClient c, File downloadDir, InputStream in, OutputStream out) {
@@ -88,7 +88,7 @@
this.downloadsDir = downloadDir;
this.in = in;
this.out = out;
- client.addGlobalHook(new EventDumper(new PrintWriter(out, true)));
+		client.addGlobalHook(new EventDumper(new PrintWriter(out, true), false));
}
public void run() {
Modified: trunk/freenet/src/freenet/node/fcp/AddPeer.java
===================================================================
--- trunk/freenet/src/freenet/node/fcp/AddPeer.java	2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/node/fcp/AddPeer.java	2009-04-01 20:34:09 UTC (rev 26322)
@@ -14,6 +14,8 @@
import java.net.URLConnection;
import java.util.Arrays;
+import com.db4o.ObjectContainer;
+
import freenet.io.comm.PeerParseException;
import freenet.io.comm.ReferenceSignatureVerificationException;
import freenet.node.FSParseException;
@@ -158,4 +160,8 @@
		handler.outputHandler.queue(new PeerMessage(pn, true, true, identifier));
}
+ public void removeFrom(ObjectContainer container) {
+ throw new UnsupportedOperationException();
+ }
+
}
Modified: trunk/freenet/src/freenet/node/fcp/AllDataMessage.java
===================================================================
--- trunk/freenet/src/freenet/node/fcp/AllDataMessage.java	2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/node/fcp/AllDataMessage.java	2009-04-01 20:34:09 UTC (rev 26322)
@@ -3,6 +3,8 @@
* http://www.gnu.org/ for further details of the GPL. */
package freenet.node.fcp;
+import com.db4o.ObjectContainer;
+
import freenet.node.Node;
import freenet.support.SimpleFieldSet;
import freenet.support.api.Bucket;
@@ -67,4 +69,8 @@
return global;
}
+ public void removeFrom(ObjectContainer container) {
+ container.delete(this);
+ }
+
}
Modified: trunk/freenet/src/freenet/node/fcp/ClientGet.java
===================================================================
--- trunk/freenet/src/freenet/node/fcp/ClientGet.java	2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/node/fcp/ClientGet.java	2009-04-01 20:34:09 UTC (rev 26322)
@@ -7,6 +7,8 @@
import java.io.IOException;
import java.util.HashSet;
+import com.db4o.ObjectContainer;
+
import freenet.client.FetchContext;
import freenet.client.FetchException;
import freenet.client.FetchResult;
@@ -14,8 +16,10 @@
import freenet.client.async.BaseClientPutter;
import freenet.client.async.BinaryBlob;
import freenet.client.async.ClientCallback;
+import freenet.client.async.ClientContext;
import freenet.client.async.ClientGetter;
import freenet.client.async.ClientRequester;
+import freenet.client.async.DBJob;
import freenet.client.events.ClientEvent;
import freenet.client.events.ClientEventListener;
import freenet.client.events.SplitfileProgressEvent;
@@ -27,6 +31,7 @@
import freenet.support.io.CannotCreateFromFieldSetException;
import freenet.support.io.FileBucket;
import freenet.support.io.FileUtil;
+import freenet.support.io.NativeThread;
import freenet.support.io.NullBucket;
import freenet.support.io.SerializableToFieldSetBucketUtil;
@@ -36,6 +41,8 @@
*/
public class ClientGet extends ClientRequest implements ClientCallback, ClientEventListener {
+	/** Fetch context. Never passed in: always created new by the ClientGet. Therefore, we
+	 * can safely delete it in requestWasRemoved(). */
private final FetchContext fctx;
private final ClientGetter getter;
private final short returnType;
@@ -70,16 +77,17 @@
* Create one for a global-queued request not made by FCP.
* @throws IdentifierCollisionException
* @throws NotAllowedException
+ * @throws IOException
*/
	public ClientGet(FCPClient globalClient, FreenetURI uri, boolean dsOnly, boolean ignoreDS,
			int maxSplitfileRetries, int maxNonSplitfileRetries, long maxOutputLength,
			short returnType, boolean persistRebootOnly, String identifier, int verbosity, short prioClass,
-			File returnFilename, File returnTempFilename) throws IdentifierCollisionException, NotAllowedException {
+			File returnFilename, File returnTempFilename, FCPServer server) throws IdentifierCollisionException, NotAllowedException, IOException {
		super(uri, identifier, verbosity, null, globalClient, prioClass,
				(persistRebootOnly ? ClientRequest.PERSIST_REBOOT : ClientRequest.PERSIST_FOREVER), null, true);
-		fctx = new FetchContext(client.defaultFetchContext, FetchContext.IDENTICAL_MASK, false);
+		fctx = new FetchContext(server.defaultFetchContext, FetchContext.IDENTICAL_MASK, false, null);
fctx.eventProducer.addEventListener(this);
fctx.localRequestOnly = dsOnly;
fctx.ignoreStore = ignoreDS;
@@ -93,7 +101,7 @@
if(returnType == ClientGetMessage.RETURN_TYPE_DISK) {
this.targetFile = returnFilename;
this.tempFile = returnTempFilename;
-			if(!(client.core.allowDownloadTo(returnTempFilename) && client.core.allowDownloadTo(returnFilename)))
+			if(!(server.core.allowDownloadTo(returnTempFilename) && server.core.allowDownloadTo(returnFilename)))
				throw new NotAllowedException();
			ret = new FileBucket(returnTempFilename, false, true, false, false, false);
} else if(returnType == ClientGetMessage.RETURN_TYPE_NONE) {
@@ -103,41 +111,23 @@
} else {
targetFile = null;
tempFile = null;
-			try {
			if(persistenceType == PERSIST_FOREVER)
-				ret = client.server.core.persistentTempBucketFactory.makeBucket(maxOutputLength);
+				ret = server.core.persistentTempBucketFactory.makeBucket(maxOutputLength);
			else
-				ret = fctx.bucketFactory.makeBucket(maxOutputLength);
-			} catch (IOException e) {
-				Logger.error(this, "Cannot create bucket for temp storage: "+e, e);
-				onFailure(new FetchException(FetchException.BUCKET_ERROR, e), null);
-				getter = null;
-				returnBucket = null;
-				return;
-			}
+				ret = server.core.tempBucketFactory.makeBucket(maxOutputLength);
		}
returnBucket = ret;
-		if(persistenceType != PERSIST_CONNECTION) {
-			try {
-				client.register(this, false);
-			} catch (IdentifierCollisionException e) {
-				ret.free();
-				throw e;
-			}
-		}
-		getter = new ClientGetter(this, client.core.requestStarters.chkFetchScheduler,
-				client.core.requestStarters.sskFetchScheduler, uri, fctx, priorityClass,
-				client.lowLevelClient, returnBucket, null);
-		if(persistenceType != PERSIST_CONNECTION) {
-			FCPMessage msg = persistentTagMessage();
-			client.queueClientRequestMessage(msg, 0);
-		}
+		getter = new ClientGetter(this, uri, fctx, priorityClass,
+				lowLevelClient,
+				returnBucket, null);
}
-
-	public ClientGet(FCPConnectionHandler handler, ClientGetMessage message) throws IdentifierCollisionException, MessageInvalidException {
+
+	public ClientGet(FCPConnectionHandler handler, ClientGetMessage message, FCPServer server) throws IdentifierCollisionException, MessageInvalidException {
super(message.uri, message.identifier, message.verbosity,
handler, message.priorityClass,
message.persistenceType, message.clientToken,
message.global);
// Create a Fetcher directly in order to get more fine-grained
control,
// since the client may override a few context elements.
-		fctx = new FetchContext(client.defaultFetchContext, FetchContext.IDENTICAL_MASK, false);
+		fctx = new FetchContext(server.defaultFetchContext, FetchContext.IDENTICAL_MASK, false, null);
fctx.eventProducer.addEventListener(this);
// ignoreDS
fctx.localRequestOnly = message.dsOnly;
@@ -161,7 +151,7 @@
if(returnType == ClientGetMessage.RETURN_TYPE_DISK) {
this.targetFile = message.diskFile;
this.tempFile = message.tempFile;
-			if(!(client.core.allowDownloadTo(tempFile) && client.core.allowDownloadTo(targetFile)))
+			if(!(server.core.allowDownloadTo(tempFile) && server.core.allowDownloadTo(targetFile)))
				throw new MessageInvalidException(ProtocolErrorMessage.ACCESS_DENIED, "Not allowed to download to "+tempFile+" or "+targetFile, identifier, global);
			else if(!(handler.allowDDAFrom(tempFile, true) && handler.allowDDAFrom(targetFile, true)))
				throw new MessageInvalidException(ProtocolErrorMessage.DIRECT_DISK_ACCESS_DENIED, "Not allowed to download to "+tempFile+" or "+targetFile + ". You might need to do a " + TestDDARequestMessage.NAME + " first.", identifier, global);
@@ -175,38 +165,24 @@
tempFile = null;
try {
				if(persistenceType == PERSIST_FOREVER)
-					ret = client.server.core.persistentTempBucketFactory.makeBucket(fctx.maxOutputLength);
+					ret = server.core.persistentTempBucketFactory.makeBucket(fctx.maxOutputLength);
				else
-					ret = fctx.bucketFactory.makeBucket(fctx.maxOutputLength);
+					ret = server.core.tempBucketFactory.makeBucket(fctx.maxOutputLength);
			} catch (IOException e) {
				Logger.error(this, "Cannot create bucket for temp storage: "+e, e);
-				onFailure(new FetchException(FetchException.BUCKET_ERROR, e), null);
				getter = null;
				returnBucket = null;
-				return;
+				// This is *not* a FetchException since we don't register it: it's a protocol error.
+				throw new MessageInvalidException(ProtocolErrorMessage.INTERNAL_ERROR, "Cannot create bucket for temporary storage (out of disk space???): "+e, identifier, global);
			}
}
if(ret == null)
			Logger.error(this, "Impossible: ret = null in FCP constructor for "+this, new Exception("debug"));
returnBucket = ret;
-		if(persistenceType != PERSIST_CONNECTION) {
-			try {
-				client.register(this, false);
-			} catch (IdentifierCollisionException e) {
-				ret.free();
-				throw e;
-			}
-		}
-		getter = new ClientGetter(this, client.core.requestStarters.chkFetchScheduler,
-				client.core.requestStarters.sskFetchScheduler, uri, fctx, priorityClass,
-				client.lowLevelClient, binaryBlob ? new NullBucket() : returnBucket,
-				binaryBlob ? returnBucket : null);
-		if(persistenceType != PERSIST_CONNECTION) {
-			FCPMessage msg = persistentTagMessage();
-			client.queueClientRequestMessage(msg, 0);
-			if(handler != null && (!handler.isGlobalSubscribed()))
-				handler.outputHandler.queue(msg);
-		}
+		getter = new ClientGetter(this,
+				uri, fctx, priorityClass,
+				lowLevelClient,
+				binaryBlob ? new NullBucket() : returnBucket, binaryBlob ? returnBucket : null);
}
/**
@@ -214,8 +190,9 @@
* Can throw, and does minimal verification, as is dealing with data
* supposedly serialized out by the node.
* @throws IOException
+ * @throws FetchException
*/
-	public ClientGet(SimpleFieldSet fs, FCPClient client2) throws IOException {
+	public ClientGet(SimpleFieldSet fs, FCPClient client2, FCPServer server) throws IOException, FetchException {
super(fs, client2);
		returnType = ClientGetMessage.parseValidReturnType(fs.get("ReturnType"));
@@ -232,7 +209,7 @@
		boolean ignoreDS = Fields.stringToBool(fs.get("IgnoreDS"), false);
boolean dsOnly = Fields.stringToBool(fs.get("DSOnly"), false);
int maxRetries = Integer.parseInt(fs.get("MaxRetries"));
-		fctx = new FetchContext(client.defaultFetchContext, FetchContext.IDENTICAL_MASK, false);
+		fctx = new FetchContext(server.defaultFetchContext, FetchContext.IDENTICAL_MASK, false, null);
fctx.eventProducer.addEventListener(this);
// ignoreDS
fctx.localRequestOnly = dsOnly;
@@ -263,22 +240,21 @@
ret = new NullBucket();
} else if(returnType == ClientGetMessage.RETURN_TYPE_DIRECT) {
try {
-				ret = SerializableToFieldSetBucketUtil.create(fs.subset("ReturnBucket"), fctx.random, client.server.core.persistentTempBucketFactory);
+				ret = SerializableToFieldSetBucketUtil.create(fs.subset("ReturnBucket"), server.core.random, server.core.persistentTempBucketFactory);
				if(ret == null) throw new CannotCreateFromFieldSetException("ret == null");
			} catch (CannotCreateFromFieldSetException e) {
				Logger.error(this, "Cannot read: "+this+" : "+e, e);
				try {
					// Create a new temp bucket
					if(persistenceType == PERSIST_FOREVER)
-						ret = client.server.core.persistentTempBucketFactory.makeBucket(fctx.maxOutputLength);
+						ret = server.core.persistentTempBucketFactory.makeBucket(fctx.maxOutputLength);
					else
-						ret = fctx.bucketFactory.makeBucket(fctx.maxOutputLength);
+						ret = server.core.tempBucketFactory.makeBucket(fctx.maxOutputLength);
				} catch (IOException e1) {
					Logger.error(this, "Cannot create bucket for temp storage: "+e, e);
-					onFailure(new FetchException(FetchException.BUCKET_ERROR, e), null);
					getter = null;
					returnBucket = null;
-					return;
+					throw new FetchException(FetchException.BUCKET_ERROR, e);
}
}
} else {
@@ -301,31 +277,49 @@
fctx.allowedMIMETypes.add(a);
}
-		getter = new ClientGetter(this, client.core.requestStarters.chkFetchScheduler,
-				client.core.requestStarters.sskFetchScheduler, uri,
-				fctx, priorityClass, client.lowLevelClient,
+		getter = new ClientGetter(this,
+				uri,
+				fctx, priorityClass,
+				lowLevelClient,
				binaryBlob ? new NullBucket() : returnBucket,
				binaryBlob ? returnBucket : null);
- if(persistenceType != PERSIST_CONNECTION) {
- FCPMessage msg = persistentTagMessage();
- client.queueClientRequestMessage(msg, 0);
- }
-
if(finished && succeeded)
			allDataPending = new AllDataMessage(returnBucket, identifier, global, startupTime, completionTime, this.foundDataMimeType);
}
+ /**
+ * Must be called just after construction, but within a transaction.
+	 * @throws IdentifierCollisionException If the identifier is already in use.
+	 */
+	void register(ObjectContainer container, boolean lazyResume, boolean noTags) throws IdentifierCollisionException {
+ if(client != null)
+ assert(this.persistenceType == client.persistenceType);
+ if(persistenceType != PERSIST_CONNECTION)
+ try {
+ client.register(this, lazyResume, container);
+ } catch (IdentifierCollisionException e) {
+ returnBucket.free();
+ if(persistenceType == PERSIST_FOREVER)
+ returnBucket.removeFrom(container);
+ throw e;
+ }
+ if(persistenceType != PERSIST_CONNECTION && !noTags) {
+			FCPMessage msg = persistentTagMessage(container);
+			client.queueClientRequestMessage(msg, 0, container);
+ }
+ }
+
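
As the javadoc above notes, construction and registration are now split, and register() must run inside a transaction. A hypothetical caller sketch, given a freshly constructed final ClientGet `get` and a ClientContext `context` (DBJob and jobRunner are used the same way elsewhere in this commit):

    // Run on the database thread: register within the transaction, then start.
    context.jobRunner.queue(new DBJob() {
        public void run(ObjectContainer container, ClientContext context) {
            try {
                get.register(container, false, false);
                get.start(container, context);
            } catch (IdentifierCollisionException e) {
                // Identifier already in use: report the collision over FCP.
            }
        }
    }, NativeThread.HIGH_PRIORITY, false);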
@Override
- public void start() {
+ public void start(ObjectContainer container, ClientContext context) {
try {
synchronized(this) {
if(finished) return;
}
- getter.start();
+ getter.start(container, context);
if(persistenceType != PERSIST_CONNECTION && !finished) {
- FCPMessage msg = persistentTagMessage();
- client.queueClientRequestMessage(msg, 0);
+ FCPMessage msg =
persistentTagMessage(container);
+ client.queueClientRequestMessage(msg, 0,
container);
}
synchronized(this) {
started = true;
@@ -334,31 +328,45 @@
synchronized(this) {
started = true;
} // before the failure handler
- onFailure(e, null);
+ onFailure(e, null, container);
} catch (Throwable t) {
synchronized(this) {
started = true;
}
- onFailure(new
FetchException(FetchException.INTERNAL_ERROR, t), null);
+ onFailure(new
FetchException(FetchException.INTERNAL_ERROR, t), null, container);
}
+ if(persistenceType == PERSIST_FOREVER)
+ container.store(this); // Update
}
@Override
- public void onLostConnection() {
+ public void onLostConnection(ObjectContainer container, ClientContext
context) {
if(persistenceType == PERSIST_CONNECTION)
- cancel();
+ cancel(container, context);
// Otherwise ignore
}
- public void onSuccess(FetchResult result, ClientGetter state) {
+ public void onSuccess(FetchResult result, ClientGetter state,
ObjectContainer container) {
Logger.minor(this, "Succeeded: "+identifier);
Bucket data = result.asBucket();
+ if(persistenceType == PERSIST_FOREVER) {
+ if(data != null)
+ container.activate(data, 5);
+ if(returnBucket != null)
+ container.activate(returnBucket, 5);
+ container.activate(client, 1);
+ if(tempFile != null)
+ container.activate(tempFile, 5);
+ if(targetFile != null)
+ container.activate(targetFile, 5);
+ }
if(returnBucket != data && !binaryBlob) {
boolean failed = true;
synchronized(this) {
if(finished) {
Logger.error(this, "Already finished
but onSuccess() for "+this+" data = "+data, new Exception("debug"));
data.free();
+ if(persistenceType == PERSIST_FOREVER)
data.removeFrom(container);
return; // Already failed - bucket
error maybe??
}
if(returnType ==
ClientGetMessage.RETURN_TYPE_DIRECT && returnBucket == null) {
@@ -368,10 +376,18 @@
failed = false;
}
}
+ if(failed && persistenceType == PERSIST_FOREVER) {
+			if(container.ext().getID(returnBucket) == container.ext().getID(data)) {
+				Logger.error(this, "DB4O BUG DETECTED WITHOUT ARRAY HANDLING! EVIL HORRIBLE BUG! UID(returnBucket)="+container.ext().getID(returnBucket)+" for "+returnBucket+" active="+container.ext().isActive(returnBucket)+" stored = "+container.ext().isStored(returnBucket)+" but UID(data)="+container.ext().getID(data)+" for "+data+" active = "+container.ext().isActive(data)+" stored = "+container.ext().isStored(data));
+				// Succeed anyway, hope that the returned bucket is consistent...
+ returnBucket = data;
+ failed = false;
+ }
+ }
if(failed) {
Logger.error(this, "returnBucket =
"+returnBucket+" but onSuccess() data = "+data, new Exception("debug"));
// Caller guarantees that data == returnBucket
- onFailure(new
FetchException(FetchException.INTERNAL_ERROR, "Data != returnBucket"), null);
+ onFailure(new
FetchException(FetchException.INTERNAL_ERROR, "Data != returnBucket"), null,
container);
return;
}
}
@@ -410,22 +426,32 @@
}
returnBucket = new FileBucket(targetFile,
false, true, false, false, false);
}
+ if(persistenceType == PERSIST_FOREVER &&
progressPending != null) {
+ container.activate(progressPending, 1);
+ progressPending.removeFrom(container);
+ }
progressPending = null;
this.foundDataLength = returnBucket.size();
this.succeeded = true;
finished = true;
}
- trySendDataFoundOrGetFailed(null);
+ trySendDataFoundOrGetFailed(null, container);
if(adm != null)
- trySendAllDataMessage(adm, null);
- if(!dontFree)
+ trySendAllDataMessage(adm, null, container);
+ if(!dontFree) {
data.free();
- finish();
- client.notifySuccess(this);
+ }
+ if(persistenceType == PERSIST_FOREVER) {
+ returnBucket.storeTo(container);
+ container.store(this);
+ }
+ finish(container);
+ if(client != null)
+ client.notifySuccess(this, container);
}
- private void trySendDataFoundOrGetFailed(FCPConnectionOutputHandler
handler) {
+ private void trySendDataFoundOrGetFailed(FCPConnectionOutputHandler
handler, ObjectContainer container) {
FCPMessage msg;
// Don't need to lock. succeeded is only ever set, never unset.
@@ -434,67 +460,112 @@
msg = new DataFoundMessage(foundDataLength,
foundDataMimeType, identifier, global);
} else {
msg = getFailedMessage;
+ if(persistenceType == PERSIST_FOREVER)
+ container.activate(msg, 5);
}
+ if(handler == null && persistenceType == PERSIST_CONNECTION)
+ handler = origHandler.outputHandler;
if(handler != null)
handler.queue(msg);
else
- client.queueClientRequestMessage(msg, 0);
+ client.queueClientRequestMessage(msg, 0, container);
if(postFetchProtocolErrorMessage != null) {
+ if(persistenceType == PERSIST_FOREVER)
+
container.activate(postFetchProtocolErrorMessage, 5);
if(handler != null)
handler.queue(postFetchProtocolErrorMessage);
- else
-
client.queueClientRequestMessage(postFetchProtocolErrorMessage, 0);
+ else {
+ if(persistenceType == PERSIST_FOREVER)
+ container.activate(client, 1);
+
client.queueClientRequestMessage(postFetchProtocolErrorMessage, 0, container);
+ }
}
}
- private void trySendAllDataMessage(AllDataMessage msg,
FCPConnectionOutputHandler handler) {
+ private void trySendAllDataMessage(AllDataMessage msg,
FCPConnectionOutputHandler handler, ObjectContainer container) {
if(persistenceType != ClientRequest.PERSIST_CONNECTION) {
allDataPending = msg;
- } else {
- client.queueClientRequestMessage(msg, 0);
+ if(persistenceType == ClientRequest.PERSIST_FOREVER) {
+ container.store(this);
+ }
+ return;
}
+ if(handler == null)
+ handler = origHandler.outputHandler;
+
+ handler.queue(msg);
}
- private void trySendProgress(SimpleProgressMessage msg,
FCPConnectionOutputHandler handler) {
+ private void trySendProgress(SimpleProgressMessage msg,
FCPConnectionOutputHandler handler, ObjectContainer container) {
if(persistenceType != ClientRequest.PERSIST_CONNECTION) {
+ FCPMessage oldProgress = progressPending;
progressPending = msg;
+ if(persistenceType == ClientRequest.PERSIST_FOREVER) {
+ container.store(this);
+ if(oldProgress != null) {
+ container.activate(oldProgress, 1);
+ oldProgress.removeFrom(container);
+ }
+ }
}
- client.queueClientRequestMessage(msg,
VERBOSITY_SPLITFILE_PROGRESS);
+ if(persistenceType == PERSIST_FOREVER)
+ container.activate(client, 1);
+ if(persistenceType == PERSIST_CONNECTION && handler == null)
+ handler = origHandler.outputHandler;
+ if(handler != null)
+ handler.queue(msg);
+ else
+ client.queueClientRequestMessage(msg,
VERBOSITY_SPLITFILE_PROGRESS, container);
+ if(persistenceType == PERSIST_FOREVER && !client.isGlobalQueue)
+ container.deactivate(client, 1);
}
@Override
- public void sendPendingMessages(FCPConnectionOutputHandler handler,
boolean includePersistentRequest, boolean includeData, boolean onlyData) {
+ public void sendPendingMessages(FCPConnectionOutputHandler handler,
boolean includePersistentRequest, boolean includeData, boolean onlyData,
ObjectContainer container) {
if(persistenceType == ClientRequest.PERSIST_CONNECTION) {
Logger.error(this, "WTF?
persistenceType="+persistenceType, new Exception("error"));
return;
}
if(!onlyData) {
if(includePersistentRequest) {
- FCPMessage msg = persistentTagMessage();
+ FCPMessage msg =
persistentTagMessage(container);
handler.queue(msg);
}
- if(progressPending != null)
+ if(progressPending != null) {
+ if(persistenceType == PERSIST_FOREVER)
+ container.activate(progressPending, 5);
handler.queue(progressPending);
+ }
if(finished)
- trySendDataFoundOrGetFailed(handler);
+ trySendDataFoundOrGetFailed(handler, container);
}
if (onlyData && allDataPending == null) {
Logger.error(this, "No data pending !");
}
- if(includeData && (allDataPending != null))
+ if(includeData && (allDataPending != null)) {
+ if(persistenceType == PERSIST_FOREVER)
+ container.activate(allDataPending, 5);
handler.queue(allDataPending);
+ }
}
@Override
- protected FCPMessage persistentTagMessage() {
+ protected FCPMessage persistentTagMessage(ObjectContainer container) {
+ if(persistenceType == PERSIST_FOREVER) {
+ container.activate(uri, 5);
+ container.activate(fctx, 1);
+ container.activate(client, 1);
+ container.activate(targetFile, 5);
+ container.activate(tempFile, 5);
+ }
return new PersistentGet(identifier, uri, verbosity,
priorityClass, returnType, persistenceType, targetFile, tempFile, clientToken,
client.isGlobalQueue, started, fctx.maxNonSplitfileRetries, binaryBlob,
fctx.maxOutputLength);
}
- public void onFailure(FetchException e, ClientGetter state) {
+ public void onFailure(FetchException e, ClientGetter state,
ObjectContainer container) {
if(finished) return;
synchronized(this) {
succeeded = false;
@@ -504,28 +575,34 @@
}
if(Logger.shouldLog(Logger.MINOR, this))
Logger.minor(this, "Caught "+e, e);
- trySendDataFoundOrGetFailed(null);
- finish();
- freeData();
- client.notifyFailure(this);
- if(persistenceType != PERSIST_CONNECTION)
- client.server.forceStorePersistentRequests();
+ trySendDataFoundOrGetFailed(null, container);
+ if(persistenceType == PERSIST_FOREVER) {
+ container.activate(client, 1);
+ }
+ // We do not want the data to be removed on failure, because
the request
+ // may be restarted, and the bucket persists on the getter,
even if we get rid of it here.
+ //freeData(container);
+ finish(container);
+ if(client != null)
+ client.notifyFailure(this, container);
+ if(persistenceType == PERSIST_FOREVER)
+ container.store(this);
}
- public void onSuccess(BaseClientPutter state) {
+ public void onSuccess(BaseClientPutter state, ObjectContainer
container) {
// Ignore
}
- public void onFailure(InsertException e, BaseClientPutter state) {
+ public void onFailure(InsertException e, BaseClientPutter state,
ObjectContainer container) {
// Ignore
}
- public void onGeneratedURI(FreenetURI uri, BaseClientPutter state) {
+ public void onGeneratedURI(FreenetURI uri, BaseClientPutter state,
ObjectContainer container) {
// Ignore
}
@Override
- public void requestWasRemoved() {
+ public void requestWasRemoved(ObjectContainer container, ClientContext
context) {
// if request is still running, send a GetFailed with
code=cancelled
if( !finished ) {
synchronized(this) {
@@ -534,25 +611,68 @@
FetchException cancelled = new
FetchException(FetchException.CANCELLED);
getFailedMessage = new
GetFailedMessage(cancelled, identifier, global);
}
- trySendDataFoundOrGetFailed(null);
+ trySendDataFoundOrGetFailed(null, container);
}
// notify client that request was removed
FCPMessage msg = new
PersistentRequestRemovedMessage(getIdentifier(), global);
- client.queueClientRequestMessage(msg, 0);
+ if(persistenceType == PERSIST_FOREVER)
+ container.activate(client, 1);
+ client.queueClientRequestMessage(msg, 0, container);
- freeData();
- finish();
+ freeData(container);
+
+ if(persistenceType == PERSIST_FOREVER) {
+ container.activate(fctx, 1);
+ if(fctx.allowedMIMETypes != null) {
+ container.activate(fctx.allowedMIMETypes, 5);
+ container.delete(fctx.allowedMIMETypes);
+ }
+ fctx.removeFrom(container);
+ getter.removeFrom(container, context);
+ if(targetFile != null)
+ container.delete(targetFile);
+ if(tempFile != null)
+ container.delete(tempFile);
+ if(getFailedMessage != null) {
+ container.activate(getFailedMessage, 5);
+ getFailedMessage.removeFrom(container);
+ }
+ if(postFetchProtocolErrorMessage != null) {
+
container.activate(postFetchProtocolErrorMessage, 5);
+
postFetchProtocolErrorMessage.removeFrom(container);
+ }
+ if(allDataPending != null) {
+ container.activate(allDataPending, 5);
+ allDataPending.removeFrom(container);
+ }
+ if(progressPending != null) {
+ container.activate(progressPending, 5);
+ progressPending.removeFrom(container);
+ }
+ }
+ super.requestWasRemoved(container, context);
}
- public void receive(ClientEvent ce) {
+ public void receive(ClientEvent ce, ObjectContainer container,
ClientContext context) {
// Don't need to lock, verbosity is final and finished is never
unset.
if(finished) return;
if(!(((verbosity & VERBOSITY_SPLITFILE_PROGRESS) ==
VERBOSITY_SPLITFILE_PROGRESS) &&
(ce instanceof SplitfileProgressEvent)))
return;
- SimpleProgressMessage progress =
+ final SimpleProgressMessage progress =
new SimpleProgressMessage(identifier, global,
(SplitfileProgressEvent)ce);
- trySendProgress(progress, null);
+ // container may be null...
+ if(persistenceType == PERSIST_FOREVER && container == null) {
+ context.jobRunner.queue(new DBJob() {
+
+ public void run(ObjectContainer container,
ClientContext context) {
+ trySendProgress(progress, null,
container);
+ }
+
+ }, NativeThread.HIGH_PRIORITY, false);
+ } else {
+ trySendProgress(progress, null, container);
+ }
}
// This is distinct from the ClientGetMessage code, as later on it will
be radically
@@ -614,9 +734,21 @@
}
@Override
- protected void freeData() {
- if(returnBucket != null)
- returnBucket.free();
+ protected void freeData(ObjectContainer container) {
+ Bucket data;
+ synchronized(this) {
+ data = returnBucket;
+ returnBucket = null;
+ }
+ if(data != null) {
+ if(persistenceType == PERSIST_FOREVER)
+ container.activate(data, 5);
+ data.free();
+ if(persistenceType == PERSIST_FOREVER)
+ data.removeFrom(container);
+ if(persistenceType == PERSIST_FOREVER)
+ container.store(this);
+ }
}
@Override
@@ -632,23 +764,31 @@
return this.returnType == ClientGetMessage.RETURN_TYPE_DISK;
}
- public FreenetURI getURI() {
+ public FreenetURI getURI(ObjectContainer container) {
+ if(persistenceType == PERSIST_FOREVER)
+ container.activate(uri, 5);
return uri;
}
- public long getDataSize() {
+ public long getDataSize(ObjectContainer container) {
if(foundDataLength > 0)
return foundDataLength;
- if(getter != null)
+ if(getter != null) {
+ if(persistenceType == PERSIST_FOREVER)
+ container.activate(getter, 1);
return getter.expectedSize();
+ }
return -1;
}
- public String getMIMEType() {
+ public String getMIMEType(ObjectContainer container) {
if(foundDataMimeType != null)
return foundDataMimeType;
- if(getter != null)
+ if(getter != null) {
+ if(persistenceType == PERSIST_FOREVER)
+ container.activate(getter, 1);
return getter.expectedMIME();
+ }
return null;
}
@@ -657,7 +797,9 @@
}
@Override
- public double getSuccessFraction() {
+ public double getSuccessFraction(ObjectContainer container) {
+ if(persistenceType == PERSIST_FOREVER && progressPending !=
null)
+ container.activate(progressPending, 2);
if(progressPending != null) {
return progressPending.getFraction();
} else
@@ -665,7 +807,9 @@
}
@Override
- public double getTotalBlocks() {
+ public double getTotalBlocks(ObjectContainer container) {
+ if(persistenceType == PERSIST_FOREVER && progressPending !=
null)
+ container.activate(progressPending, 2);
if(progressPending != null) {
return progressPending.getTotalBlocks();
} else
@@ -673,7 +817,9 @@
}
@Override
- public double getMinBlocks() {
+ public double getMinBlocks(ObjectContainer container) {
+ if(persistenceType == PERSIST_FOREVER && progressPending !=
null)
+ container.activate(progressPending, 2);
if(progressPending != null) {
return progressPending.getMinBlocks();
} else
@@ -681,7 +827,9 @@
}
@Override
- public double getFailedBlocks() {
+ public double getFailedBlocks(ObjectContainer container) {
+ if(persistenceType == PERSIST_FOREVER && progressPending !=
null)
+ container.activate(progressPending, 2);
if(progressPending != null) {
return progressPending.getFailedBlocks();
} else
@@ -689,7 +837,9 @@
}
@Override
- public double getFatalyFailedBlocks() {
+ public double getFatalyFailedBlocks(ObjectContainer container) {
+ if(persistenceType == PERSIST_FOREVER && progressPending !=
null)
+ container.activate(progressPending, 2);
if(progressPending != null) {
return progressPending.getFatalyFailedBlocks();
} else
@@ -697,7 +847,9 @@
}
@Override
- public double getFetchedBlocks() {
+ public double getFetchedBlocks(ObjectContainer container) {
+ if(persistenceType == PERSIST_FOREVER && progressPending !=
null)
+ container.activate(progressPending, 2);
if(progressPending != null) {
return progressPending.getFetchedBlocks();
} else
@@ -705,9 +857,11 @@
}
@Override
- public String getFailureReason() {
+ public String getFailureReason(ObjectContainer container) {
if(getFailedMessage == null)
return null;
+ if(persistenceType == PERSIST_FOREVER)
+ container.activate(getFailedMessage, 5);
String s = getFailedMessage.shortCodeDescription;
if(getFailedMessage.extraDescription != null)
s += ": "+getFailedMessage.extraDescription;
@@ -716,10 +870,14 @@
@Override
- public boolean isTotalFinalized() {
+ public boolean isTotalFinalized(ObjectContainer container) {
if(finished && succeeded) return true;
if(progressPending == null) return false;
- else return progressPending.isTotalFinalized();
+ else {
+ if(persistenceType == PERSIST_FOREVER)
+ container.activate(progressPending, 1);
+ return progressPending.isTotalFinalized();
+ }
}
/**
@@ -728,18 +886,21 @@
* @return The data in a {@link Bucket}, or <code>null</code> if this
* isn’t applicable
*/
- public Bucket getBucket() {
+ public Bucket getBucket(ObjectContainer container) {
synchronized(this) {
if(targetFile != null) {
- if(succeeded || tempFile == null)
+ if(succeeded || tempFile == null) {
+ if(persistenceType == PERSIST_FOREVER)
container.activate(targetFile, 5);
return new FileBucket(targetFile,
false, true, false, false, false);
- else
+ } else {
+ if(persistenceType == PERSIST_FOREVER)
container.activate(tempFile, 5);
return new FileBucket(tempFile, false,
true, false, false, false);
+ }
} else return returnBucket;
}
}
- public void onFetchable(BaseClientPutter state) {
+ public void onFetchable(BaseClientPutter state, ObjectContainer
container) {
// Ignore, we don't insert
}
@@ -757,29 +918,45 @@
}
@Override
- public boolean restart() {
+ public boolean restart(ObjectContainer container, ClientContext
context) {
if(!canRestart()) return false;
FreenetURI redirect;
synchronized(this) {
finished = false;
redirect =
getFailedMessage == null ? null :
getFailedMessage.redirectURI;
+ if(persistenceType == PERSIST_FOREVER &&
getFailedMessage != null)
+ getFailedMessage.removeFrom(container);
this.getFailedMessage = null;
+ if(persistenceType == PERSIST_FOREVER && allDataPending
!= null)
+ allDataPending.removeFrom(container);
this.allDataPending = null;
+ if(persistenceType == PERSIST_FOREVER &&
postFetchProtocolErrorMessage != null)
+
postFetchProtocolErrorMessage.removeFrom(container);
this.postFetchProtocolErrorMessage = null;
+ if(persistenceType == PERSIST_FOREVER &&
progressPending != null)
+ progressPending.removeFrom(container);
this.progressPending = null;
started = false;
}
+ if(persistenceType == PERSIST_FOREVER)
+ container.store(this);
try {
- if(getter.restart(redirect)) {
+ if(getter.restart(redirect, container, context)) {
synchronized(this) {
- if(redirect != null) this.uri =
redirect;
+ if(redirect != null) {
+ if(persistenceType ==
PERSIST_FOREVER)
+
uri.removeFrom(container);
+ this.uri = redirect;
+ }
started = true;
}
+ if(persistenceType == PERSIST_FOREVER)
+ container.store(this);
}
return true;
} catch (FetchException e) {
- onFailure(e, null);
+ onFailure(e, null, container);
return false;
}
}
@@ -787,4 +964,8 @@
public synchronized boolean hasPermRedirect() {
return getFailedMessage != null && getFailedMessage.redirectURI
!= null;
}
+
+ public void onRemoveEventProducer(ObjectContainer container) {
+ // Do nothing, we called the removeFrom().
+ }
}
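
The recurring discipline in ClientGet above reduces to one small pattern; a sketch with a hypothetical persistent field:

    // db4o rule of thumb used throughout this file:
    if(persistenceType == PERSIST_FOREVER)
        container.activate(someField, 1);   // make the persistent object usable
    someField.touch();                      // hypothetical mutation
    if(persistenceType == PERSIST_FOREVER)
        container.store(this);              // write the change back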
Modified: trunk/freenet/src/freenet/node/fcp/ClientGetMessage.java
===================================================================
--- trunk/freenet/src/freenet/node/fcp/ClientGetMessage.java	2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/node/fcp/ClientGetMessage.java	2009-04-01 20:34:09 UTC (rev 26322)
@@ -7,6 +7,8 @@
import java.io.IOException;
import java.net.MalformedURLException;
+import com.db4o.ObjectContainer;
+
import freenet.keys.FreenetURI;
import freenet.node.Node;
import freenet.node.RequestStarter;
@@ -268,4 +270,11 @@
		throw new IllegalArgumentException("Invalid or unsupported return type: "+returnTypeString(s));
}
+ public void removeFrom(ObjectContainer container) {
+ uri.removeFrom(container);
+ container.delete(diskFile);
+ container.delete(tempFile);
+ container.delete(this);
+ }
+
}
Modified: trunk/freenet/src/freenet/node/fcp/ClientHelloMessage.java
===================================================================
--- trunk/freenet/src/freenet/node/fcp/ClientHelloMessage.java	2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/node/fcp/ClientHelloMessage.java	2009-04-01 20:34:09 UTC (rev 26322)
@@ -3,6 +3,8 @@
* http://www.gnu.org/ for further details of the GPL. */
package freenet.node.fcp;
+import com.db4o.ObjectContainer;
+
import freenet.node.Node;
import freenet.support.SimpleFieldSet;
@@ -49,4 +51,8 @@
handler.setClientName(clientName);
}
+ public void removeFrom(ObjectContainer container) {
+ container.delete(this);
+ }
+
}
Modified: trunk/freenet/src/freenet/node/fcp/ClientPut.java
===================================================================
--- trunk/freenet/src/freenet/node/fcp/ClientPut.java	2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/node/fcp/ClientPut.java	2009-04-01 20:34:09 UTC (rev 26322)
@@ -20,10 +20,12 @@
import freenet.client.Metadata;
import freenet.client.MetadataUnresolvedException;
import freenet.client.async.BinaryBlob;
+import freenet.client.async.ClientContext;
import freenet.client.async.ClientGetter;
import freenet.client.async.ClientPutter;
import freenet.crypt.SHA256;
import freenet.keys.FreenetURI;
+import freenet.node.RequestClient;
import freenet.support.Base64;
import freenet.support.IllegalBase64Exception;
import freenet.support.Logger;
@@ -34,15 +36,17 @@
import freenet.support.io.FileBucket;
import freenet.support.io.SerializableToFieldSetBucketUtil;
+import com.db4o.ObjectContainer;
+
public class ClientPut extends ClientPutBase {
- final ClientPutter putter;
+ ClientPutter putter;
private final short uploadFrom;
/** Original filename if from disk, otherwise null. Purely for
PersistentPut. */
private final File origFilename;
/** If uploadFrom==UPLOAD_FROM_REDIRECT, this is the target of the
redirect */
private final FreenetURI targetURI;
- private final Bucket data;
+ private Bucket data;
private final ClientMetadata clientMetadata;
/** We store the size of inserted data before freeing it */
private long finishedSize;
@@ -93,14 +97,16 @@
* @throws NotAllowedException
* @throws FileNotFoundException
* @throws MalformedURLException
+ * @throws MetadataUnresolvedException
+ * @throws InsertException
*/
	public ClientPut(FCPClient globalClient, FreenetURI uri, String identifier, int verbosity,
			short priorityClass, short persistenceType, String clientToken, boolean getCHKOnly,
			boolean dontCompress, int maxRetries, short uploadFromType, File origFilename, String contentType,
-			Bucket data, FreenetURI redirectTarget, String targetFilename, boolean earlyEncode) throws IdentifierCollisionException, NotAllowedException, FileNotFoundException, MalformedURLException {
-		super(uri, identifier, verbosity, null, globalClient, priorityClass, persistenceType, null, true, getCHKOnly, dontCompress, maxRetries, earlyEncode);
+			Bucket data, FreenetURI redirectTarget, String targetFilename, boolean earlyEncode, FCPServer server) throws IdentifierCollisionException, NotAllowedException, FileNotFoundException, MalformedURLException, MetadataUnresolvedException {
+		super(uri, identifier, verbosity, null, globalClient, priorityClass, persistenceType, null, true, getCHKOnly, dontCompress, maxRetries, earlyEncode, server);
if(uploadFromType == ClientPutMessage.UPLOAD_FROM_DISK) {
- if(!globalClient.core.allowUploadFrom(origFilename))
+ if(!server.core.allowUploadFrom(origFilename))
throw new NotAllowedException();
if(!(origFilename.exists() && origFilename.canRead()))
throw new FileNotFoundException();
@@ -114,8 +120,6 @@
// Now go through the fields one at a time
String mimeType = contentType;
this.clientToken = clientToken;
- if(persistenceType != PERSIST_CONNECTION)
- client.register(this, false);
Bucket tempData = data;
ClientMetadata cm = new ClientMetadata(mimeType);
boolean isMetadata = false;
@@ -125,17 +129,7 @@
this.targetURI = redirectTarget;
Metadata m = new Metadata(Metadata.SIMPLE_REDIRECT,
null, null, targetURI, cm);
byte[] d;
- try {
- d = m.writeToByteArray();
- } catch (MetadataUnresolvedException e) {
- // Impossible
- Logger.error(this, "Impossible: "+e, e);
- onFailure(new
InsertException(InsertException.INTERNAL_ERROR, "Impossible: "+e+" in
ClientPut", null), null);
- this.data = null;
- clientMetadata = cm;
- putter = null;
- return;
- }
+ d = m.writeToByteArray();
tempData = new SimpleReadOnlyArrayBucket(d);
isMetadata = true;
} else
@@ -145,18 +139,16 @@
this.clientMetadata = cm;
putter = new ClientPutter(this, data, uri, cm,
- ctx,
client.core.requestStarters.chkPutScheduler,
client.core.requestStarters.sskPutScheduler, priorityClass,
- getCHKOnly, isMetadata, client.lowLevelClient,
null, targetFilename, binaryBlob);
- if(persistenceType != PERSIST_CONNECTION) {
- FCPMessage msg = persistentTagMessage();
- client.queueClientRequestMessage(msg, 0);
- }
+ ctx, priorityClass,
+ getCHKOnly, isMetadata,
+ lowLevelClient,
+ null, targetFilename, binaryBlob);
}
-	public ClientPut(FCPConnectionHandler handler, ClientPutMessage message) throws IdentifierCollisionException, MessageInvalidException, MalformedURLException {
+	public ClientPut(FCPConnectionHandler handler, ClientPutMessage message, FCPServer server) throws IdentifierCollisionException, MessageInvalidException, MalformedURLException {
		super(message.uri, message.identifier, message.verbosity, handler,
				message.priorityClass, message.persistenceType, message.clientToken, message.global,
-				message.getCHKOnly, message.dontCompress, message.maxRetries, message.earlyEncode);
+				message.getCHKOnly, message.dontCompress, message.maxRetries, message.earlyEncode, server);
String salt = null;
byte[] saltedHash = null;
binaryBlob = message.binaryBlob;
@@ -197,8 +189,6 @@
mimeType = DefaultMIMETypes.guessMIMEType(identifier,
true);
}
clientToken = message.clientToken;
- if(persistenceType != PERSIST_CONNECTION)
- client.register(this, false);
Bucket tempData = message.bucket;
ClientMetadata cm = new ClientMetadata(mimeType);
boolean isMetadata = false;
@@ -212,11 +202,11 @@
} catch (MetadataUnresolvedException e) {
// Impossible
Logger.error(this, "Impossible: "+e, e);
-				onFailure(new InsertException(InsertException.INTERNAL_ERROR, "Impossible: "+e+" in ClientPut", null), null);
				this.data = null;
				clientMetadata = cm;
				putter = null;
-				return;
+				// This is *not* an InsertException since we don't register it: it's a protocol error.
+				throw new MessageInvalidException(ProtocolErrorMessage.INTERNAL_ERROR, "Impossible: metadata unresolved: "+e, identifier, global);
}
tempData = new SimpleReadOnlyArrayBucket(d);
isMetadata = true;
@@ -258,14 +248,10 @@
if(logMINOR) Logger.minor(this, "data = "+data+", uploadFrom =
"+ClientPutMessage.uploadFromString(uploadFrom));
putter = new ClientPutter(this, data, uri, cm,
- ctx,
client.core.requestStarters.chkPutScheduler,
client.core.requestStarters.sskPutScheduler, priorityClass,
- getCHKOnly, isMetadata, client.lowLevelClient,
null, targetFilename, binaryBlob);
- if(persistenceType != PERSIST_CONNECTION) {
- FCPMessage msg = persistentTagMessage();
- client.queueClientRequestMessage(msg, 0);
- if(handler != null && (!handler.isGlobalSubscribed()))
- handler.outputHandler.queue(msg);
- }
+ ctx, priorityClass,
+ getCHKOnly, isMetadata,
+ lowLevelClient,
+ null, targetFilename, binaryBlob);
}
/**
@@ -274,9 +260,10 @@
* by the node.
* @throws PersistenceParseException
* @throws IOException
+ * @throws InsertException
*/
- public ClientPut(SimpleFieldSet fs, FCPClient client2) throws
PersistenceParseException, IOException {
- super(fs, client2);
+ public ClientPut(SimpleFieldSet fs, FCPClient client2, FCPServer
server, ObjectContainer container) throws PersistenceParseException,
IOException, InsertException {
+ super(fs, client2, server);
logMINOR = Logger.shouldLog(Logger.MINOR, this);
String mimeType = fs.get("Metadata.ContentType");
@@ -311,7 +298,7 @@
Logger.minor(this, "Uploading from direct for
"+this);
if(!finished) {
try {
- data =
SerializableToFieldSetBucketUtil.create(fs.subset("TempBucket"), ctx.random,
client.server.core.persistentTempBucketFactory);
+ data =
SerializableToFieldSetBucketUtil.create(fs.subset("TempBucket"),
server.core.random, server.core.persistentTempBucketFactory);
} catch (CannotCreateFromFieldSetException e) {
throw new
PersistenceParseException("Could not read old bucket for "+identifier+" : "+e,
e);
}
@@ -333,12 +320,11 @@
} catch (MetadataUnresolvedException e) {
// Impossible
Logger.error(this, "Impossible: "+e, e);
- onFailure(new
InsertException(InsertException.INTERNAL_ERROR, "Impossible: "+e+" in
ClientPut", null), null);
this.data = null;
clientMetadata = cm;
origFilename = null;
putter = null;
- return;
+ throw new
InsertException(InsertException.INTERNAL_ERROR, "Impossible: "+e+" in
ClientPut", null);
}
data = new SimpleReadOnlyArrayBucket(d);
origFilename = null;
@@ -350,50 +336,71 @@
this.clientMetadata = cm;
SimpleFieldSet oldProgress = fs.subset("progress");
if(finished) oldProgress = null; // Not useful any more
- putter = new ClientPutter(this, data, uri, cm, ctx,
client.core.requestStarters.chkPutScheduler,
- client.core.requestStarters.sskPutScheduler,
priorityClass, getCHKOnly, isMetadata,
- client.lowLevelClient, oldProgress,
targetFilename, binaryBlob);
+ putter = new ClientPutter(this, data, uri, cm, ctx,
+ priorityClass, getCHKOnly, isMetadata,
+ lowLevelClient,
+ oldProgress, targetFilename, binaryBlob);
if(persistenceType != PERSIST_CONNECTION) {
- FCPMessage msg = persistentTagMessage();
- client.queueClientRequestMessage(msg, 0);
+ FCPMessage msg = persistentTagMessage(container);
+ client.queueClientRequestMessage(msg, 0, container);
}
}
+ void register(ObjectContainer container, boolean lazyResume, boolean
noTags) throws IdentifierCollisionException {
+ if(persistenceType != PERSIST_CONNECTION)
+ client.register(this, false, container);
+ if(persistenceType != PERSIST_CONNECTION && !noTags) {
+ FCPMessage msg = persistentTagMessage(container);
+ client.queueClientRequestMessage(msg, 0, container);
+ }
+ }
+
@Override
- public void start() {
+ public void start(ObjectContainer container, ClientContext context) {
if(Logger.shouldLog(Logger.MINOR, this))
Logger.minor(this, "Starting "+this+" : "+identifier);
synchronized(this) {
if(finished) return;
}
try {
- putter.start(earlyEncode);
+ putter.start(earlyEncode, false, container, context);
if(persistenceType != PERSIST_CONNECTION && !finished) {
- FCPMessage msg = persistentTagMessage();
- client.queueClientRequestMessage(msg, 0);
+ FCPMessage msg =
persistentTagMessage(container);
+ client.queueClientRequestMessage(msg, 0,
container);
}
synchronized(this) {
started = true;
}
+ if(persistenceType == PERSIST_FOREVER)
+ container.store(this); // Update
} catch (InsertException e) {
synchronized(this) {
started = true;
}
- onFailure(e, null);
+ onFailure(e, null, container);
} catch (Throwable t) {
synchronized(this) {
started = true;
}
- onFailure(new
InsertException(InsertException.INTERNAL_ERROR, t, null), null);
+ onFailure(new
InsertException(InsertException.INTERNAL_ERROR, t, null), null, container);
}
}
@Override
- protected void freeData() {
- if(data == null) return;
- finishedSize=data.size();
- data.free();
+ protected void freeData(ObjectContainer container) {
+ Bucket d;
+ synchronized(this) {
+ d = data;
+ data = null;
+ if(d == null) return;
+ if(persistenceType == PERSIST_FOREVER)
+ container.activate(d, 5);
+ finishedSize = d.size();
+ }
+ d.free();
+ if(persistenceType == PERSIST_FOREVER)
+ d.removeFrom(container);
}
@Override
@@ -431,10 +438,15 @@
}
@Override
- protected FCPMessage persistentTagMessage() {
+ protected FCPMessage persistentTagMessage(ObjectContainer container) {
+ if(persistenceType == PERSIST_FOREVER) {
+ container.activate(publicURI, 5);
+ container.activate(clientMetadata, 5);
+ container.activate(origFilename, 5);
+ }
return new PersistentPut(identifier, publicURI, verbosity,
priorityClass, uploadFrom, targetURI,
persistenceType, origFilename,
clientMetadata.getMIMEType(), client.isGlobalQueue,
- getDataSize(), clientToken, started,
ctx.maxInsertRetries, targetFilename, binaryBlob);
+ getDataSize(container), clientToken, started,
ctx.maxInsertRetries, targetFilename, binaryBlob);
}
@Override
@@ -447,7 +459,9 @@
return succeeded;
}
- public FreenetURI getFinalURI() {
+ public FreenetURI getFinalURI(ObjectContainer container) {
+ if(persistenceType == PERSIST_FOREVER)
+ container.activate(generatedURI, 5);
return generatedURI;
}
@@ -461,11 +475,13 @@
return origFilename;
}
- public long getDataSize() {
+ public long getDataSize(ObjectContainer container) {
if(data == null)
return finishedSize;
- else
+ else {
+ container.activate(data, 1);
return data.size();
+ }
}
public String getMIMEType() {
@@ -486,24 +502,49 @@
}
@Override
- public boolean restart() {
+ public boolean restart(ObjectContainer container, ClientContext
context) {
if(!canRestart()) return false;
- setVarsRestart();
+ setVarsRestart(container);
try {
- if(putter.restart(earlyEncode)) {
+ if(putter.restart(earlyEncode, container, context)) {
synchronized(this) {
generatedURI = null;
started = true;
}
}
+ if(persistenceType == PERSIST_FOREVER)
+ container.store(this);
return true;
} catch (InsertException e) {
- onFailure(e, null);
+ onFailure(e, null, container);
return false;
}
}
- public void onFailure(FetchException e, ClientGetter state) {}
+ public void onFailure(FetchException e, ClientGetter state,
ObjectContainer container) {}
- public void onSuccess(FetchResult result, ClientGetter state) {}
+ public void onSuccess(FetchResult result, ClientGetter state,
ObjectContainer container) {}
+
+ public void onRemoveEventProducer(ObjectContainer container) {
+		// Do nothing: removeFrom() has already been called.
+ }
+
+ @Override
+ public void requestWasRemoved(ObjectContainer container, ClientContext
context) {
+ if(persistenceType == PERSIST_FOREVER) {
+ container.activate(putter, 1);
+ putter.removeFrom(container, context);
+ putter = null;
+ if(origFilename != null) {
+ container.activate(origFilename, 5);
+ container.delete(origFilename);
+ }
+ // clientMetadata will be deleted by putter
+ if(targetURI != null) {
+ container.activate(targetURI, 5);
+ targetURI.removeFrom(container);
+ }
+ }
+ super.requestWasRemoved(container, context);
+ }
}
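
The activate(..., depth) / removeFrom() pairs added throughout ClientPut
follow db4o's activation rules: a retrieved or deactivated object has
null/default fields until it is activated to sufficient depth, and deletes
never cascade, so members must be deleted explicitly. A minimal,
self-contained sketch of both behaviours; the class names and the
demo.db4o file are illustrative, not part of this commit:

    import com.db4o.Db4o;
    import com.db4o.ObjectContainer;

    public class ActivationDemo {
        static class Meta { String mimeType = "text/plain"; }
        static class Request { Meta meta = new Meta(); }

        public static void main(String[] args) {
            ObjectContainer container = Db4o.openFile("demo.db4o");
            try {
                Request r = new Request();
                container.store(r);
                container.commit();
                // Deactivation clears fields; r.meta is null until reactivated.
                container.deactivate(r, Integer.MAX_VALUE);
                // Depth 1 restores r's own fields, but r.meta stays unfilled.
                container.activate(r, 1);
                // Depth 2 (or more) makes r.meta.mimeType readable again.
                container.activate(r, 2);
                System.out.println(r.meta.mimeType);
                // db4o does not cascade deletes: members are deleted one by
                // one, which is what the removeFrom() methods above do.
                container.delete(r.meta);
                container.delete(r);
                container.commit();
            } finally {
                container.close();
            }
        }
    }

This is why persistentTagMessage() activates publicURI, clientMetadata and
origFilename before reading them, and why requestWasRemoved() walks its
members deleting each in turn.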
Modified: trunk/freenet/src/freenet/node/fcp/ClientPutBase.java
===================================================================
--- trunk/freenet/src/freenet/node/fcp/ClientPutBase.java 2009-04-01
20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/node/fcp/ClientPutBase.java 2009-04-01
20:34:09 UTC (rev 26322)
@@ -2,9 +2,13 @@
import java.net.MalformedURLException;
+import com.db4o.ObjectContainer;
+
import freenet.client.*;
import freenet.client.async.BaseClientPutter;
import freenet.client.async.ClientCallback;
+import freenet.client.async.ClientContext;
+import freenet.client.async.DBJob;
import freenet.client.events.ClientEvent;
import freenet.client.events.ClientEventListener;
import freenet.client.events.FinishedCompressionEvent;
@@ -16,6 +20,7 @@
import freenet.support.Fields;
import freenet.support.Logger;
import freenet.support.SimpleFieldSet;
+import freenet.support.io.NativeThread;
/**
* Base class for ClientPut and ClientPutDir.
@@ -23,6 +28,7 @@
*/
public abstract class ClientPutBase extends ClientRequest implements
ClientCallback, ClientEventListener {
+ /** Created new for each ClientPutBase, so we have to delete it in
requestWasRemoved() */
final InsertContext ctx;
final boolean getCHKOnly;
@@ -56,10 +62,10 @@
public ClientPutBase(FreenetURI uri, String identifier, int verbosity,
FCPConnectionHandler handler,
short priorityClass, short persistenceType, String
clientToken, boolean global, boolean getCHKOnly,
- boolean dontCompress, int maxRetries, boolean
earlyEncode) throws MalformedURLException {
+ boolean dontCompress, int maxRetries, boolean
earlyEncode, FCPServer server) throws MalformedURLException {
super(uri, identifier, verbosity, handler, priorityClass,
persistenceType, clientToken, global);
this.getCHKOnly = getCHKOnly;
- ctx = new InsertContext(client.defaultInsertContext, new
SimpleEventProducer(), persistenceType == ClientRequest.PERSIST_CONNECTION);
+ ctx = new InsertContext(server.defaultInsertContext, new
SimpleEventProducer());
ctx.dontCompress = dontCompress;
ctx.eventProducer.addEventListener(this);
ctx.maxInsertRetries = maxRetries;
@@ -69,10 +75,10 @@
public ClientPutBase(FreenetURI uri, String identifier, int verbosity,
FCPConnectionHandler handler,
FCPClient client, short priorityClass, short
persistenceType, String clientToken, boolean global,
- boolean getCHKOnly, boolean dontCompress, int
maxRetries, boolean earlyEncode) throws MalformedURLException {
+ boolean getCHKOnly, boolean dontCompress, int
maxRetries, boolean earlyEncode, FCPServer server) throws MalformedURLException
{
super(uri, identifier, verbosity, handler, client,
priorityClass, persistenceType, clientToken, global);
this.getCHKOnly = getCHKOnly;
- ctx = new InsertContext(client.defaultInsertContext, new
SimpleEventProducer(), persistenceType == ClientRequest.PERSIST_CONNECTION);
+ ctx = new InsertContext(server.defaultInsertContext, new
SimpleEventProducer());
ctx.dontCompress = dontCompress;
ctx.eventProducer.addEventListener(this);
ctx.maxInsertRetries = maxRetries;
@@ -80,7 +86,7 @@
publicURI = getPublicURI(uri);
}
- public ClientPutBase(SimpleFieldSet fs, FCPClient client2) throws
MalformedURLException {
+ public ClientPutBase(SimpleFieldSet fs, FCPClient client2, FCPServer
server) throws MalformedURLException {
super(fs, client2);
publicURI = getPublicURI(uri);
getCHKOnly = Fields.stringToBool(fs.get("CHKOnly"), false);
@@ -90,7 +96,7 @@
finished = Fields.stringToBool(fs.get("Finished"), false);
//finished = false;
succeeded = Fields.stringToBool(fs.get("Succeeded"), false);
- ctx = new InsertContext(client.defaultInsertContext, new
SimpleEventProducer());
+ ctx = new InsertContext(server.defaultInsertContext, new
SimpleEventProducer());
ctx.dontCompress = dontCompress;
ctx.eventProducer.addEventListener(this);
ctx.maxInsertRetries = maxRetries;
@@ -126,54 +132,61 @@
}
@Override
- public void onLostConnection() {
+ public void onLostConnection(ObjectContainer container, ClientContext
context) {
if(persistenceType == PERSIST_CONNECTION)
- cancel();
+ cancel(container, context);
// otherwise ignore
}
- public void onSuccess(BaseClientPutter state) {
+ public void onSuccess(BaseClientPutter state, ObjectContainer
container) {
synchronized(this) {
// Including this helps with certain bugs...
//progressMessage = null;
succeeded = true;
finished = true;
+ if(generatedURI == null)
+ Logger.error(this, "No generated URI in
onSuccess() for "+this+" from "+state);
}
// Could restart, and is on the putter, don't free data until
we remove the putter
- //freeData();
- finish();
- trySendFinalMessage(null);
- client.notifySuccess(this);
- if(persistenceType != PERSIST_CONNECTION)
- client.server.forceStorePersistentRequests();
+ //freeData(container);
+ finish(container);
+ trySendFinalMessage(null, container);
+ if(client != null)
+ client.notifySuccess(this, container);
}
- public void onFailure(InsertException e, BaseClientPutter state) {
+ public void onFailure(InsertException e, BaseClientPutter state,
ObjectContainer container) {
if(finished) return;
synchronized(this) {
finished = true;
putFailedMessage = new PutFailedMessage(e, identifier,
global);
}
// Could restart, and is on the putter, don't free data until
we remove the putter
- //freeData();
- finish();
- trySendFinalMessage(null);
- client.notifyFailure(this);
- if(persistenceType != PERSIST_CONNECTION)
- client.server.forceStorePersistentRequests();
+ //freeData(container);
+ finish(container);
+ trySendFinalMessage(null, container);
+ if(client != null)
+ client.notifyFailure(this, container);
}
- public void onGeneratedURI(FreenetURI uri, BaseClientPutter state) {
+ public void onGeneratedURI(FreenetURI uri, BaseClientPutter state,
ObjectContainer container) {
synchronized(this) {
- if((generatedURI != null) && !uri.equals(generatedURI))
- Logger.error(this, "onGeneratedURI("+uri+ ','
+state+") but already set generatedURI to "+generatedURI);
- generatedURI = uri;
+ if(generatedURI != null) {
+ if(!uri.equals(generatedURI))
+ Logger.error(this,
"onGeneratedURI("+uri+ ',' +state+") but already set generatedURI to
"+generatedURI);
+ else
+ if(Logger.shouldLog(Logger.MINOR,
this)) Logger.minor(this, "onGeneratedURI() twice with same value:
"+generatedURI+" -> "+uri);
+ } else {
+ generatedURI = uri;
+ }
}
- trySendGeneratedURIMessage(null);
+ if(persistenceType == PERSIST_FOREVER)
+ container.store(this);
+ trySendGeneratedURIMessage(null, container);
}
@Override
- public void requestWasRemoved() {
+ public void requestWasRemoved(ObjectContainer container, ClientContext
context) {
// if request is still running, send a PutFailed with
code=cancelled
if( !finished ) {
synchronized(this) {
@@ -181,40 +194,85 @@
InsertException cancelled = new
InsertException(InsertException.CANCELLED);
putFailedMessage = new
PutFailedMessage(cancelled, identifier, global);
}
- trySendFinalMessage(null);
+ trySendFinalMessage(null, container);
}
// notify client that request was removed
FCPMessage msg = new
PersistentRequestRemovedMessage(getIdentifier(), global);
- client.queueClientRequestMessage(msg, 0);
+ client.queueClientRequestMessage(msg, 0, container);
- freeData();
- finish();
+ freeData(container);
+ if(persistenceType == PERSIST_FOREVER) {
+ container.activate(ctx, 2);
+ ctx.removeFrom(container);
+ PutFailedMessage pfm;
+ FreenetURI uri;
+ FreenetURI pubURI;
+ FCPMessage progress;
+ synchronized(this) {
+ pfm = putFailedMessage;
+ putFailedMessage = null;
+ uri = generatedURI;
+ generatedURI = null;
+ pubURI = publicURI;
+ progress = progressMessage;
+ progressMessage = null;
+ }
+ if(pfm != null) {
+ container.activate(pfm, 5);
+ pfm.removeFrom(container);
+ }
+ if(uri != null) {
+ container.activate(uri, 5);
+ uri.removeFrom(container);
+ }
+ if(progress != null) {
+ container.activate(progress, 1);
+ progress.removeFrom(container);
+ }
+ if(pubURI != null) {
+ container.activate(pubURI, 5);
+ pubURI.removeFrom(container);
+ }
+ }
+ super.requestWasRemoved(container, context);
}
- public void receive(ClientEvent ce) {
+ public void receive(final ClientEvent ce, ObjectContainer container,
ClientContext context) {
if(finished) return;
+ if(persistenceType == PERSIST_FOREVER && container == null) {
+ context.jobRunner.queue(new DBJob() {
+
+ public void run(ObjectContainer container,
ClientContext context) {
+ container.activate(ClientPutBase.this,
1);
+ receive(ce, container, context);
+
container.deactivate(ClientPutBase.this, 1);
+ }
+
+ }, NativeThread.NORM_PRIORITY, false);
+ return;
+ }
if(ce instanceof SplitfileProgressEvent) {
if((verbosity & VERBOSITY_SPLITFILE_PROGRESS) ==
VERBOSITY_SPLITFILE_PROGRESS) {
SimpleProgressMessage progress =
new SimpleProgressMessage(identifier,
global, (SplitfileProgressEvent)ce);
- trySendProgressMessage(progress,
VERBOSITY_SPLITFILE_PROGRESS, null);
+ trySendProgressMessage(progress,
VERBOSITY_SPLITFILE_PROGRESS, null, container, context);
}
} else if(ce instanceof StartedCompressionEvent) {
if((verbosity & VERBOSITY_COMPRESSION_START_END) ==
VERBOSITY_COMPRESSION_START_END) {
StartedCompressionMessage msg =
new
StartedCompressionMessage(identifier, global,
((StartedCompressionEvent)ce).codec);
- trySendProgressMessage(msg,
VERBOSITY_COMPRESSION_START_END, null);
+ trySendProgressMessage(msg,
VERBOSITY_COMPRESSION_START_END, null, container, context);
}
} else if(ce instanceof FinishedCompressionEvent) {
if((verbosity & VERBOSITY_COMPRESSION_START_END) ==
VERBOSITY_COMPRESSION_START_END) {
FinishedCompressionMessage msg =
new
FinishedCompressionMessage(identifier, global, (FinishedCompressionEvent)ce);
- trySendProgressMessage(msg,
VERBOSITY_COMPRESSION_START_END, null);
+ trySendProgressMessage(msg,
VERBOSITY_COMPRESSION_START_END, null, container, context);
}
}
}
- public void onFetchable(BaseClientPutter putter) {
+ public void onFetchable(BaseClientPutter putter, ObjectContainer
container) {
if(finished) return;
if((verbosity & VERBOSITY_PUT_FETCHABLE) ==
VERBOSITY_PUT_FETCHABLE) {
FreenetURI temp;
@@ -223,16 +281,21 @@
}
PutFetchableMessage msg =
new PutFetchableMessage(identifier, global,
temp);
- trySendProgressMessage(msg, VERBOSITY_PUT_FETCHABLE,
null);
+ trySendProgressMessage(msg, VERBOSITY_PUT_FETCHABLE,
null, container, null);
}
}
- private void trySendFinalMessage(FCPConnectionOutputHandler handler) {
+ private void trySendFinalMessage(FCPConnectionOutputHandler handler,
ObjectContainer container) {
FCPMessage msg;
synchronized (this) {
+ FreenetURI uri = generatedURI;
+ if(persistenceType == PERSIST_FOREVER && uri != null) {
+ container.activate(uri, 5);
+ uri = uri.clone();
+ }
if(succeeded) {
- msg = new PutSuccessfulMessage(identifier,
global, generatedURI, startupTime, completionTime);
+ msg = new PutSuccessfulMessage(identifier,
global, uri, startupTime, completionTime);
} else {
msg = putFailedMessage;
}
@@ -241,43 +304,89 @@
if(msg == null) {
Logger.error(this, "Trying to send null message on
"+this, new Exception("error"));
} else {
+ if(persistenceType == PERSIST_CONNECTION && handler ==
null)
+ handler = origHandler.outputHandler;
if(handler != null)
handler.queue(msg);
else
- client.queueClientRequestMessage(msg, 0);
+ client.queueClientRequestMessage(msg, 0,
container);
}
}
- private void trySendGeneratedURIMessage(FCPConnectionOutputHandler
handler) {
+ private void trySendGeneratedURIMessage(FCPConnectionOutputHandler
handler, ObjectContainer container) {
FCPMessage msg;
+ if(persistenceType == PERSIST_FOREVER) {
+ container.activate(client, 1);
+ container.activate(generatedURI, 5);
+ }
synchronized(this) {
- msg = new URIGeneratedMessage(generatedURI, identifier,
client.isGlobalQueue);
+ msg = new URIGeneratedMessage(generatedURI, identifier,
isGlobalQueue());
}
+ if(persistenceType == PERSIST_CONNECTION && handler == null)
+ handler = origHandler.outputHandler;
if(handler != null)
handler.queue(msg);
else
- client.queueClientRequestMessage(msg, 0);
+ client.queueClientRequestMessage(msg, 0, container);
}
- private void trySendProgressMessage(FCPMessage msg, int verbosity,
FCPConnectionOutputHandler handler) {
- synchronized(this) {
- if(persistenceType != PERSIST_CONNECTION)
- progressMessage = msg;
+	/**
+	 * @param msg
+	 * @param verbosity
+	 * @param handler
+	 * @param container For a persistent request, at least one of container
and context must be non-null.
+	 * @param context May be null if container is non-null.
+	 */
+ private void trySendProgressMessage(final FCPMessage msg, final int
verbosity, FCPConnectionOutputHandler handler, ObjectContainer container,
ClientContext context) {
+ if(persistenceType == PERSIST_FOREVER) {
+ if(container != null) {
+ FCPMessage oldProgress = null;
+ synchronized(this) {
+ if(persistenceType !=
PERSIST_CONNECTION) {
+ oldProgress = progressMessage;
+ progressMessage = msg;
+ }
+ }
+ if(oldProgress != null) {
+ container.activate(oldProgress, 1);
+ oldProgress.removeFrom(container);
+ }
+ container.store(this);
+ } else {
+ final FCPConnectionOutputHandler h = handler;
+ context.jobRunner.queue(new DBJob() {
+
+ public void run(ObjectContainer
container, ClientContext context) {
+
container.activate(ClientPutBase.this, 1);
+ trySendProgressMessage(msg,
verbosity, h, container, context);
+
container.deactivate(ClientPutBase.this, 1);
+ }
+
+ }, NativeThread.NORM_PRIORITY, false);
+ return;
+ }
+ } else {
+ synchronized(this) {
+ if(persistenceType != PERSIST_CONNECTION)
+ progressMessage = msg;
+ }
}
+ if(persistenceType == PERSIST_CONNECTION && handler == null)
+ handler = origHandler.outputHandler;
if(handler != null)
handler.queue(msg);
else
- client.queueClientRequestMessage(msg, verbosity);
+ client.queueClientRequestMessage(msg, verbosity,
container);
}
@Override
- public void sendPendingMessages(FCPConnectionOutputHandler handler,
boolean includePersistentRequest, boolean includeData, boolean onlyData) {
+ public void sendPendingMessages(FCPConnectionOutputHandler handler,
boolean includePersistentRequest, boolean includeData, boolean onlyData,
ObjectContainer container) {
if(persistenceType == PERSIST_CONNECTION) {
Logger.error(this, "WTF?
persistenceType="+persistenceType, new Exception("error"));
return;
}
if(includePersistentRequest) {
- FCPMessage msg = persistentTagMessage();
+ FCPMessage msg = persistentTagMessage(container);
handler.queue(msg);
}
@@ -289,12 +398,14 @@
msg = progressMessage;
fin = finished;
}
+ if(persistenceType == PERSIST_FOREVER && msg != null)
+ container.activate(msg, 5);
if(generated)
- trySendGeneratedURIMessage(handler);
+ trySendGeneratedURIMessage(handler, container);
if(msg != null)
handler.queue(msg);
if(fin)
- trySendFinalMessage(handler);
+ trySendFinalMessage(handler, container);
}
@Override
@@ -329,7 +440,9 @@
protected abstract String getTypeName();
@Override
- public synchronized double getSuccessFraction() {
+ public synchronized double getSuccessFraction(ObjectContainer
container) {
+ if(persistenceType == PERSIST_FOREVER && progressMessage !=
null)
+ container.activate(progressMessage, 2);
if(progressMessage != null) {
if(progressMessage instanceof SimpleProgressMessage)
return
((SimpleProgressMessage)progressMessage).getFraction();
@@ -340,7 +453,9 @@
@Override
- public synchronized double getTotalBlocks() {
+ public synchronized double getTotalBlocks(ObjectContainer container) {
+ if(persistenceType == PERSIST_FOREVER && progressMessage !=
null)
+ container.activate(progressMessage, 2);
if(progressMessage != null) {
if(progressMessage instanceof SimpleProgressMessage)
return
((SimpleProgressMessage)progressMessage).getTotalBlocks();
@@ -350,7 +465,9 @@
}
@Override
- public synchronized double getMinBlocks() {
+ public synchronized double getMinBlocks(ObjectContainer container) {
+ if(persistenceType == PERSIST_FOREVER && progressMessage !=
null)
+ container.activate(progressMessage, 2);
if(progressMessage != null) {
if(progressMessage instanceof SimpleProgressMessage)
return
((SimpleProgressMessage)progressMessage).getMinBlocks();
@@ -360,7 +477,9 @@
}
@Override
- public synchronized double getFailedBlocks() {
+ public synchronized double getFailedBlocks(ObjectContainer container) {
+ if(persistenceType == PERSIST_FOREVER && progressMessage !=
null)
+ container.activate(progressMessage, 2);
if(progressMessage != null) {
if(progressMessage instanceof SimpleProgressMessage)
return
((SimpleProgressMessage)progressMessage).getFailedBlocks();
@@ -370,7 +489,9 @@
}
@Override
- public synchronized double getFatalyFailedBlocks() {
+ public synchronized double getFatalyFailedBlocks(ObjectContainer
container) {
+ if(persistenceType == PERSIST_FOREVER && progressMessage !=
null)
+ container.activate(progressMessage, 2);
if(progressMessage != null) {
if(progressMessage instanceof SimpleProgressMessage)
return
((SimpleProgressMessage)progressMessage).getFatalyFailedBlocks();
@@ -380,7 +501,9 @@
}
@Override
- public synchronized double getFetchedBlocks() {
+ public synchronized double getFetchedBlocks(ObjectContainer container) {
+ if(persistenceType == PERSIST_FOREVER && progressMessage !=
null)
+ container.activate(progressMessage, 2);
if(progressMessage != null) {
if(progressMessage instanceof SimpleProgressMessage)
return
((SimpleProgressMessage)progressMessage).getFetchedBlocks();
@@ -390,27 +513,48 @@
}
@Override
- public synchronized boolean isTotalFinalized() {
+ public synchronized boolean isTotalFinalized(ObjectContainer container)
{
if(!(progressMessage instanceof SimpleProgressMessage)) return
false;
- else return
((SimpleProgressMessage)progressMessage).isTotalFinalized();
+ else {
+ if(persistenceType == PERSIST_FOREVER)
+ container.activate(progressMessage, 5);
+ return
((SimpleProgressMessage)progressMessage).isTotalFinalized();
+ }
}
@Override
- public synchronized String getFailureReason() {
+ public synchronized String getFailureReason(ObjectContainer container) {
if(putFailedMessage == null)
return null;
+ if(persistenceType == PERSIST_FOREVER)
+ container.activate(putFailedMessage, 5);
String s = putFailedMessage.shortCodeDescription;
if(putFailedMessage.extraDescription != null)
s += ": "+putFailedMessage.extraDescription;
return s;
}
- public void setVarsRestart() {
+ public void setVarsRestart(ObjectContainer container) {
+ PutFailedMessage pfm;
+ FCPMessage progress;
synchronized(this) {
finished = false;
+ pfm = putFailedMessage;
+ progress = progressMessage;
this.putFailedMessage = null;
this.progressMessage = null;
started = false;
}
+ if(pfm != null) {
+ container.activate(pfm, 1);
+ pfm.removeFrom(container);
+ }
+ if(progress != null) {
+ container.activate(progress, 1);
+ progress.removeFrom(container);
+ }
+ if(persistenceType == PERSIST_FOREVER)
+ container.store(this);
}
+
}
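
receive() and trySendProgressMessage() above share one idiom: a callback
arriving off the database thread (container == null) queues a DBJob and
re-runs itself on the database thread with a real container. A compact
sketch of just that idiom, using the DBJob, ClientContext and NativeThread
types from this commit; the EventSink class and onEvent() method are
illustrative:

    import com.db4o.ObjectContainer;
    import freenet.client.async.ClientContext;
    import freenet.client.async.DBJob;
    import freenet.support.io.NativeThread;

    public class EventSink {
        void onEvent(final String event, ObjectContainer container, final ClientContext context) {
            if (container == null) {
                // Not on the database thread: re-queue ourselves with a container.
                context.jobRunner.queue(new DBJob() {
                    public void run(ObjectContainer container, ClientContext context) {
                        // Activate before touching our own persistent fields.
                        container.activate(EventSink.this, 1);
                        onEvent(event, container, context);
                        container.deactivate(EventSink.this, 1);
                    }
                }, NativeThread.NORM_PRIORITY, false);
                return;
            }
            // From here on we hold the database thread and may safely
            // activate, store and delete.
        }
    }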
Modified: trunk/freenet/src/freenet/node/fcp/ClientPutComplexDirMessage.java
===================================================================
--- trunk/freenet/src/freenet/node/fcp/ClientPutComplexDirMessage.java
2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/node/fcp/ClientPutComplexDirMessage.java
2009-04-01 20:34:09 UTC (rev 26322)
@@ -11,6 +11,8 @@
import java.util.LinkedList;
import java.util.Map;
+import com.db4o.ObjectContainer;
+
import freenet.client.async.ManifestElement;
import freenet.node.Node;
import freenet.support.Logger;
@@ -181,4 +183,22 @@
}
}
+ public void removeFrom(ObjectContainer container) {
+ filesToRead.clear();
+ removeFrom(container, filesByName);
+ container.delete(this);
+ }
+
+ private void removeFrom(ObjectContainer container, HashMap filesByName)
{
+ Iterator i = filesByName.values().iterator();
+ while(i.hasNext()) {
+ Object val = i.next();
+ if(val instanceof HashMap) {
+ removeFrom(container, (HashMap) val);
+ } else {
+ ((DirPutFile)val).removeFrom(container);
+ }
+ }
+ }
+
}
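
removeFrom() above recurses because a manifest is a HashMap whose values
are either nested HashMaps (subdirectories) or DirPutFile leaves. A
standalone sketch of the same walk, with plain strings standing in for the
leaves; ManifestWalk and its names are illustrative:

    import java.util.HashMap;
    import java.util.Map;

    public class ManifestWalk {
        @SuppressWarnings("unchecked")
        static void walk(HashMap<String, Object> dir, String prefix) {
            for (Map.Entry<String, Object> e : dir.entrySet()) {
                Object val = e.getValue();
                if (val instanceof HashMap) {
                    // Subdirectory: recurse, exactly as removeFrom() does.
                    walk((HashMap<String, Object>) val, prefix + e.getKey() + "/");
                } else {
                    // Leaf: removeFrom() would delete the DirPutFile here.
                    System.out.println(prefix + e.getKey() + " -> " + val);
                }
            }
        }

        public static void main(String[] args) {
            HashMap<String, Object> css = new HashMap<String, Object>();
            css.put("style.css", "leaf");
            HashMap<String, Object> root = new HashMap<String, Object>();
            root.put("index.html", "leaf");
            root.put("css", css);
            walk(root, "/");
        }
    }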
Modified: trunk/freenet/src/freenet/node/fcp/ClientPutDir.java
===================================================================
--- trunk/freenet/src/freenet/node/fcp/ClientPutDir.java 2009-04-01
20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/node/fcp/ClientPutDir.java 2009-04-01
20:34:09 UTC (rev 26322)
@@ -11,15 +11,21 @@
import java.util.Iterator;
import java.util.Vector;
+import com.db4o.ObjectContainer;
+
import freenet.client.DefaultMIMETypes;
import freenet.client.FetchException;
import freenet.client.FetchResult;
import freenet.client.InsertException;
+import freenet.client.async.BaseClientPutter;
+import freenet.client.async.ClientContext;
import freenet.client.async.ClientGetter;
import freenet.client.async.ClientRequester;
import freenet.client.async.ManifestElement;
import freenet.client.async.SimpleManifestPutter;
+import freenet.client.events.ClientEvent;
import freenet.keys.FreenetURI;
+import freenet.support.LogThresholdCallback;
import freenet.support.Logger;
import freenet.support.SimpleFieldSet;
import freenet.support.api.Bucket;
@@ -30,32 +36,44 @@
public class ClientPutDir extends ClientPutBase {
- private final HashMap<String, Object> manifestElements;
+ private HashMap<String, Object> manifestElements;
private SimpleManifestPutter putter;
private final String defaultName;
private final long totalSize;
private final int numberOfFiles;
- private static boolean logMINOR;
private final boolean wasDiskPut;
+ private static volatile boolean logMINOR;
+
+ static {
+ Logger.registerLogThresholdCallback(new LogThresholdCallback() {
+
+ @Override
+ public void shouldUpdate() {
+ logMINOR = Logger.shouldLog(Logger.MINOR, this);
+ }
+ });
+ }
+
public ClientPutDir(FCPConnectionHandler handler, ClientPutDirMessage
message,
- HashMap<String, Object> manifestElements, boolean
wasDiskPut) throws IdentifierCollisionException,
- MalformedURLException {
+ HashMap<String, Object> manifestElements, boolean
wasDiskPut, FCPServer server) throws IdentifierCollisionException,
MalformedURLException {
super(message.uri, message.identifier, message.verbosity,
handler,
message.priorityClass, message.persistenceType,
message.clientToken, message.global,
- message.getCHKOnly, message.dontCompress,
message.maxRetries, message.earlyEncode);
+ message.getCHKOnly, message.dontCompress,
message.maxRetries, message.earlyEncode, server);
logMINOR = Logger.shouldLog(Logger.MINOR, this);
this.wasDiskPut = wasDiskPut;
- this.manifestElements = manifestElements;
+
+		// objectOnNew is called once, objectOnUpdate is never called,
yet manifestElements gets blanked anyway!
+
+ this.manifestElements = new HashMap<String,Object>();
+ this.manifestElements.putAll(manifestElements);
+
+// this.manifestElements = manifestElements;
+
+// this.manifestElements = new HashMap<String, Object>();
+// this.manifestElements.putAll(manifestElements);
this.defaultName = message.defaultName;
makePutter();
- if(persistenceType != PERSIST_CONNECTION) {
- client.register(this, false);
- FCPMessage msg = persistentTagMessage();
- client.queueClientRequestMessage(msg, 0);
- if(handler != null && (!handler.isGlobalSubscribed()))
- handler.outputHandler.queue(msg);
- }
if(putter != null) {
numberOfFiles = putter.countFiles();
totalSize = putter.totalSize();
@@ -68,20 +86,16 @@
/**
* Puts a disk dir
+ * @throws InsertException
*/
- public ClientPutDir(FCPClient client, FreenetURI uri, String
identifier, int verbosity, short priorityClass, short persistenceType, String
clientToken, boolean getCHKOnly, boolean dontCompress, int maxRetries, File
dir, String defaultName, boolean allowUnreadableFiles, boolean global, boolean
earlyEncode) throws FileNotFoundException, IdentifierCollisionException,
MalformedURLException {
- super(uri, identifier, verbosity , null, client, priorityClass,
persistenceType, clientToken, global, getCHKOnly, dontCompress, maxRetries,
earlyEncode);
+ public ClientPutDir(FCPClient client, FreenetURI uri, String
identifier, int verbosity, short priorityClass, short persistenceType, String
clientToken, boolean getCHKOnly, boolean dontCompress, int maxRetries, File
dir, String defaultName, boolean allowUnreadableFiles, boolean global, boolean
earlyEncode, FCPServer server) throws FileNotFoundException,
IdentifierCollisionException, MalformedURLException {
+ super(uri, identifier, verbosity , null, client, priorityClass,
persistenceType, clientToken, global, getCHKOnly, dontCompress, maxRetries,
earlyEncode, server);
wasDiskPut = true;
logMINOR = Logger.shouldLog(Logger.MINOR, this);
this.manifestElements = makeDiskDirManifest(dir, "",
allowUnreadableFiles);
this.defaultName = defaultName;
makePutter();
- if(persistenceType != PERSIST_CONNECTION) {
- client.register(this, false);
- FCPMessage msg = persistentTagMessage();
- client.queueClientRequestMessage(msg, 0);
- }
if(putter != null) {
numberOfFiles = putter.countFiles();
totalSize = putter.totalSize();
@@ -92,8 +106,16 @@
if(logMINOR) Logger.minor(this, "Putting dir "+identifier+" :
"+priorityClass);
}
- private HashMap<String, Object> makeDiskDirManifest(File dir, String
prefix, boolean allowUnreadableFiles)
- throws FileNotFoundException {
+ void register(ObjectContainer container, boolean lazyResume, boolean
noTags) throws IdentifierCollisionException {
+ if(persistenceType != PERSIST_CONNECTION)
+ client.register(this, false, container);
+ if(persistenceType != PERSIST_CONNECTION && !noTags) {
+ FCPMessage msg = persistentTagMessage(container);
+ client.queueClientRequestMessage(msg, 0, container);
+ }
+ }
+
+ private HashMap<String, Object> makeDiskDirManifest(File dir, String
prefix, boolean allowUnreadableFiles) throws FileNotFoundException {
HashMap<String, Object> map = new HashMap<String, Object>();
File[] files = dir.listFiles();
@@ -129,20 +151,17 @@
private void makePutter() {
SimpleManifestPutter p;
- try {
- p = new SimpleManifestPutter(this,
client.core.requestStarters.chkPutScheduler,
client.core.requestStarters.sskPutScheduler,
- manifestElements, priorityClass, uri,
defaultName, ctx, getCHKOnly, client.lowLevelClient, earlyEncode);
- } catch (InsertException e) {
- onFailure(e, null);
- p = null;
- }
+ p = new SimpleManifestPutter(this,
+ manifestElements, priorityClass, uri,
defaultName, ctx, getCHKOnly,
+ lowLevelClient,
+ earlyEncode);
putter = p;
}
- public ClientPutDir(SimpleFieldSet fs, FCPClient client) throws
PersistenceParseException, IOException {
- super(fs, client);
+ public ClientPutDir(SimpleFieldSet fs, FCPClient client, FCPServer
server, ObjectContainer container) throws PersistenceParseException,
IOException {
+ super(fs, client, server);
logMINOR = Logger.shouldLog(Logger.MINOR, this);
SimpleFieldSet files = fs.subset("Files");
defaultName = fs.get("DefaultName");
@@ -173,7 +192,7 @@
long sz =
Long.parseLong(subset.get("DataLength"));
if(!finished) {
try {
- data =
SerializableToFieldSetBucketUtil.create(fs.subset("ReturnBucket"), ctx.random,
client.server.core.persistentTempBucketFactory);
+ data =
SerializableToFieldSetBucketUtil.create(fs.subset("ReturnBucket"),
server.core.random, server.core.persistentTempBucketFactory);
} catch
(CannotCreateFromFieldSetException e) {
throw new
PersistenceParseException("Could not read old bucket for "+identifier+" : "+e,
e);
}
@@ -207,66 +226,85 @@
}
manifestElements = SimpleManifestPutter.unflatten(v);
SimpleManifestPutter p = null;
- try {
if(!finished)
- p = new SimpleManifestPutter(this,
client.core.requestStarters.chkPutScheduler,
client.core.requestStarters.sskPutScheduler,
- manifestElements,
priorityClass, uri, defaultName, ctx, getCHKOnly, client, earlyEncode);
- } catch (InsertException e) {
- onFailure(e, null);
- p = null;
- }
+ p = new SimpleManifestPutter(this,
+ manifestElements,
priorityClass, uri, defaultName, ctx, getCHKOnly,
+ lowLevelClient,
+ earlyEncode);
putter = p;
numberOfFiles = fileCount;
totalSize = size;
if(persistenceType != PERSIST_CONNECTION) {
- FCPMessage msg = persistentTagMessage();
- client.queueClientRequestMessage(msg, 0);
+ FCPMessage msg = persistentTagMessage(container);
+ client.queueClientRequestMessage(msg, 0, container);
}
}
@Override
- public void start() {
+ public void start(ObjectContainer container, ClientContext context) {
if(finished) return;
if(started) return;
try {
if(putter != null)
- putter.start();
+ putter.start(container, context);
started = true;
- if(logMINOR) Logger.minor(this, "Started "+putter);
+ if(logMINOR) Logger.minor(this, "Started "+putter+" for
"+this+" persistence="+persistenceType);
if(persistenceType != PERSIST_CONNECTION && !finished) {
- FCPMessage msg = persistentTagMessage();
- client.queueClientRequestMessage(msg, 0);
+ FCPMessage msg =
persistentTagMessage(container);
+ client.queueClientRequestMessage(msg, 0,
container);
}
+ if(persistenceType == PERSIST_FOREVER)
+ container.store(this); // Update
} catch (InsertException e) {
started = true;
- onFailure(e, null);
+ onFailure(e, null, container);
}
}
@Override
- public void onLostConnection() {
+ public void onLostConnection(ObjectContainer container, ClientContext
context) {
if(persistenceType == PERSIST_CONNECTION)
- cancel();
+ cancel(container, context);
// otherwise ignore
}
- @Override
- protected void freeData() {
- freeData(manifestElements);
+ @SuppressWarnings("unchecked")
+ protected void freeData(ObjectContainer container) {
+ if(logMINOR) Logger.minor(this, "freeData() on "+this+"
persistence type = "+persistenceType);
+ synchronized(this) {
+ if(manifestElements == null) {
+ if(logMINOR)
+ Logger.minor(this, "manifestElements =
"+manifestElements +
+ (persistenceType !=
PERSIST_FOREVER ? "" : (" dir.active="+container.ext().isActive(this))), new
Exception("error"));
+ return;
+ }
+ }
+ if(logMINOR) Logger.minor(this, "freeData() more on "+this+"
persistence type = "+persistenceType);
+ // We have to commit everything, so activating everything here
doesn't cost us much memory...?
+ if(persistenceType == PERSIST_FOREVER) {
+ container.deactivate(manifestElements, 1); // Must
deactivate before activating: If it has been activated to depth 1 (empty map)
at some point it will fail to activate to depth 2 (with contents). See
http://tracker.db4o.com/browse/COR-1582
+ container.activate(manifestElements, Integer.MAX_VALUE);
+ }
+ freeData(manifestElements, container);
+ manifestElements = null;
+ if(persistenceType == PERSIST_FOREVER) container.store(this);
}
@SuppressWarnings("unchecked")
- private void freeData(HashMap<String, Object> manifestElements) {
- Iterator<Object> i = manifestElements.values().iterator();
+ private void freeData(HashMap<String, Object> manifestElements,
ObjectContainer container) {
+ if(logMINOR) Logger.minor(this, "freeData() inner on "+this+"
persistence type = "+persistenceType+" size = "+manifestElements.size());
+ Iterator i = manifestElements.values().iterator();
while(i.hasNext()) {
Object o = i.next();
- if(o instanceof HashMap)
- freeData((HashMap<String, Object>) o);
- else {
+ if(o instanceof HashMap) {
+ freeData((HashMap<String, Object>) o,
container);
+ } else {
ManifestElement e = (ManifestElement) o;
- e.freeData();
+ if(logMINOR) Logger.minor(this, "Freeing "+e);
+ e.freeData(container, persistenceType ==
PERSIST_FOREVER);
}
}
+ if(persistenceType == PERSIST_FOREVER)
container.delete(manifestElements);
}
@Override
@@ -322,9 +360,14 @@
}
@Override
- protected FCPMessage persistentTagMessage() {
+ protected FCPMessage persistentTagMessage(ObjectContainer container) {
+ if(persistenceType == PERSIST_FOREVER) {
+ container.activate(publicURI, 5);
+ container.activate(ctx, 1);
+ container.activate(manifestElements, 5);
+ }
return new PersistentPutDir(identifier, publicURI, verbosity,
priorityClass,
- persistenceType, global, defaultName,
manifestElements, clientToken, started, ctx.maxInsertRetries, wasDiskPut);
+ persistenceType, global, defaultName,
manifestElements, clientToken, started, ctx.maxInsertRetries, wasDiskPut,
container);
}
@Override
@@ -337,7 +380,9 @@
return succeeded;
}
- public FreenetURI getFinalURI() {
+ public FreenetURI getFinalURI(ObjectContainer container) {
+ if(persistenceType == PERSIST_FOREVER)
+ container.activate(generatedURI, 5);
return generatedURI;
}
@@ -363,15 +408,37 @@
}
@Override
- public boolean restart() {
+ public boolean restart(ObjectContainer container, ClientContext
context) {
if(!canRestart()) return false;
- setVarsRestart();
- makePutter();
- start();
+ setVarsRestart(container);
+ makePutter();
+ start(container, context);
return true;
}
- public void onFailure(FetchException e, ClientGetter state) {}
+ public void onFailure(FetchException e, ClientGetter state,
ObjectContainer container) {}
- public void onSuccess(FetchResult result, ClientGetter state) {}
+ public void onSuccess(FetchResult result, ClientGetter state,
ObjectContainer container) {}
+
+ public void onSuccess(BaseClientPutter state, ObjectContainer
container) {
+ super.onSuccess(state, container);
+ }
+
+ public void onFailure(InsertException e, BaseClientPutter state,
ObjectContainer container) {
+ super.onFailure(e, state, container);
+ }
+
+ public void onRemoveEventProducer(ObjectContainer container) {
+		// Do nothing: removeFrom() has already been called.
+ }
+
+ @Override
+ public void requestWasRemoved(ObjectContainer container, ClientContext
context) {
+ if(persistenceType == PERSIST_FOREVER) {
+ container.activate(putter, 1);
+ putter.removeFrom(container, context);
+ putter = null;
+ }
+ super.requestWasRemoved(container, context);
+ }
}
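
The deactivate-before-activate pair in ClientPutDir.freeData() above works
around db4o bug COR-1582: once a map has been activated shallowly (the
object exists but looks empty), a later activate() to a deeper depth can be
short-circuited by db4o's already-active check. A sketch of the workaround
as a hypothetical helper, not present in this commit:

    import com.db4o.ObjectContainer;

    public class ActivationWorkaround {
        // Force a full re-read of a graph that may already be shallowly
        // activated, per http://tracker.db4o.com/browse/COR-1582.
        static void reactivateFully(ObjectContainer container, Object graphRoot) {
            // Deactivate first so db4o's "already active" bookkeeping
            // cannot skip the deep activation below.
            container.deactivate(graphRoot, 1);
            container.activate(graphRoot, Integer.MAX_VALUE);
        }
    }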
Modified: trunk/freenet/src/freenet/node/fcp/ClientPutDiskDirMessage.java
===================================================================
--- trunk/freenet/src/freenet/node/fcp/ClientPutDiskDirMessage.java
2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/node/fcp/ClientPutDiskDirMessage.java
2009-04-01 20:34:09 UTC (rev 26322)
@@ -9,6 +9,8 @@
import java.io.OutputStream;
import java.util.HashMap;
+import com.db4o.ObjectContainer;
+
import freenet.client.DefaultMIMETypes;
import freenet.client.async.ManifestElement;
import freenet.node.Node;
@@ -118,4 +120,9 @@
// Do nothing
}
+ public void removeFrom(ObjectContainer container) {
+ container.delete(dirname);
+ container.delete(this);
+ }
+
}
Modified: trunk/freenet/src/freenet/node/fcp/ClientPutMessage.java
===================================================================
--- trunk/freenet/src/freenet/node/fcp/ClientPutMessage.java 2009-04-01
20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/node/fcp/ClientPutMessage.java 2009-04-01
20:34:09 UTC (rev 26322)
@@ -7,6 +7,8 @@
import java.io.IOException;
import java.net.MalformedURLException;
+import com.db4o.ObjectContainer;
+
import freenet.keys.FreenetURI;
import freenet.node.Node;
import freenet.node.RequestStarter;
@@ -290,8 +292,19 @@
return global;
}
- public void freeData() {
+ public void removeFrom(ObjectContainer container) {
+ uri.removeFrom(container);
+ container.delete(origFilename);
+ if(redirectTarget != null)
+ redirectTarget.removeFrom(container);
+ }
+
+ public void freeData(ObjectContainer container) {
+ if(persistenceType == ClientRequest.PERSIST_FOREVER)
+ container.activate(bucket, 5);
bucket.free();
+ if(persistenceType == ClientRequest.PERSIST_FOREVER)
+ bucket.removeFrom(container);
}
}
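
freeData() above shows the two-step teardown this commit applies to
persistent buckets: free() releases the underlying storage, then
removeFrom() deletes the Bucket object itself from the database; transient
requests skip both the activation and the delete. The helper below restates
the pattern; BucketTeardown and freeBucket() are illustrative:

    import com.db4o.ObjectContainer;
    import freenet.support.api.Bucket;

    public class BucketTeardown {
        static void freeBucket(Bucket bucket, ObjectContainer container, boolean persistent) {
            if (persistent)
                container.activate(bucket, 5); // depth used by this commit
            bucket.free();                     // release the stored bytes
            if (persistent)
                bucket.removeFrom(container);  // delete the Bucket object
        }
    }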
Modified: trunk/freenet/src/freenet/node/fcp/ClientRequest.java
===================================================================
--- trunk/freenet/src/freenet/node/fcp/ClientRequest.java 2009-04-01
20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/node/fcp/ClientRequest.java 2009-04-01
20:34:09 UTC (rev 26322)
@@ -1,18 +1,25 @@
package freenet.node.fcp;
+import java.io.BufferedReader;
+import java.io.BufferedWriter;
+import java.io.IOException;
+import java.net.MalformedURLException;
import freenet.client.async.ClientRequester;
import freenet.keys.FreenetURI;
+import freenet.node.RequestClient;
import freenet.support.Fields;
+import freenet.support.LogThresholdCallback;
import freenet.support.Logger;
import freenet.support.SimpleFieldSet;
import freenet.support.api.Bucket;
import freenet.support.io.SerializableToFieldSetBucket;
-import java.io.BufferedReader;
-import java.io.BufferedWriter;
-import java.io.IOException;
-import java.net.MalformedURLException;
+import com.db4o.ObjectContainer;
+import freenet.client.async.ClientContext;
+import freenet.client.async.DBJob;
+import freenet.support.io.NativeThread;
+
/**
* A request process carried out by the node for an FCP client.
* Examples: ClientGet, ClientPut, MultiGet.
@@ -27,7 +34,7 @@
* differently. */
protected final int verbosity;
/** Original FCPConnectionHandler. Null if persistence != connection */
- protected final FCPConnectionHandler origHandler;
+ protected transient final FCPConnectionHandler origHandler;
/** Client */
protected final FCPClient client;
/** Priority class */
@@ -45,9 +52,30 @@
protected final long startupTime;
/** Timestamp : completion time */
protected long completionTime;
+ protected final RequestClient lowLevelClient;
+ private final int hashCode; // for debugging it is good to have a
persistent id
+
+ public int hashCode() {
+ return hashCode;
+ }
+ private static volatile boolean logMINOR;
+
+ static {
+ Logger.registerLogThresholdCallback(new LogThresholdCallback() {
+
+ @Override
+ public void shouldUpdate() {
+ logMINOR = Logger.shouldLog(Logger.MINOR, this);
+ }
+ });
+ }
+
public ClientRequest(FreenetURI uri2, String identifier2, int
verbosity2, FCPConnectionHandler handler,
FCPClient client, short priorityClass2, short
persistenceType2, String clientToken2, boolean global) {
+ int hash = super.hashCode();
+ if(hash == 0) hash = 1;
+ hashCode = hash;
this.uri = uri2;
this.identifier = identifier2;
if(global)
@@ -59,16 +87,35 @@
this.persistenceType = persistenceType2;
this.clientToken = clientToken2;
this.global = global;
- if(persistenceType == PERSIST_CONNECTION)
+ if(persistenceType == PERSIST_CONNECTION) {
this.origHandler = handler;
- else
+ lowLevelClient = new RequestClient() {
+
+ public boolean persistent() {
+ return false;
+ }
+
+ public void removeFrom(ObjectContainer
container) {
+ throw new
UnsupportedOperationException();
+ }
+
+ };
+ this.client = null;
+ } else {
origHandler = null;
- this.client = client;
+ this.client = client;
+ if(client != null)
+ assert(client.persistenceType ==
persistenceType);
+ lowLevelClient = client.lowLevelClient;
+ }
this.startupTime = System.currentTimeMillis();
}
public ClientRequest(FreenetURI uri2, String identifier2, int
verbosity2, FCPConnectionHandler handler,
short priorityClass2, short persistenceType2, String
clientToken2, boolean global) {
+ int hash = super.hashCode();
+ if(hash == 0) hash = 1;
+ hashCode = hash;
this.uri = uri2;
this.identifier = identifier2;
if(global)
@@ -80,19 +127,43 @@
this.persistenceType = persistenceType2;
this.clientToken = clientToken2;
this.global = global;
- if(persistenceType == PERSIST_CONNECTION)
+ if(persistenceType == PERSIST_CONNECTION) {
this.origHandler = handler;
- else
+ client = null;
+ lowLevelClient = new RequestClient() {
+
+ public boolean persistent() {
+ return false;
+ }
+
+ public void removeFrom(ObjectContainer
container) {
+ throw new
UnsupportedOperationException();
+ }
+
+ };
+ } else {
origHandler = null;
if(global) {
- client = handler.server.globalClient;
+ client = persistenceType == PERSIST_FOREVER ?
handler.server.globalForeverClient : handler.server.globalRebootClient;
} else {
- client = handler.getClient();
+
assert(!handler.server.core.clientDatabaseExecutor.onThread());
+ client = persistenceType == PERSIST_FOREVER ?
handler.getForeverClient(null) : handler.getRebootClient();
}
+ lowLevelClient = client.lowLevelClient;
+ if(lowLevelClient == null)
+ throw new NullPointerException("No lowLevelClient from
client: "+client+" global = "+global+" persistence = "+persistenceType);
+ }
+ if(lowLevelClient.persistent() != (persistenceType ==
PERSIST_FOREVER))
+ throw new IllegalStateException("Low level
client.persistent="+lowLevelClient.persistent()+" but persistence type =
"+persistenceType);
+ if(client != null)
+ assert(client.persistenceType == persistenceType);
this.startupTime = System.currentTimeMillis();
}
public ClientRequest(SimpleFieldSet fs, FCPClient client2) throws
MalformedURLException {
+ int hash = super.hashCode();
+ if(hash == 0) hash = 1;
+ hashCode = hash;
priorityClass = Short.parseShort(fs.get("PriorityClass"));
uri = new FreenetURI(fs.get("URI"));
identifier = fs.get("Identifier");
@@ -113,13 +184,15 @@
completionTime = fs.getLong("CompletionTime", 0);
if (finished)
started=true;
+ assert(client.persistenceType == persistenceType);
+ lowLevelClient = client.lowLevelClient;
}
/** Lost connection */
- public abstract void onLostConnection();
+ public abstract void onLostConnection(ObjectContainer container,
ClientContext context);
/** Send any pending messages for a persistent request e.g. after
reconnecting */
- public abstract void sendPendingMessages(FCPConnectionOutputHandler
handler, boolean includePersistentRequest, boolean includeData, boolean
onlyData);
+ public abstract void sendPendingMessages(FCPConnectionOutputHandler
handler, boolean includePersistentRequest, boolean includeData, boolean
onlyData, ObjectContainer container);
// Persistence
@@ -150,8 +223,7 @@
return Short.parseShort(string);
}
- public static ClientRequest readAndRegister(BufferedReader br,
FCPServer server) throws IOException {
- boolean logMINOR = Logger.shouldLog(Logger.MINOR,
ClientRequest.class);
+ public static ClientRequest readAndRegister(BufferedReader br,
FCPServer server, ObjectContainer container, ClientContext context) throws
IOException {
Runtime rt = Runtime.getRuntime();
if(logMINOR)
Logger.minor(ClientRequest.class,
rt.maxMemory()-rt.freeMemory()+" in use before loading request");
@@ -165,28 +237,28 @@
}
FCPClient client;
if(!isGlobal)
- client = server.registerClient(clientName, server.core,
null);
+ client = server.registerForeverClient(clientName,
server.core, null, container);
else
- client = server.globalClient;
+ client = server.globalForeverClient;
if(logMINOR)
Logger.minor(ClientRequest.class,
rt.maxMemory()-rt.freeMemory()+" in use loading request "+clientName+"
"+fs.get("Identifier"));
try {
String type = fs.get("Type");
boolean lazyResume = server.core.lazyResume();
if(type.equals("GET")) {
- ClientGet cg = new ClientGet(fs, client);
- client.register(cg, lazyResume);
- if(!lazyResume) cg.start();
+ ClientGet cg = new ClientGet(fs, client,
server);
+ cg.register(container, lazyResume, true);
+ if(!lazyResume) cg.start(container, context);
return cg;
} else if(type.equals("PUT")) {
- ClientPut cp = new ClientPut(fs, client);
- client.register(cp, lazyResume);
- if(!lazyResume) cp.start();
+ ClientPut cp = new ClientPut(fs, client,
server, container);
+ client.register(cp, lazyResume, container);
+ if(!lazyResume) cp.start(container, context);
return cp;
} else if(type.equals("PUTDIR")) {
- ClientPutDir cp = new ClientPutDir(fs, client);
- client.register(cp, lazyResume);
- if(!lazyResume) cp.start();
+ ClientPutDir cp = new ClientPutDir(fs, client,
server, container);
+ client.register(cp, lazyResume, container);
+ if(!lazyResume) cp.start(container, context);
return cp;
} else {
Logger.error(ClientRequest.class, "Unrecognized
type: "+type);
@@ -200,12 +272,19 @@
return null;
}
}
+
+ abstract void register(ObjectContainer container, boolean lazyResume,
boolean noTags) throws IdentifierCollisionException;
- public void cancel() {
+ public void cancel(ObjectContainer container, ClientContext context) {
ClientRequester cr = getClientRequest();
// It might have been finished on startup.
- if(cr != null) cr.cancel();
- freeData();
+ if(persistenceType == PERSIST_FOREVER)
+ container.activate(cr, 1);
+ if(logMINOR) Logger.minor(this, "Cancelling "+cr+" for "+this+"
persistenceType = "+persistenceType);
+ if(cr != null) cr.cancel(container, context);
+ freeData(container);
+ if(persistenceType == PERSIST_FOREVER)
+ container.store(this);
}
public boolean isPersistentForever() {
@@ -229,9 +308,9 @@
protected abstract ClientRequester getClientRequest();
/** Completed request dropped off the end without being acknowledged */
- public void dropped() {
- cancel();
- freeData();
+ public void dropped(ObjectContainer container, ClientContext context) {
+ cancel(container, context);
+ freeData(container);
}
/** Return the priority class */
@@ -240,16 +319,17 @@
}
/** Free cached data bucket(s) */
- protected abstract void freeData();
+ protected abstract void freeData(ObjectContainer container);
/** Request completed. But we may have to stick around until we are
acked. */
- protected void finish() {
+ protected void finish(ObjectContainer container) {
completionTime = System.currentTimeMillis();
if(persistenceType == ClientRequest.PERSIST_CONNECTION)
origHandler.finishedClientRequest(this);
else
- client.server.forceStorePersistentRequests();
- client.finishedClientRequest(this);
+ client.finishedClientRequest(this, container);
+ if(persistenceType == ClientRequest.PERSIST_FOREVER)
+ container.store(this);
}
/**
@@ -271,30 +351,27 @@
*/
public abstract SimpleFieldSet getFieldSet() throws IOException;
- public abstract double getSuccessFraction();
+ public abstract double getSuccessFraction(ObjectContainer container);
- public abstract double getTotalBlocks();
- public abstract double getMinBlocks();
- public abstract double getFetchedBlocks();
- public abstract double getFailedBlocks();
- public abstract double getFatalyFailedBlocks();
+ public abstract double getTotalBlocks(ObjectContainer container);
+ public abstract double getMinBlocks(ObjectContainer container);
+ public abstract double getFetchedBlocks(ObjectContainer container);
+ public abstract double getFailedBlocks(ObjectContainer container);
+ public abstract double getFatalyFailedBlocks(ObjectContainer container);
- public abstract String getFailureReason();
+ public abstract String getFailureReason(ObjectContainer container);
/**
* Has the total number of blocks to insert been determined yet?
*/
- public abstract boolean isTotalFinalized();
+ public abstract boolean isTotalFinalized(ObjectContainer container);
- public void onMajorProgress() {
- if(persistenceType != ClientRequest.PERSIST_CONNECTION) {
- if(client != null)
- client.server.forceStorePersistentRequests();
- }
+ public void onMajorProgress(ObjectContainer container) {
+ // Ignore
}
/** Start the request, if it has not already been started. */
- public abstract void start();
+ public abstract void start(ObjectContainer container, ClientContext
context);
protected boolean started;
@@ -306,15 +383,16 @@
public abstract boolean canRestart();
- public abstract boolean restart();
+ public abstract boolean restart(ObjectContainer container,
ClientContext context);
- protected abstract FCPMessage persistentTagMessage();
+ protected abstract FCPMessage persistentTagMessage(ObjectContainer
container);
/**
* Called after a ModifyPersistentRequest.
* Sends a PersistentRequestModified message to clients if any value
changed.
+ * Commits before sending the messages.
*/
- public void modifyRequest(String newClientToken, short
newPriorityClass) {
+ public void modifyRequest(String newClientToken, short
newPriorityClass, FCPServer server, ObjectContainer container) {
boolean clientTokenChanged = false;
boolean priorityClassChanged = false;
@@ -333,19 +411,19 @@
if(newPriorityClass >= 0 && newPriorityClass != priorityClass) {
this.priorityClass = newPriorityClass;
- getClientRequest().setPriorityClass(priorityClass);
+ getClientRequest().setPriorityClass(priorityClass,
server.core.clientContext, container);
priorityClassChanged = true;
}
- if( clientTokenChanged || priorityClassChanged ) {
- if(persistenceType != ClientRequest.PERSIST_CONNECTION)
{
- if(client != null) {
-
client.server.forceStorePersistentRequests();
- }
- }
- } else {
+ if(! ( clientTokenChanged || priorityClassChanged ) ) {
return; // quick return, nothing was changed
}
+
+ if(persistenceType == PERSIST_FOREVER) {
+ container.store(this);
+ container.commit(); // commit before we send the message
+ if(logMINOR) Logger.minor(this, "COMMITTED");
+ }
// this could become too complex with more parameters, but for
now its ok
final PersistentRequestModifiedMessage modifiedMsg;
@@ -358,28 +436,68 @@
} else {
return; // paranoia, we should not be here if nothing
was changed!
}
- client.queueClientRequestMessage(modifiedMsg, 0);
+ client.queueClientRequestMessage(modifiedMsg, 0, container);
}
- /**
- * Called after a RemovePersistentRequest. Send a
PersistentRequestRemoved to the clients.
- */
- public abstract void requestWasRemoved();
-
/** Utility method for storing details of a possibly encrypted bucket.
*/
protected void bucketToFS(SimpleFieldSet fs, String name, boolean
includeSize, Bucket data) {
SerializableToFieldSetBucket bucket =
(SerializableToFieldSetBucket) data;
fs.put(name, bucket.toFieldSet());
}
- public void restartAsync() {
+ public void restartAsync(FCPServer server) {
synchronized(this) {
this.started = false;
}
- client.core.getExecutor().execute(new Runnable() {
- public void run() {
- restart();
+ server.core.clientContext.jobRunner.queue(new DBJob() {
+
+ public void run(ObjectContainer container,
ClientContext context) {
+ container.activate(ClientRequest.this, 1);
+ restart(container, context);
+ container.deactivate(ClientRequest.this, 1);
}
- }, "Restarting "+this);
+
+ }, NativeThread.HIGH_PRIORITY, false);
}
+
+ /**
+ * Called after a RemovePersistentRequest. Send a
PersistentRequestRemoved to the clients.
+ * If the request is in the database, delete it.
+ */
+ public void requestWasRemoved(ObjectContainer container, ClientContext
context) {
+ if(persistenceType != PERSIST_FOREVER) return;
+ if(uri != null) uri.removeFrom(container);
+ container.delete(this);
+ }
+
+ protected boolean isGlobalQueue() {
+ if(client == null) return false;
+ return client.isGlobalQueue;
+ }
+
+ public boolean objectCanUpdate(ObjectContainer container) {
+ if(hashCode == 0) {
+ Logger.error(this, "Trying to update with hash 0 =>
already deleted!", new Exception("error"));
+ return false;
+ }
+ return true;
+ }
+
+ public boolean objectCanNew(ObjectContainer container) {
+ if(persistenceType != PERSIST_FOREVER) {
+ Logger.error(this, "Not storing non-persistent request
in database", new Exception("error"));
+ return false;
+ }
+ if(hashCode == 0) {
+ Logger.error(this, "Trying to write with hash 0 =>
already deleted!", new Exception("error"));
+ return false;
+ }
+ return true;
+ }
+
+ public void storeTo(ObjectContainer container) {
+ container.store(this);
+ }
+
+
}
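
objectCanNew() and objectCanUpdate() above are db4o object callbacks: db4o
looks them up by name and invokes them before inserting or updating an
instance, and a false return vetoes the write. Together with the cached
hashCode (which reads as 0 once the object has been deleted or left
unactivated) they turn accidental writes of dead or non-persistent requests
into logged no-ops. A self-contained sketch; CallbackDemo and callbacks.db4o
are illustrative:

    import com.db4o.Db4o;
    import com.db4o.ObjectContainer;

    public class CallbackDemo {
        private final int hashCode;

        CallbackDemo() {
            // Cache the identity hash once, as ClientRequest does: a stable
            // id for debugging, with 0 doubling as a "deleted" tombstone.
            int hash = super.hashCode();
            if (hash == 0) hash = 1;
            hashCode = hash;
        }

        public int hashCode() {
            return hashCode;
        }

        // db4o finds these by name; returning false vetoes the write.
        public boolean objectCanNew(ObjectContainer container) {
            return hashCode != 0;
        }

        public boolean objectCanUpdate(ObjectContainer container) {
            return hashCode != 0;
        }

        public static void main(String[] args) {
            ObjectContainer container = Db4o.openFile("callbacks.db4o");
            try {
                container.store(new CallbackDemo()); // allowed: hash != 0
                container.commit();
            } finally {
                container.close();
            }
        }
    }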
Modified:
trunk/freenet/src/freenet/node/fcp/CloseConnectionDuplicateClientNameMessage.java
===================================================================
---
trunk/freenet/src/freenet/node/fcp/CloseConnectionDuplicateClientNameMessage.java
2009-04-01 20:12:14 UTC (rev 26321)
+++
trunk/freenet/src/freenet/node/fcp/CloseConnectionDuplicateClientNameMessage.java
2009-04-01 20:34:09 UTC (rev 26322)
@@ -3,6 +3,8 @@
* http://www.gnu.org/ for further details of the GPL. */
package freenet.node.fcp;
+import com.db4o.ObjectContainer;
+
import freenet.node.Node;
import freenet.support.SimpleFieldSet;
@@ -28,4 +30,8 @@
throw new
MessageInvalidException(ProtocolErrorMessage.INVALID_MESSAGE,
"CloseConnectionDuplicateClientName goes from server to client not the other
way around", null, false);
}
+ public void removeFrom(ObjectContainer container) {
+ container.delete(this);
+ }
+
}
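
The static logMINOR blocks added to ClientPutDir and ClientRequest above
replace per-call Logger.shouldLog() lookups: the registered callback
re-runs whenever the log threshold changes, keeping a volatile flag
current. A minimal sketch using the Logger and LogThresholdCallback API
from this commit; LoggingExample is illustrative:

    import freenet.support.LogThresholdCallback;
    import freenet.support.Logger;

    public class LoggingExample {
        private static volatile boolean logMINOR;

        static {
            // shouldUpdate() is re-run on every threshold change, so the
            // flag stays current without a lookup on each log site.
            Logger.registerLogThresholdCallback(new LogThresholdCallback() {
                @Override
                public void shouldUpdate() {
                    logMINOR = Logger.shouldLog(Logger.MINOR, this);
                }
            });
        }

        void doWork() {
            if (logMINOR) Logger.minor(this, "doing work");
        }
    }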
Modified: trunk/freenet/src/freenet/node/fcp/ConfigData.java
===================================================================
--- trunk/freenet/src/freenet/node/fcp/ConfigData.java 2009-04-01 20:12:14 UTC
(rev 26321)
+++ trunk/freenet/src/freenet/node/fcp/ConfigData.java 2009-04-01 20:34:09 UTC
(rev 26322)
@@ -3,6 +3,8 @@
* http://www.gnu.org/ for further details of the GPL. */
package freenet.node.fcp;
+import com.db4o.ObjectContainer;
+
import freenet.config.Config;
import freenet.node.Node;
import freenet.support.SimpleFieldSet;
@@ -102,4 +104,9 @@
throw new
MessageInvalidException(ProtocolErrorMessage.INVALID_MESSAGE, "ConfigData goes
from server to client not the other way around", null, false);
}
+
+ public void removeFrom(ObjectContainer container) {
+ container.delete(this);
+ }
+
}
Modified: trunk/freenet/src/freenet/node/fcp/DataCarryingMessage.java
===================================================================
--- trunk/freenet/src/freenet/node/fcp/DataCarryingMessage.java 2009-04-01
20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/node/fcp/DataCarryingMessage.java 2009-04-01
20:34:09 UTC (rev 26322)
@@ -54,7 +54,7 @@
 	protected void writeData(OutputStream os) throws IOException {
 		long len = dataLength();
 		if(len > 0) BucketTools.copyTo(bucket, os, len);
-		if(freeOnSent) bucket.free();
+		if(freeOnSent) bucket.free(); // Always transient so no removeFrom() needed.
 	}
@Override
Modified: trunk/freenet/src/freenet/node/fcp/DataFoundMessage.java
===================================================================
--- trunk/freenet/src/freenet/node/fcp/DataFoundMessage.java	2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/node/fcp/DataFoundMessage.java	2009-04-01 20:34:09 UTC (rev 26322)
@@ -3,6 +3,8 @@
* http://www.gnu.org/ for further details of the GPL. */
package freenet.node.fcp;
+import com.db4o.ObjectContainer;
+
import freenet.client.FetchResult;
import freenet.node.Node;
import freenet.support.SimpleFieldSet;
@@ -48,4 +50,8 @@
 		throw new MessageInvalidException(ProtocolErrorMessage.INVALID_MESSAGE, "DataFound goes from server to client not the other way around", identifier, global);
 	}
+	public void removeFrom(ObjectContainer container) {
+		container.delete(this);
+	}
+
}
Modified: trunk/freenet/src/freenet/node/fcp/DirPutFile.java
===================================================================
--- trunk/freenet/src/freenet/node/fcp/DirPutFile.java	2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/node/fcp/DirPutFile.java	2009-04-01 20:34:09 UTC (rev 26322)
@@ -3,6 +3,8 @@
* http://www.gnu.org/ for further details of the GPL. */
package freenet.node.fcp;
+import com.db4o.ObjectContainer;
+
import freenet.client.ClientMetadata;
import freenet.client.DefaultMIMETypes;
import freenet.client.async.ManifestElement;
@@ -72,4 +74,12 @@
 		return new ManifestElement(n, getData(), getMIMEType(), getData().size());
 	}
+	/**
+	 * Remove the DirPutFile from the database. This would only be called if we had stored a ClientPut*DirMessage
+	 * into the database without executing it, which never happens.
+	 * Maybe we should get rid of this and throw UnsupportedOperationException?
+	 * @param container
+	 */
+	public abstract void removeFrom(ObjectContainer container);
+
}
Modified: trunk/freenet/src/freenet/node/fcp/DirectDirPutFile.java
===================================================================
--- trunk/freenet/src/freenet/node/fcp/DirectDirPutFile.java	2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/node/fcp/DirectDirPutFile.java	2009-04-01 20:34:09 UTC (rev 26322)
@@ -4,6 +4,8 @@
import java.io.InputStream;
import java.io.OutputStream;
+import com.db4o.ObjectContainer;
+
import freenet.support.SimpleFieldSet;
import freenet.support.api.Bucket;
import freenet.support.api.BucketFactory;
@@ -55,4 +57,10 @@
 		return data;
 	}
+	public void removeFrom(ObjectContainer container) {
+		data.free();
+		data.removeFrom(container);
+		container.delete(this);
+	}
+
}
Modified: trunk/freenet/src/freenet/node/fcp/DiskDirPutFile.java
===================================================================
--- trunk/freenet/src/freenet/node/fcp/DiskDirPutFile.java	2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/node/fcp/DiskDirPutFile.java	2009-04-01 20:34:09 UTC (rev 26322)
@@ -5,6 +5,8 @@
import java.io.File;
+import com.db4o.ObjectContainer;
+
import freenet.client.DefaultMIMETypes;
import freenet.support.SimpleFieldSet;
import freenet.support.api.Bucket;
@@ -41,4 +43,9 @@
 		return file;
 	}
+	public void removeFrom(ObjectContainer container) {
+		container.delete(file);
+		container.delete(this);
+	}
+
}
Modified: trunk/freenet/src/freenet/node/fcp/EndListPeerNotesMessage.java
===================================================================
--- trunk/freenet/src/freenet/node/fcp/EndListPeerNotesMessage.java	2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/node/fcp/EndListPeerNotesMessage.java	2009-04-01 20:34:09 UTC (rev 26322)
@@ -3,6 +3,8 @@
* http://www.gnu.org/ for further details of the GPL. */
package freenet.node.fcp;
+import com.db4o.ObjectContainer;
+
import freenet.node.Node;
import freenet.support.SimpleFieldSet;
@@ -37,4 +39,8 @@
 		throw new MessageInvalidException(ProtocolErrorMessage.INVALID_MESSAGE, "EndListPeerNotes goes from server to client not the other way around", null, false);
 	}
+	public void removeFrom(ObjectContainer container) {
+		container.delete(this);
+	}
+
}
Modified: trunk/freenet/src/freenet/node/fcp/EndListPeersMessage.java
===================================================================
--- trunk/freenet/src/freenet/node/fcp/EndListPeersMessage.java	2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/node/fcp/EndListPeersMessage.java	2009-04-01 20:34:09 UTC (rev 26322)
@@ -3,6 +3,8 @@
* http://www.gnu.org/ for further details of the GPL. */
package freenet.node.fcp;
+import com.db4o.ObjectContainer;
+
import freenet.node.Node;
import freenet.support.SimpleFieldSet;
@@ -34,4 +36,8 @@
 		throw new MessageInvalidException(ProtocolErrorMessage.INVALID_MESSAGE, "EndListPeers goes from server to client not the other way around", null, false);
 	}
+	public void removeFrom(ObjectContainer container) {
+		container.delete(this);
+	}
+
}
Modified: trunk/freenet/src/freenet/node/fcp/EndListPersistentRequestsMessage.java
===================================================================
--- trunk/freenet/src/freenet/node/fcp/EndListPersistentRequestsMessage.java	2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/node/fcp/EndListPersistentRequestsMessage.java	2009-04-01 20:34:09 UTC (rev 26322)
@@ -3,6 +3,8 @@
* http://www.gnu.org/ for further details of the GPL. */
package freenet.node.fcp;
+import com.db4o.ObjectContainer;
+
import freenet.node.Node;
import freenet.support.SimpleFieldSet;
@@ -26,4 +28,8 @@
 		throw new MessageInvalidException(ProtocolErrorMessage.INVALID_MESSAGE, "EndListPersistentRequests goes from server to client not the other way around", null, false);
 	}
+	public void removeFrom(ObjectContainer container) {
+		container.delete(this);
+	}
+
}
Modified: trunk/freenet/src/freenet/node/fcp/FCPClient.java
===================================================================
--- trunk/freenet/src/freenet/node/fcp/FCPClient.java	2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/node/fcp/FCPClient.java	2009-04-01 20:34:09 UTC (rev 26322)
@@ -1,74 +1,88 @@
 package freenet.node.fcp;
+import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
 import java.util.LinkedList;
 import java.util.List;
-import java.util.Vector;
+import java.util.Map;
-import freenet.client.FetchContext;
-import freenet.client.HighLevelSimpleClient;
-import freenet.client.InsertContext;
-import freenet.node.NodeClientCore;
+import com.db4o.ObjectContainer;
+
+import freenet.client.async.ClientContext;
+import freenet.keys.FreenetURI;
+import freenet.node.RequestClient;
 import freenet.support.Logger;
+import freenet.support.NullObject;
 /**
  * An FCP client.
- * Identified by its Name which is sent on connection.
+ * Identified by its Name which is sent on connection.
+ * Tracks persistent requests for either PERSIST_REBOOT or PERSIST_FOREVER.
+ *
+ * Note that anything that modifies a non-transient field on a PERSIST_FOREVER client should be called in a transaction.
+ * Hence the addition of the ObjectContainer parameter to all such methods.
 */
 public class FCPClient {
-	public FCPClient(String name2, FCPServer server, FCPConnectionHandler handler, boolean isGlobalQueue, RequestCompletionCallback cb) {
+	public FCPClient(String name2, FCPConnectionHandler handler, boolean isGlobalQueue, RequestCompletionCallback cb, short persistenceType, FCPPersistentRoot root, ObjectContainer container) {
 		this.name = name2;
 		if(name == null) throw new NullPointerException();
 		this.currentConnection = handler;
-		this.runningPersistentRequests = new HashSet<ClientRequest>();
-		this.completedUnackedRequests = new Vector<ClientRequest>();
-		this.clientRequestsByIdentifier = new HashMap<String, ClientRequest>();
-		this.server = server;
-		this.core = server.core;
-		this.client = core.makeClient((short)0);
+		final boolean forever = (persistenceType == ClientRequest.PERSIST_FOREVER);
+		runningPersistentRequests = new ArrayList<ClientRequest>();
+		completedUnackedRequests = new ArrayList<ClientRequest>();
+		clientRequestsByIdentifier = new HashMap<String, ClientRequest>();
 		this.isGlobalQueue = isGlobalQueue;
-		defaultFetchContext = client.getFetchContext();
-		defaultInsertContext = client.getInsertContext(false);
-		clientsWatching = new LinkedList<FCPClient>();
+		this.persistenceType = persistenceType;
+		assert(persistenceType == ClientRequest.PERSIST_FOREVER || persistenceType == ClientRequest.PERSIST_REBOOT);
 		watchGlobalVerbosityMask = Integer.MAX_VALUE;
 		toStart = new LinkedList<ClientRequest>();
-		lowLevelClient = this;
+		lowLevelClient = new RequestClient() {
+			public boolean persistent() {
+				return forever;
+			}
+			public void removeFrom(ObjectContainer container) {
+				if(forever)
+					container.delete(this);
+				else
+					throw new UnsupportedOperationException();
+			}
+		};
 		completionCallback = cb;
+		if(persistenceType == ClientRequest.PERSIST_FOREVER) {
+			assert(root != null);
+			this.root = root;
+		} else
+			this.root = null;
 	}
+	/** The persistent root object, null if persistenceType is PERSIST_REBOOT */
+	final FCPPersistentRoot root;
 	/** The client's Name sent in the ClientHello message */
 	final String name;
-	/** The FCPServer */
-	final FCPServer server;
 	/** The current connection handler, if any. */
-	private FCPConnectionHandler currentConnection;
+	private transient FCPConnectionHandler currentConnection;
 	/** Currently running persistent requests */
-	private final HashSet<ClientRequest> runningPersistentRequests;
+	private final List<ClientRequest> runningPersistentRequests;
 	/** Completed unacknowledged persistent requests */
-	private final Vector<ClientRequest> completedUnackedRequests;
+	private final List<ClientRequest> completedUnackedRequests;
 	/** ClientRequest's by identifier */
-	private final HashMap<String, ClientRequest> clientRequestsByIdentifier;
-	/** Client (one FCPClient = one HighLevelSimpleClient = one round-robin slot) */
-	private final HighLevelSimpleClient client;
-	public final FetchContext defaultFetchContext;
-	public final InsertContext defaultInsertContext;
-	public final NodeClientCore core;
+	private final Map<String, ClientRequest> clientRequestsByIdentifier;
 	/** Are we the global queue? */
 	public final boolean isGlobalQueue;
 	/** Are we watching the global queue? */
 	boolean watchGlobal;
 	int watchGlobalVerbosityMask;
-	/** FCPClients watching us */
-	// FIXME how do we lazily init this without synchronization problems?
-	// We obviously can't synchronize on it when it hasn't been constructed yet...
-	final LinkedList<FCPClient> clientsWatching;
+	/** FCPClients watching us. Lazy init, sync on clientsWatchingLock */
+	private transient LinkedList<FCPClient> clientsWatching;
+	private final NullObject clientsWatchingLock = new NullObject();
 	private final LinkedList<ClientRequest> toStart;
-	/** Low-level client object, for freenet.client.async. Normally == this. */
-	final Object lowLevelClient;
-	private RequestCompletionCallback completionCallback;
+	final RequestClient lowLevelClient;
+	private transient RequestCompletionCallback completionCallback;
+	/** Connection mode */
+	final short persistenceType;
public synchronized FCPConnectionHandler getConnection() {
return currentConnection;
@@ -88,15 +102,27 @@
 	 * Called when a client request has finished, but is persistent. It has not been
 	 * acked yet, so it should be moved to the unacked-completed-requests set.
 	 */
-	public void finishedClientRequest(ClientRequest get) {
+	public void finishedClientRequest(ClientRequest get, ObjectContainer container) {
+		if(Logger.shouldLog(Logger.MINOR, this))
+			Logger.minor(this, "Finished client request", new Exception("debug"));
+		assert((persistenceType == ClientRequest.PERSIST_FOREVER) == (container != null));
+		assert(get.persistenceType == persistenceType);
+		if(container != null) {
+			container.activate(runningPersistentRequests, 2);
+			container.activate(completedUnackedRequests, 2);
+		}
 		synchronized(this) {
 			if(runningPersistentRequests.remove(get)) {
 				completedUnackedRequests.add(get);
+				if(container != null) {
+					container.store(get);
+					// http://tracker.db4o.com/browse/COR-1436
+					// If we don't specify depth, we end up updating everything, resulting in Bad Things (especially on ClientPutDir.manifestElements!)
+					container.ext().store(runningPersistentRequests, 2);
+					container.ext().store(completedUnackedRequests, 2);
+				}
 			}
 		}
-		if(get.isPersistentForever()) {
-			server.forceStorePersistentRequests();
-		}
 	}
/**
@@ -104,30 +130,54 @@
 	 * requests, to be immediately sent. This happens automatically on startup and hopefully
 	 * will encourage clients to acknowledge persistent requests!
 	 */
-	public void queuePendingMessagesOnConnectionRestart(FCPConnectionOutputHandler outputHandler) {
+	public void queuePendingMessagesOnConnectionRestart(FCPConnectionOutputHandler outputHandler, ObjectContainer container) {
+		assert((persistenceType == ClientRequest.PERSIST_FOREVER) == (container != null));
 		Object[] reqs;
+		if(container != null) {
+			container.activate(completedUnackedRequests, 2);
+		}
 		synchronized(this) {
 			reqs = completedUnackedRequests.toArray();
 		}
-		for(int i=0;i<reqs.length;i++)
-			((ClientRequest)reqs[i]).sendPendingMessages(outputHandler, true, false, false);
+		for(int i=0;i<reqs.length;i++) {
+			ClientRequest req = (ClientRequest) reqs[i];
+			if(persistenceType == ClientRequest.PERSIST_FOREVER)
+				container.activate(req, 1);
+			((ClientRequest)reqs[i]).sendPendingMessages(outputHandler, true, false, false, container);
+		}
 	}
 	/**
 	 * Queue any and all pending messages from running requests. Happens on demand.
 	 */
-	public void queuePendingMessagesFromRunningRequests(FCPConnectionOutputHandler outputHandler) {
+	public void queuePendingMessagesFromRunningRequests(FCPConnectionOutputHandler outputHandler, ObjectContainer container) {
+		assert((persistenceType == ClientRequest.PERSIST_FOREVER) == (container != null));
 		Object[] reqs;
+		if(container != null) {
+			container.activate(runningPersistentRequests, 2);
+		}
 		synchronized(this) {
 			reqs = runningPersistentRequests.toArray();
 		}
-		for(int i=0;i<reqs.length;i++)
-			((ClientRequest)reqs[i]).sendPendingMessages(outputHandler, true, false, false);
+		for(int i=0;i<reqs.length;i++) {
+			ClientRequest req = (ClientRequest) reqs[i];
+			if(persistenceType == ClientRequest.PERSIST_FOREVER)
+				container.activate(req, 1);
+			req.sendPendingMessages(outputHandler, true, false, false, container);
+		}
 	}
-	public void register(ClientRequest cg, boolean startLater) throws IdentifierCollisionException {
+	public void register(ClientRequest cg, boolean startLater, ObjectContainer container) throws IdentifierCollisionException {
+		assert(cg.persistenceType == persistenceType);
+		assert((persistenceType == ClientRequest.PERSIST_FOREVER) == (container != null));
 		if(Logger.shouldLog(Logger.MINOR, this))
 			Logger.minor(this, "Registering "+cg.getIdentifier()+(startLater ? " to start later" : ""));
+		if(container != null) {
+			container.activate(completedUnackedRequests, 2);
+			container.activate(runningPersistentRequests, 2);
+			container.activate(toStart, 2);
+			container.activate(clientRequestsByIdentifier, 2);
+		}
 		synchronized(this) {
 			String ident = cg.getIdentifier();
 			ClientRequest old = clientRequestsByIdentifier.get(ident);
@@ -135,48 +185,127 @@
 				throw new IdentifierCollisionException();
 			if(cg.hasFinished()) {
 				completedUnackedRequests.add(cg);
+				if(container != null) {
+					container.store(cg);
+					container.store(completedUnackedRequests);
+				}
 			} else {
 				runningPersistentRequests.add(cg);
 				if(startLater) toStart.add(cg);
+				if(container != null) {
+					cg.storeTo(container);
+					container.ext().store(runningPersistentRequests, 2);
+					if(startLater) container.store(toStart);
+				}
 			}
 			clientRequestsByIdentifier.put(ident, cg);
+			if(container != null) container.ext().store(clientRequestsByIdentifier, 2);
 		}
 	}
-	public void removeByIdentifier(String identifier, boolean kill) throws MessageInvalidException {
+	public boolean removeByIdentifier(String identifier, boolean kill, FCPServer server, ObjectContainer container, ClientContext context) {
+		assert((persistenceType == ClientRequest.PERSIST_FOREVER) == (container != null));
 		ClientRequest req;
 		boolean logMINOR = Logger.shouldLog(Logger.MINOR, this);
 		if(logMINOR) Logger.minor(this, "removeByIdentifier("+identifier+ ',' +kill+ ')');
+		if(container != null) {
+			container.activate(completedUnackedRequests, 2);
+			container.activate(runningPersistentRequests, 2);
+			container.activate(clientRequestsByIdentifier, 2);
+		}
 		synchronized(this) {
 			req = clientRequestsByIdentifier.get(identifier);
-			if(req == null)
-				throw new MessageInvalidException(ProtocolErrorMessage.NO_SUCH_IDENTIFIER, "Not in hash", identifier, isGlobalQueue);
-			else if(!(runningPersistentRequests.remove(req) || completedUnackedRequests.remove(req)))
-				throw new MessageInvalidException(ProtocolErrorMessage.NO_SUCH_IDENTIFIER, "Not found", identifier, isGlobalQueue);
+//			if(container != null && req != null)
+//				container.activate(req, 1);
+			boolean removedFromRunning = false;
+			if(req == null) {
+				for(ClientRequest r : completedUnackedRequests) {
+					container.activate(r, 1);
+					if(r.getIdentifier().equals(identifier)) {
+						req = r;
+						completedUnackedRequests.remove(r);
+						Logger.error(this, "Found completed unacked request "+r+" for identifier "+r.getIdentifier()+" but not in clientRequestsByIdentifier!!");
+						break;
+					}
+					container.deactivate(r, 1);
+				}
+				if(req == null) {
+					for(ClientRequest r : runningPersistentRequests) {
+						container.activate(r, 1);
+						if(r.getIdentifier().equals(identifier)) {
+							req = r;
+							runningPersistentRequests.remove(r);
+							removedFromRunning = true;
+							Logger.error(this, "Found running request "+r+" for identifier "+r.getIdentifier()+" but not in clientRequestsByIdentifier!!");
+							break;
+						}
+						container.deactivate(r, 1);
+					}
+				}
+				if(req == null) return false;
+			} else if(!((removedFromRunning = runningPersistentRequests.remove(req)) || completedUnackedRequests.remove(req))) {
+				Logger.error(this, "Removing "+identifier+": in clientRequestsByIdentifier but not in running/completed maps!");
+
+				return false;
+			}
 			clientRequestsByIdentifier.remove(identifier);
+			if(container != null) {
+				if(removedFromRunning) container.store(runningPersistentRequests);
+				else container.store(completedUnackedRequests);
+				container.ext().store(clientRequestsByIdentifier, 2);
+			}
 		}
-		req.requestWasRemoved();
+		if(container != null)
+			container.activate(req, 1);
 		if(kill) {
 			if(logMINOR) Logger.minor(this, "Killing request "+req);
-			req.cancel();
+			req.cancel(container, context);
 		}
+		req.requestWasRemoved(container, context);
 		if(completionCallback != null)
-			completionCallback.onRemove(req);
-		server.forceStorePersistentRequests();
+			completionCallback.onRemove(req, container);
+		return true;
 	}
-	public boolean hasPersistentRequests() {
+	public boolean hasPersistentRequests(ObjectContainer container) {
+		assert((persistenceType == ClientRequest.PERSIST_FOREVER) == (container != null));
+		if(runningPersistentRequests == null) {
+			if(!container.ext().isActive(this))
+				Logger.error(this, "FCPCLIENT NOT ACTIVE!!!");
+			throw new NullPointerException();
+		}
+		if(completedUnackedRequests == null) {
+			if(!container.ext().isActive(this))
+				Logger.error(this, "FCPCLIENT NOT ACTIVE!!!");
+			throw new NullPointerException();
+		}
+		if(container != null) {
+			container.activate(completedUnackedRequests, 2);
+			container.activate(runningPersistentRequests, 2);
+		}
 		return !(runningPersistentRequests.isEmpty() && completedUnackedRequests.isEmpty());
 	}
-	public void addPersistentRequests(List<ClientRequest> v, boolean onlyForever) {
+	public void addPersistentRequests(List<ClientRequest> v, boolean onlyForever, ObjectContainer container) {
+		assert((persistenceType == ClientRequest.PERSIST_FOREVER) == (container != null));
+		if(container != null) {
+			container.activate(completedUnackedRequests, 2);
+			container.activate(runningPersistentRequests, 2);
+			container.activate(clientRequestsByIdentifier, 2);
+		}
 		synchronized(this) {
 			Iterator<ClientRequest> i = runningPersistentRequests.iterator();
 			while(i.hasNext()) {
 				ClientRequest req = i.next();
+				if(container != null) container.activate(req, 1);
 				if(req.isPersistentForever() || !onlyForever)
 					v.add(req);
 			}
+			if(container != null) {
+				for(ClientRequest req : completedUnackedRequests) {
+					container.activate(req, 1);
+				}
+			}
 			v.addAll(completedUnackedRequests);
 		}
 	}
@@ -187,19 +316,25 @@
 	 * @param verbosityMask If so, what verbosity mask to use (to filter messages
 	 * generated by the global queue).
 	 */
-	public void setWatchGlobal(boolean enabled, int verbosityMask) {
+	public void setWatchGlobal(boolean enabled, int verbosityMask, FCPServer server, ObjectContainer container) {
+		assert((persistenceType == ClientRequest.PERSIST_FOREVER) == (container != null));
 		if(isGlobalQueue) {
-			Logger.error(this, "Set watch global on global queue!");
+			Logger.error(this, "Set watch global on global queue!: "+this, new Exception("debug"));
 			return;
 		}
 		if(watchGlobal && !enabled) {
-			server.globalClient.unwatch(this);
+			server.globalRebootClient.unwatch(this);
+			server.globalForeverClient.unwatch(this);
 			watchGlobal = false;
 		} else if(enabled && !watchGlobal) {
-			server.globalClient.watch(this);
+			server.globalRebootClient.watch(this);
+			server.globalForeverClient.watch(this);
 			FCPConnectionHandler connHandler = getConnection();
 			if(connHandler != null) {
-				server.globalClient.queuePendingMessagesOnConnectionRestart(connHandler.outputHandler);
+				if(persistenceType == ClientRequest.PERSIST_REBOOT)
+					server.globalRebootClient.queuePendingMessagesOnConnectionRestart(connHandler.outputHandler, container);
+				else
+					server.globalForeverClient.queuePendingMessagesOnConnectionRestart(connHandler.outputHandler, container);
 			}
 			watchGlobal = true;
 		}
@@ -207,8 +342,12 @@
 		this.watchGlobalVerbosityMask = verbosityMask;
 	}
-	public void queueClientRequestMessage(FCPMessage msg, int verbosityLevel) {
-		if((verbosityLevel & watchGlobalVerbosityMask) != verbosityLevel)
+	public void queueClientRequestMessage(FCPMessage msg, int verbosityLevel, ObjectContainer container) {
+		queueClientRequestMessage(msg, verbosityLevel, false, container);
+	}
+
+	public void queueClientRequestMessage(FCPMessage msg, int verbosityLevel, boolean useGlobalMask, ObjectContainer container) {
+		if(useGlobalMask && (verbosityLevel & watchGlobalVerbosityMask) != verbosityLevel)
 			return;
 		FCPConnectionHandler conn = getConnection();
 		if(conn != null) {
@@ -216,42 +355,72 @@
 		}
 		FCPClient[] clients;
 		if(isGlobalQueue) {
-			synchronized(clientsWatching) {
+			synchronized(clientsWatchingLock) {
+				if(clientsWatching != null)
 				clients = clientsWatching.toArray(new FCPClient[clientsWatching.size()]);
+				else
+					clients = null;
 			}
-			for(int i=0;i<clients.length;i++)
-				clients[i].queueClientRequestMessage(msg, verbosityLevel);
+			if(clients != null)
+			for(int i=0;i<clients.length;i++) {
+				if(persistenceType == ClientRequest.PERSIST_FOREVER)
+					container.activate(clients[i], 1);
+				if(clients[i].persistenceType != persistenceType) continue;
+				clients[i].queueClientRequestMessage(msg, verbosityLevel, true, container);
+				if(persistenceType == ClientRequest.PERSIST_FOREVER)
+					container.deactivate(clients[i], 1);
+			}
 		}
 	}
 	private void unwatch(FCPClient client) {
 		if(!isGlobalQueue) return;
-		synchronized(clientsWatching) {
+		synchronized(clientsWatchingLock) {
+			if(clientsWatching != null)
 			clientsWatching.remove(client);
 		}
 	}
 	private void watch(FCPClient client) {
 		if(!isGlobalQueue) return;
-		synchronized(clientsWatching) {
+		synchronized(clientsWatchingLock) {
+			if(clientsWatching == null)
+				clientsWatching = new LinkedList<FCPClient>();
 			clientsWatching.add(client);
 		}
 	}
-	public synchronized ClientRequest getRequest(String identifier) {
-		return clientRequestsByIdentifier.get(identifier);
+	public synchronized ClientRequest getRequest(String identifier, ObjectContainer container) {
+		assert((persistenceType == ClientRequest.PERSIST_FOREVER) == (container != null));
+		if(container != null) {
+			container.activate(clientRequestsByIdentifier, 2);
+		}
+		ClientRequest req = clientRequestsByIdentifier.get(identifier);
+		if(persistenceType == ClientRequest.PERSIST_FOREVER)
+			container.activate(req, 1);
+		return req;
 	}
 	/**
 	 * Start all delayed-start requests.
 	 */
-	public void finishStart() {
+	public void finishStart(ObjectContainer container, ClientContext context) {
 		ClientRequest[] reqs;
+		if(container != null) {
+			container.activate(toStart, 2);
+		}
 		synchronized(this) {
 			reqs = toStart.toArray(new ClientRequest[toStart.size()]);
+			toStart.clear();
+			container.store(toStart);
 		}
-		for(int i=0;i<reqs.length;i++)
-			reqs[i].start();
+		for(int i=0;i<reqs.length;i++) {
+			System.err.println("Starting migrated request "+i+" of "+reqs.length);
+			final ClientRequest req = reqs[i];
+			container.activate(req, 1);
+			req.start(container, context);
+			container.deactivate(req, 1);
+		}
 	}
 	@Override
@@ -262,18 +431,20 @@
 	/**
 	 * Callback called when a request succeeds.
 	 */
-	public void notifySuccess(ClientRequest req) {
+	public void notifySuccess(ClientRequest req, ObjectContainer container) {
+		assert(req.persistenceType == persistenceType);
 		if(completionCallback != null)
-			completionCallback.notifySuccess(req);
+			completionCallback.notifySuccess(req, container);
 	}
 	/**
 	 * Callback called when a request fails
 	 * @param get
 	 */
-	public void notifyFailure(ClientRequest req) {
+	public void notifyFailure(ClientRequest req, ObjectContainer container) {
+		assert(req.persistenceType == persistenceType);
 		if(completionCallback != null)
-			completionCallback.notifyFailure(req);
+			completionCallback.notifyFailure(req, container);
 	}
 	public synchronized RequestCompletionCallback setRequestCompletionCallback(RequestCompletionCallback cb) {
@@ -281,4 +452,99 @@
 		completionCallback = cb;
 		return old;
 	}
+
+	public void removeFromDatabase(ObjectContainer container) {
+		container.activate(runningPersistentRequests, 2);
+		container.delete(runningPersistentRequests);
+		container.activate(completedUnackedRequests, 2);
+		container.delete(completedUnackedRequests);
+		container.activate(clientRequestsByIdentifier, 2);
+		container.delete(clientRequestsByIdentifier);
+		container.activate(toStart, 2);
+		container.delete(toStart);
+		container.activate(lowLevelClient, 2);
+		lowLevelClient.removeFrom(container);
+		container.delete(this);
+		container.delete(clientsWatchingLock);
+	}
+
+	public void removeAll(ObjectContainer container, ClientContext context) {
+		HashSet<ClientRequest> toKill = new HashSet<ClientRequest>();
+		if(container != null) {
+			container.activate(completedUnackedRequests, 2);
+			container.activate(runningPersistentRequests, 2);
+			container.activate(toStart, 2);
+			container.activate(clientRequestsByIdentifier, 2);
+		}
+		synchronized(this) {
+			Iterator<ClientRequest> i = runningPersistentRequests.iterator();
+			while(i.hasNext()) {
+				ClientRequest req = i.next();
+				toKill.add(req);
+			}
+			runningPersistentRequests.clear();
+			for(int j=0;j<completedUnackedRequests.size();j++)
+				toKill.add(completedUnackedRequests.get(j));
+			completedUnackedRequests.clear();
+			i = clientRequestsByIdentifier.values().iterator();
+			while(i.hasNext()) {
+				ClientRequest req = i.next();
+				toKill.add(req);
+			}
+			clientRequestsByIdentifier.clear();
+			container.ext().store(clientRequestsByIdentifier, 2);
+			i = toStart.iterator();
+			while(i.hasNext()) {
+				ClientRequest req = i.next();
+				toKill.add(req);
+			}
+			toStart.clear();
+		}
+		Iterator<ClientRequest> i = toStart.iterator();
+		while(i.hasNext()) {
+			ClientRequest req = i.next();
+			req.cancel(container, context);
+			req.requestWasRemoved(container, context);
+		}
+	}
+
+	public ClientGet getCompletedRequest(FreenetURI key, ObjectContainer container) {
+		// FIXME speed this up with another hashmap or something.
+		// FIXME keep a transient hashmap in RAM, use it for fproxy.
+		// FIXME consider supporting inserts too.
+		if(container != null) {
+			container.activate(completedUnackedRequests, 2);
+		}
+		for(int i=0;i<completedUnackedRequests.size();i++) {
+			ClientRequest req = completedUnackedRequests.get(i);
+			if(!(req instanceof ClientGet)) continue;
+			ClientGet getter = (ClientGet) req;
+			if(persistenceType == ClientRequest.PERSIST_FOREVER)
+				container.activate(getter, 1);
+			if(getter.getURI(container).equals(key)) {
+				return getter;
+			} else {
+				if(persistenceType == ClientRequest.PERSIST_FOREVER)
+					container.deactivate(getter, 1);
+			}
+		}
+		return null;
+	}
+
+	public void init(ObjectContainer container) {
+		container.activate(runningPersistentRequests, 2);
+		container.activate(completedUnackedRequests, 2);
+		container.activate(clientRequestsByIdentifier, 2);
+		container.activate(lowLevelClient, 2);
+	}
+
+	public boolean objectCanNew(ObjectContainer container) {
+		if(persistenceType != ClientRequest.PERSIST_FOREVER) {
+			Logger.error(this, "Not storing non-persistent request in database", new Exception("error"));
+			return false;
+		}
+		return true;
+	}
+
+
 }
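Two db4o habits recur through the FCPClient changes above: activate a collection to depth 2 before reading it (depth 1 can leave its backing array unloaded, so the list reads as empty), and write it back with container.ext().store(obj, 2) so the update does not cascade through every reachable object (the COR-1436 note). Both happen inside a queued DBJob so the mutation and the store commit together on the database thread. A condensed sketch of the pattern under those assumptions; ExampleList is an invented persisted holder:

    import java.util.ArrayList;
    import java.util.List;

    import com.db4o.ObjectContainer;

    import freenet.client.async.ClientContext;
    import freenet.client.async.DBJob;
    import freenet.support.io.NativeThread;

    class ExampleList {
        final List<String> entries = new ArrayList<String>();
    }

    class ExampleMutation {
        void addEntry(final ExampleList list, final String entry, ClientContext context) {
            context.jobRunner.queue(new DBJob() {
                public void run(ObjectContainer container, ClientContext context) {
                    // Depth 2: the list AND its backing array, or it reads as empty.
                    container.activate(list, 2);
                    list.entries.add(entry);
                    // Explicit depth on the store, too, to avoid a full-graph update.
                    container.ext().store(list.entries, 2);
                }
            }, NativeThread.NORM_PRIORITY, false);
        }
    }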
Modified: trunk/freenet/src/freenet/node/fcp/FCPConnectionHandler.java
===================================================================
--- trunk/freenet/src/freenet/node/fcp/FCPConnectionHandler.java	2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/node/fcp/FCPConnectionHandler.java	2009-04-01 20:34:09 UTC (rev 26322)
@@ -11,12 +11,18 @@
import java.util.Iterator;
import java.util.Random;
+import com.db4o.ObjectContainer;
+
+import freenet.client.InsertException;
+import freenet.client.async.ClientContext;
+import freenet.client.async.DBJob;
import freenet.support.HexUtil;
import freenet.support.Logger;
import freenet.support.LogThresholdCallback;
import freenet.support.api.BucketFactory;
import freenet.support.io.Closer;
import freenet.support.io.FileUtil;
+import freenet.support.io.NativeThread;
public class FCPConnectionHandler implements Closeable {
private static final class DirectoryAccess {
@@ -59,11 +65,14 @@
private boolean inputClosed;
private boolean outputClosed;
private String clientName;
- private FCPClient client;
+ private FCPClient rebootClient;
+ private FCPClient foreverClient;
+ private boolean failedGetForever;
final BucketFactory bf;
final HashMap<String, ClientRequest> requestsByIdentifier;
protected final String connectionIdentifier;
private static volatile boolean logMINOR;
+ private boolean killedDupe;
static {
Logger.registerLogThresholdCallback(new LogThresholdCallback(){
@@ -98,20 +107,46 @@
 	public void close() {
 		ClientRequest[] requests;
-		if(client != null)
-			client.onLostConnection(this);
+		if(rebootClient != null)
+			rebootClient.onLostConnection(this);
+		if(foreverClient != null)
+			foreverClient.onLostConnection(this);
+		boolean dupe;
 		synchronized(this) {
 			isClosed = true;
 			requests = new ClientRequest[requestsByIdentifier.size()];
 			requests = requestsByIdentifier.values().toArray(requests);
+			dupe = killedDupe;
 		}
 		for(int i=0;i<requests.length;i++)
-			requests[i].onLostConnection();
-		if((client != null) && !client.hasPersistentRequests())
-			server.unregisterClient(client);
+			requests[i].onLostConnection(null, server.core.clientContext);
+		if(!dupe) {
+			server.core.clientContext.jobRunner.queue(new DBJob() {
+
+				public void run(ObjectContainer container, ClientContext context) {
+					if((rebootClient != null) && !rebootClient.hasPersistentRequests(null))
+						server.unregisterClient(rebootClient, null);
+					if(foreverClient != null) {
+						if(!container.ext().isStored(foreverClient)) {
+							Logger.error(this, "foreverClient is not stored in the database in lost connection non-dupe callback; not deleting it");
+							return;
+						}
+						container.activate(foreverClient, 1);
+						if(!foreverClient.hasPersistentRequests(container))
+							server.unregisterClient(foreverClient, container);
+						container.deactivate(foreverClient, 1);
+					}
+				}
+
+			}, NativeThread.NORM_PRIORITY, false);
+		}
 		outputHandler.onClosed();
 	}
+	synchronized void setKilledDupe() {
+		killedDupe = true;
+	}
+
public synchronized boolean isClosed() {
return isClosed;
}
@@ -150,19 +185,54 @@
 		}
 	}
-	public void setClientName(String name) {
+	public void setClientName(final String name) {
 		this.clientName = name;
-		client = server.registerClient(name, server.core, this);
+		rebootClient = server.registerRebootClient(name, server.core, this);
+		rebootClient.queuePendingMessagesOnConnectionRestart(outputHandler, null);
+		server.core.clientContext.jobRunner.queue(new DBJob() {
+
+			public void run(ObjectContainer container, ClientContext context) {
+				try {
+					createForeverClient(name, container);
+				} catch (Throwable t) {
+					Logger.error(this, "Caught "+t+" creating persistent client for "+name, t);
+					failedGetForever = true;
+					synchronized(FCPConnectionHandler.this) {
+						failedGetForever = true;
+						FCPConnectionHandler.this.notifyAll();
+					}
+				}
+			}
+
+		}, NativeThread.NORM_PRIORITY, false);
 		if(logMINOR)
 			Logger.minor(this, "Set client name: "+name);
 	}
+	protected FCPClient createForeverClient(String name, ObjectContainer container) {
+		synchronized(FCPConnectionHandler.this) {
+			if(foreverClient != null) return foreverClient;
+		}
+		FCPClient client = server.registerForeverClient(name, server.core, FCPConnectionHandler.this, container);
+		synchronized(FCPConnectionHandler.this) {
+			foreverClient = client;
+			FCPConnectionHandler.this.notifyAll();
+		}
+		client.queuePendingMessagesOnConnectionRestart(outputHandler, container);
+		return foreverClient;
+	}
+
 	public String getClientName() {
 		return clientName;
 	}
+	/**
+	 * Start a ClientGet. If there is an identifier collision, queue an IdentifierCollisionMessage.
+	 * Hence, we can run stuff on other threads if we need to, as long as we send the right messages.
+	 */
 	public void startClientGet(ClientGetMessage message) {
-		String id = message.identifier;
+		final String id = message.identifier;
+		final boolean global = message.global;
 		ClientGet cg = null;
 		boolean success;
 		boolean persistent = message.persistenceType != ClientRequest.PERSIST_CONNECTION;
@@ -175,9 +245,30 @@
 			success = !requestsByIdentifier.containsKey(id);
 			if(success) {
 				try {
-					cg = new ClientGet(this, message);
+					cg = new ClientGet(this, message, server);
 					if(!persistent)
 						requestsByIdentifier.put(id, cg);
+					else if(message.persistenceType == ClientRequest.PERSIST_FOREVER) {
+						final ClientGet getter = cg;
+						server.core.clientContext.jobRunner.queue(new DBJob() {
+
+							public void run(ObjectContainer container, ClientContext context) {
+								try {
+									getter.register(container, false, false);
+									container.store(getter);
+								} catch (IdentifierCollisionException e) {
+									Logger.normal(this, "Identifier collision on "+this);
+									FCPMessage msg = new IdentifierCollisionMessage(id, global);
+									outputHandler.queue(msg);
+									return;
+								}
+								getter.start(container, context);
+								container.deactivate(getter, 1);
+							}
+
+						}, NativeThread.HIGH_PRIORITY-1, false); // user wants a response soon... but doesn't want it to block the queue page etc
+						return; // Don't run the start() below
+					}
 				} catch (IdentifierCollisionException e) {
 					success = false;
 				} catch (MessageInvalidException e) {
@@ -192,20 +283,15 @@
 			outputHandler.queue(msg);
 			return;
 		} else {
-			// Register before starting, because it may complete immediately, and if it does,
-			// we may end up with it not being removable because it wasn't registered!
-			if(cg.isPersistent()) {
-				if(cg.isPersistentForever())
-					server.forceStorePersistentRequests();
-			}
-			cg.start();
+			cg.start(null, server.core.clientContext);
 		}
 	}
-	public void startClientPut(ClientPutMessage message) {
+	public void startClientPut(final ClientPutMessage message) {
 		if(logMINOR)
 			Logger.minor(this, "Starting insert ID=\""+message.identifier+ '"');
-		String id = message.identifier;
+		final String id = message.identifier;
+		final boolean global = message.global;
 		ClientPut cp = null;
 		boolean persistent = message.persistenceType != ClientRequest.PERSIST_CONNECTION;
 		FCPMessage failedMessage = null;
@@ -219,7 +305,7 @@
 			success = !requestsByIdentifier.containsKey(id);
 			if(success) {
 				try {
-					cp = new ClientPut(this, message);
+					cp = new ClientPut(this, message, server);
 				} catch (IdentifierCollisionException e) {
 					success = false;
 				} catch (MessageInvalidException e) {
@@ -228,8 +314,29 @@
 				} catch (MalformedURLException e) {
 					failedMessage = new ProtocolErrorMessage(ProtocolErrorMessage.FREENET_URI_PARSE_ERROR, true, null, id, message.global);
 				}
-				if(!persistent)
+				if(cp != null && !persistent)
 					requestsByIdentifier.put(id, cp);
+				else if(cp != null && message.persistenceType == ClientRequest.PERSIST_FOREVER) {
+					final ClientPut putter = cp;
+					server.core.clientContext.jobRunner.queue(new DBJob() {
+
+						public void run(ObjectContainer container, ClientContext context) {
+							try {
+								putter.register(container, false, false);
+								container.store(putter);
+							} catch (IdentifierCollisionException e) {
+								Logger.normal(this, "Identifier collision on "+this);
+								FCPMessage msg = new IdentifierCollisionMessage(id, global);
+								outputHandler.queue(msg);
+								return;
+							}
+							putter.start(container, context);
+							container.deactivate(putter, 1);
+						}
+
+					}, NativeThread.HIGH_PRIORITY-1, false); // user wants a response soon... but doesn't want it to block the queue page etc
+					return; // Don't run the start() below
+				}
 			}
 			if(!success) {
 				Logger.normal(this, "Identifier collision on "+this);
@@ -238,27 +345,36 @@
 		}
 		if(failedMessage != null) {
 			outputHandler.queue(failedMessage);
-			if(cp != null)
-				cp.freeData();
-			else
-				message.freeData();
+			if(persistent) {
+				final ClientPut c = cp;
+				server.core.clientContext.jobRunner.queue(new DBJob() {
+
+					public void run(ObjectContainer container, ClientContext context) {
+						if(c != null)
+							c.freeData(container);
+						else
+							message.freeData(container);
+					}
+
+				}, NativeThread.HIGH_PRIORITY-1, false);
+			} else {
+				if(cp != null)
+					cp.freeData(null);
+				else
+					message.freeData(null);
+			}
 			return;
 		} else {
 			Logger.minor(this, "Starting "+cp);
-			// Register before starting, because it may complete immediately, and if it does,
-			// we may end up with it not being removable because it wasn't registered!
-			if(cp.isPersistent()) {
-				if(cp.isPersistentForever())
-					server.forceStorePersistentRequests();
-			}
-			cp.start();
+			cp.start(null, server.core.clientContext);
 		}
 	}
 	public void startClientPutDir(ClientPutDirMessage message, HashMap<String, Object> buckets, boolean wasDiskPut) {
 		if(logMINOR)
 			Logger.minor(this, "Start ClientPutDir");
-		String id = message.identifier;
+		final String id = message.identifier;
+		final boolean global = message.global;
 		ClientPutDir cp = null;
 		FCPMessage failedMessage = null;
 		boolean persistent = message.persistenceType != ClientRequest.PERSIST_CONNECTION;
@@ -273,17 +389,38 @@
 		}
 		if(success) {
 			try {
-				cp = new ClientPutDir(this, message, buckets, wasDiskPut);
+				cp = new ClientPutDir(this, message, buckets, wasDiskPut, server);
 			} catch (IdentifierCollisionException e) {
 				success = false;
 			} catch (MalformedURLException e) {
 				failedMessage = new ProtocolErrorMessage(ProtocolErrorMessage.FREENET_URI_PARSE_ERROR, true, null, id, message.global);
 			}
-			if(!persistent) {
+			if(cp != null && !persistent) {
 				synchronized(this) {
 					requestsByIdentifier.put(id, cp);
 				}
 				// FIXME register non-persistent requests in the constructors also, we already register persistent ones...
+			} else if(cp != null && message.persistenceType == ClientRequest.PERSIST_FOREVER) {
+				final ClientPutDir putter = cp;
+				server.core.clientContext.jobRunner.queue(new DBJob() {
+
+					public void run(ObjectContainer container, ClientContext context) {
+						try {
+							putter.register(container, false, false);
+							container.store(putter);
+						} catch (IdentifierCollisionException e) {
+							Logger.normal(this, "Identifier collision on "+this);
+							FCPMessage msg = new IdentifierCollisionMessage(id, global);
+							outputHandler.queue(msg);
+							return;
+						}
+						putter.start(container, context);
+						container.deactivate(putter, 1);
+					}
+
+				}, NativeThread.HIGH_PRIORITY-1, false); // user wants a response soon... but doesn't want it to block the queue page etc
+				return; // Don't run the start() below
+
 			}
 			if(!success) {
 				Logger.normal(this, "Identifier collision on "+this);
@@ -293,25 +430,38 @@
 		if(failedMessage != null) {
 			outputHandler.queue(failedMessage);
 			if(cp != null)
-				cp.cancel();
+				cp.cancel(null, server.core.clientContext);
 			return;
 		} else {
-			// Register before starting, because it may complete immediately, and if it does,
-			// we may end up with it not being removable because it wasn't registered!
-			if(cp.isPersistent()) {
-				if(cp.isPersistentForever())
-					server.forceStorePersistentRequests();
-			}
 			if(logMINOR)
 				Logger.minor(this, "Starting "+cp);
-			cp.start();
+			cp.start(null, server.core.clientContext);
 		}
 	}
-	public FCPClient getClient() {
-		return client;
+	public FCPClient getRebootClient() {
+		return rebootClient;
 	}
+	public FCPClient getForeverClient(ObjectContainer container) {
+		synchronized(this) {
+			if(foreverClient != null)
+				return foreverClient;
+			if(container == null) {
+				while(foreverClient == null && (!failedGetForever) && (!isClosed)) {
+					try {
+						wait();
+					} catch (InterruptedException e) {
+						// Ignore
+					}
+				}
+				return foreverClient;
+			} else {
+				return createForeverClient(clientName, container);
+			}
+		}
+	}
+
 	public void finishedClientRequest(ClientRequest get) {
 		synchronized(this) {
 			requestsByIdentifier.remove(get.getIdentifier());
@@ -319,7 +469,7 @@
}
public boolean isGlobalSubscribed() {
- return client.watchGlobal;
+ return rebootClient.watchGlobal;
}
public boolean hasFullAccess() {
@@ -465,10 +615,56 @@
 			req = requestsByIdentifier.remove(identifier);
 		}
 		if(req != null) {
-			req.requestWasRemoved();
 			if(kill)
-				req.cancel();
+				req.cancel(null, server.core.clientContext);
+			req.requestWasRemoved(null, server.core.clientContext);
 		}
 		return req;
 	}
+
+	ClientRequest getRebootRequest(boolean global, FCPConnectionHandler handler, String identifier) {
+		if(global)
+			return handler.server.globalRebootClient.getRequest(identifier, null);
+		else
+			return handler.getRebootClient().getRequest(identifier, null);
+	}
+
+	ClientRequest getForeverRequest(boolean global, FCPConnectionHandler handler, String identifier, ObjectContainer container) {
+		if(global)
+			return handler.server.globalForeverClient.getRequest(identifier, container);
+		else
+			return handler.getForeverClient(container).getRequest(identifier, container);
+	}
+
+	ClientRequest removePersistentRebootRequest(boolean global, String identifier) throws MessageInvalidException {
+		FCPClient client =
+			global ? server.globalRebootClient :
+			getRebootClient();
+		ClientRequest req = client.getRequest(identifier, null);
+		if(req != null) {
+			client.removeByIdentifier(identifier, true, server, null, server.core.clientContext);
+		}
+		return req;
+	}
+
+	ClientRequest removePersistentForeverRequest(boolean global, String identifier, ObjectContainer container) throws MessageInvalidException {
+		FCPClient client =
+			global ? server.globalForeverClient :
+			getForeverClient(container);
+		container.activate(client, 1);
+		ClientRequest req = client.getRequest(identifier, container);
+		if(req != null) {
+			client.removeByIdentifier(identifier, true, server, container, server.core.clientContext);
+		}
+		if(!global)
+			container.deactivate(client, 1);
+		return req;
+	}
+
+	public boolean objectCanNew(ObjectContainer container) {
+		Logger.error(this, "Not storing FCPConnectionHandler in database", new Exception("error"));
+		return false;
+	}
+
+
 }
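For PERSIST_FOREVER requests, the three startClient*() methods above share one shape: build the request on the FCP thread, then hop onto the database thread to register, store and start it, reporting an identifier collision back as a message rather than an exception. A condensed sketch of that shape; Request, CollisionException and reportCollision() are invented stand-ins for ClientGet/ClientPut/ClientPutDir, IdentifierCollisionException and the IdentifierCollisionMessage handling:

    import com.db4o.ObjectContainer;

    import freenet.client.async.ClientContext;
    import freenet.client.async.DBJob;
    import freenet.support.io.NativeThread;

    abstract class PersistentStartSketch {

        static class CollisionException extends Exception {}

        // Stand-in for ClientGet/ClientPut/ClientPutDir.
        interface Request {
            void register(ObjectContainer container, boolean a, boolean b) throws CollisionException;
            void start(ObjectContainer container, ClientContext context);
        }

        abstract void reportCollision(); // queue an IdentifierCollisionMessage

        void registerAndStart(final Request req, ClientContext context) {
            context.jobRunner.queue(new DBJob() {
                public void run(ObjectContainer container, ClientContext context) {
                    try {
                        req.register(container, false, false);
                        container.store(req); // persist before starting
                    } catch (CollisionException e) {
                        reportCollision(); // tell the client; do not start
                        return;
                    }
                    req.start(container, context);
                    container.deactivate(req, 1); // drop from the activation cache
                }
                // HIGH_PRIORITY-1: responsive for the user without starving
                // other database jobs such as the queue page.
            }, NativeThread.HIGH_PRIORITY - 1, false);
        }
    }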
Modified: trunk/freenet/src/freenet/node/fcp/FCPConnectionInputHandler.java
===================================================================
--- trunk/freenet/src/freenet/node/fcp/FCPConnectionInputHandler.java	2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/node/fcp/FCPConnectionInputHandler.java	2009-04-01 20:34:09 UTC (rev 26322)
@@ -9,6 +9,8 @@
import org.tanukisoftware.wrapper.WrapperManager;
+import com.db4o.ObjectContainer;
+
import freenet.support.Logger;
import freenet.support.OOMHandler;
import freenet.support.SimpleFieldSet;
@@ -135,4 +137,9 @@
}
}
}
+
+	public boolean objectCanNew(ObjectContainer container) {
+		throw new UnsupportedOperationException("FCPConnectionInputHandler storage in database not supported");
+	}
+
}
Modified: trunk/freenet/src/freenet/node/fcp/FCPConnectionOutputHandler.java
===================================================================
--- trunk/freenet/src/freenet/node/fcp/FCPConnectionOutputHandler.java	2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/node/fcp/FCPConnectionOutputHandler.java	2009-04-01 20:34:09 UTC (rev 26322)
@@ -8,6 +8,8 @@
import java.io.OutputStream;
import java.util.LinkedList;
+import com.db4o.ObjectContainer;
+
import freenet.support.Logger;
import freenet.support.OOMHandler;
@@ -99,4 +101,8 @@
}
}
+	public boolean objectCanNew(ObjectContainer container) {
+		throw new UnsupportedOperationException("FCPConnectionOutputHandler storage in database not supported");
+	}
+
}
Modified: trunk/freenet/src/freenet/node/fcp/FCPMessage.java
===================================================================
--- trunk/freenet/src/freenet/node/fcp/FCPMessage.java	2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/node/fcp/FCPMessage.java	2009-04-01 20:34:09 UTC (rev 26322)
@@ -3,6 +3,8 @@
import java.io.IOException;
import java.io.OutputStream;
+import com.db4o.ObjectContainer;
+
import freenet.node.Node;
import freenet.support.Logger;
import freenet.support.SimpleFieldSet;
@@ -108,4 +110,7 @@
* @throws MessageInvalidException */
 	public abstract void run(FCPConnectionHandler handler, Node node) throws MessageInvalidException;
+	/** Remove this message and its dependencies (internal objects) from the database. */
+ public abstract void removeFrom(ObjectContainer container);
+
}
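The contract for removeFrom() is recursive ownership: a message deletes whatever persisted sub-objects it owns, then itself; stateless messages collapse to container.delete(this), as the many four-line implementations in this commit do. A sketch of the non-trivial case, mirroring DirectDirPutFile.removeFrom() above; HypotheticalMessage and its field are invented:

    import com.db4o.ObjectContainer;

    import freenet.keys.FreenetURI;

    // Invented example; in the real hierarchy this would extend FCPMessage.
    class HypotheticalMessage {
        private FreenetURI uri; // persisted sub-object owned by this message

        public void removeFrom(ObjectContainer container) {
            if(uri != null) {
                container.activate(uri, 1); // ensure it is loaded before touching it
                uri.removeFrom(container);  // owned objects first...
            }
            container.delete(this);         // ...then the message itself
        }
    }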
Copied: trunk/freenet/src/freenet/node/fcp/FCPPersistentRoot.java (from rev 26320, branches/db4o/freenet/src/freenet/node/fcp/FCPPersistentRoot.java)
===================================================================
--- trunk/freenet/src/freenet/node/fcp/FCPPersistentRoot.java	(rev 0)
+++ trunk/freenet/src/freenet/node/fcp/FCPPersistentRoot.java	2009-04-01 20:34:09 UTC (rev 26322)
@@ -0,0 +1,90 @@
+/* This code is part of Freenet. It is distributed under the GNU General
+ * Public License, version 2 (or at your option any later version). See
+ * http://www.gnu.org/ for further details of the GPL. */
+package freenet.node.fcp;
+
+import com.db4o.ObjectContainer;
+import com.db4o.ObjectSet;
+import com.db4o.query.Constraint;
+import com.db4o.query.Predicate;
+import com.db4o.query.Query;
+
+import freenet.node.NodeClientCore;
+import freenet.support.Logger;
+
+/**
+ * Persistent root object for FCP.
+ * @author toad
+ */
+public class FCPPersistentRoot {
+
+	final long nodeDBHandle;
+	final FCPClient globalForeverClient;
+
+	public FCPPersistentRoot(long nodeDBHandle, ObjectContainer container) {
+		this.nodeDBHandle = nodeDBHandle;
+		globalForeverClient = new FCPClient("Global Queue", null, true, null, ClientRequest.PERSIST_FOREVER, this, container);
+	}
+
+	public static FCPPersistentRoot create(final long nodeDBHandle, ObjectContainer container) {
+		ObjectSet<FCPPersistentRoot> set = container.query(new Predicate<FCPPersistentRoot>() {
+			public boolean match(FCPPersistentRoot root) {
+				return root.nodeDBHandle == nodeDBHandle;
+			}
+		});
+		System.err.println("Count of roots: "+set.size());
+		if(set.hasNext()) {
+			System.err.println("Loaded FCP persistent root.");
+			FCPPersistentRoot root = set.next();
+			container.activate(root, 2);
+			root.globalForeverClient.init(container);
+			return root;
+		}
+		FCPPersistentRoot root = new FCPPersistentRoot(nodeDBHandle, container);
+		container.store(root);
+		System.err.println("Created FCP persistent root.");
+		return root;
+	}
+
+	public FCPClient registerForeverClient(final String name, NodeClientCore core, FCPConnectionHandler handler, FCPServer server, ObjectContainer container) {
+		if(Logger.shouldLog(Logger.MINOR, this)) Logger.minor(this, "Registering forever-client for "+name);
+		/**
+		 * FIXME DB4O:
+		 * Native queries involving strings seem to do weird things. I was getting
+		 * the global queue returned here even though I compared with the passed-in
+		 * name! :<
+		 * FIXME reproduce and file a bug for db4o.
+		 */
+		Query query = container.query();
+		query.constrain(FCPClient.class);
+		// Don't constrain by root because that set is huge.
+		// I think that was the cause of the OOMs here...
+		Constraint con = query.descend("name").constrain(name);
+		con.and(query.descend("root").constrain(this).identity());
+		ObjectSet set = query.execute();
+		while(set.hasNext()) {
+			FCPClient client = (FCPClient) set.next();
+			container.activate(client, 1);
+			if(client.root != this) {
+				container.deactivate(client, 1);
+				continue;
+			}
+			client.setConnection(handler);
+			if(!(name.equals(client.name)))
+				Logger.error(this, "Returning "+client+" for "+name);
+			if(Logger.shouldLog(Logger.MINOR, this)) Logger.minor(this, "Returning "+client+" for "+name);
+			client.init(container);
+			return client;
+		}
+		FCPClient client = new FCPClient(name, handler, false, null, ClientRequest.PERSIST_FOREVER, this, container);
+		container.store(client);
+		return client;
+	}
+
+	public void maybeUnregisterClient(FCPClient client, ObjectContainer container) {
+		if(!client.hasPersistentRequests(container)) {
+			client.removeFromDatabase(container);
+		}
+	}
+
+}
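The FIXME in registerForeverClient() is the reason it uses a hand-built SODA query while create() above gets away with a native (Predicate) query on a long field: the string-comparing native query was observed returning the wrong client. For reference, the two db4o query styles side by side, with Item and its fields invented for illustration:

    import com.db4o.ObjectContainer;
    import com.db4o.ObjectSet;
    import com.db4o.query.Predicate;
    import com.db4o.query.Query;

    class QueryStyles {

        static class Item {
            String name;
            Object owner;
        }

        // Native query: the match() predicate runs (or is translated) per object.
        static ObjectSet<Item> byNameNative(ObjectContainer db, final String name) {
            return db.query(new Predicate<Item>() {
                public boolean match(Item item) {
                    return name.equals(item.name);
                }
            });
        }

        // SODA query: descend into fields explicitly; identity() compares by
        // object reference, which is how registerForeverClient() pins the root.
        static ObjectSet byNameAndOwnerSoda(ObjectContainer db, String name, Object owner) {
            Query q = db.query();
            q.constrain(Item.class);
            q.descend("name").constrain(name)
                .and(q.descend("owner").constrain(owner).identity());
            return q.execute();
        }
    }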
Modified: trunk/freenet/src/freenet/node/fcp/FCPPluginMessage.java
===================================================================
--- trunk/freenet/src/freenet/node/fcp/FCPPluginMessage.java	2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/node/fcp/FCPPluginMessage.java	2009-04-01 20:34:09 UTC (rev 26322)
@@ -3,6 +3,8 @@
* http://www.gnu.org/ for further details of the GPL. */
package freenet.node.fcp;
+import com.db4o.ObjectContainer;
+
import freenet.node.Node;
import freenet.pluginmanager.PluginNotFoundException;
import freenet.pluginmanager.PluginTalker;
@@ -111,4 +113,8 @@
}
+ public void removeFrom(ObjectContainer container) {
+ throw new UnsupportedOperationException();
+ }
+
}
Modified: trunk/freenet/src/freenet/node/fcp/FCPPluginReply.java
===================================================================
--- trunk/freenet/src/freenet/node/fcp/FCPPluginReply.java	2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/node/fcp/FCPPluginReply.java	2009-04-01 20:34:09 UTC (rev 26322)
@@ -3,6 +3,8 @@
* http://www.gnu.org/ for further details of the GPL. */
package freenet.node.fcp;
+import com.db4o.ObjectContainer;
+
import freenet.node.Node;
import freenet.support.SimpleFieldSet;
import freenet.support.api.Bucket;
@@ -79,4 +81,8 @@
 		throw new MessageInvalidException(ProtocolErrorMessage.INVALID_MESSAGE, NAME + " goes from server to client not the other way around", null, false);
 	}
+ public void removeFrom(ObjectContainer container) {
+ throw new UnsupportedOperationException();
+ }
+
}
Modified: trunk/freenet/src/freenet/node/fcp/FCPServer.java
===================================================================
--- trunk/freenet/src/freenet/node/fcp/FCPServer.java	2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/node/fcp/FCPServer.java	2009-04-01 20:34:09 UTC (rev 26322)
@@ -4,16 +4,12 @@
package freenet.node.fcp;
import java.io.BufferedInputStream;
-import java.io.BufferedOutputStream;
import java.io.BufferedReader;
-import java.io.BufferedWriter;
import java.io.File;
import java.io.FileInputStream;
-import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
-import java.io.OutputStreamWriter;
import java.net.Socket;
import java.util.ArrayList;
import java.util.Iterator;
@@ -21,14 +17,20 @@
import java.util.List;
import java.util.WeakHashMap;
import java.util.zip.GZIPInputStream;
-import java.util.zip.GZIPOutputStream;
import org.tanukisoftware.wrapper.WrapperManager;
+import com.db4o.ObjectContainer;
+
+import freenet.client.ClientMetadata;
import freenet.client.DefaultMIMETypes;
import freenet.client.FetchContext;
+import freenet.client.FetchResult;
import freenet.client.HighLevelSimpleClient;
import freenet.client.InsertContext;
+import freenet.client.TempFetchResult;
+import freenet.client.async.ClientContext;
+import freenet.client.async.DBJob;
import freenet.config.Config;
import freenet.config.InvalidConfigValueException;
import freenet.config.SubConfig;
@@ -43,13 +45,15 @@
import freenet.node.RequestStarter;
import freenet.support.Base64;
import freenet.support.Logger;
+import freenet.support.MutableBoolean;
import freenet.support.OOMHandler;
import freenet.support.api.BooleanCallback;
import freenet.support.api.Bucket;
import freenet.support.api.IntCallback;
-import freenet.support.api.LongCallback;
import freenet.support.api.StringCallback;
+import freenet.support.io.BucketTools;
import freenet.support.io.Closer;
+import freenet.support.io.NativeThread;
import freenet.support.io.FileUtil;
/**
@@ -57,6 +61,7 @@
*/
public class FCPServer implements Runnable {
+ final FCPPersistentRoot persistentRoot;
private static boolean logMINOR;
public final static int DEFAULT_FCP_PORT = 9481;
NetworkInterface networkInterface;
@@ -68,8 +73,9 @@
String bindTo;
private String allowedHosts;
AllowedHosts allowedHostsFullAccess;
- final WeakHashMap<String, FCPClient> clientsByName;
- final FCPClient globalClient;
+ final WeakHashMap<String, FCPClient> rebootClientsByName;
+ final FCPClient globalRebootClient;
+ final FCPClient globalForeverClient;
private boolean enablePersistentDownloads;
private File persistentDownloadsFile;
private File persistentDownloadsTempFile;
@@ -77,32 +83,18 @@
 	 * MUST ALWAYS BE THE OUTERMOST LOCK.
 	 */
 	private final Object persistenceSync = new Object();
-	private FCPServerPersister persister;
-	private boolean haveLoadedPersistentRequests;
-	private long persistenceInterval;
 	final FetchContext defaultFetchContext;
 	public InsertContext defaultInsertContext;
 	public static final int QUEUE_MAX_RETRIES = -1;
 	public static final long QUEUE_MAX_DATA_SIZE = Long.MAX_VALUE;
-	private boolean canStartPersister = false;
 	private boolean assumeDownloadDDAIsAllowed;
 	private boolean assumeUploadDDAIsAllowed;
 	private boolean hasFinishedStart;
-	private void startPersister() {
-		node.executor.execute(persister = new FCPServerPersister(), "FCP request persister");
-	}
-
-	private void killPersister() {
-		persister.kill();
-		persister = null;
-	}
-
-	public FCPServer(String ipToBindTo, String allowedHosts, String allowedHostsFullAccess, int port, Node node, NodeClientCore core, boolean persistentDownloadsEnabled, String persistentDownloadsDir, long persistenceInterval, boolean isEnabled, boolean assumeDDADownloadAllowed, boolean assumeDDAUploadAllowed) throws IOException, InvalidConfigValueException {
+	public FCPServer(String ipToBindTo, String allowedHosts, String allowedHostsFullAccess, int port, Node node, NodeClientCore core, boolean persistentDownloadsEnabled, String persistentDownloadsDir, boolean isEnabled, boolean assumeDDADownloadAllowed, boolean assumeDDAUploadAllowed, ObjectContainer container) throws IOException, InvalidConfigValueException {
 		this.bindTo = ipToBindTo;
 		this.allowedHosts=allowedHosts;
 		this.allowedHostsFullAccess = new AllowedHosts(allowedHostsFullAccess);
-		this.persistenceInterval = persistenceInterval;
 		this.port = port;
 		this.enabled = isEnabled;
 		this.enablePersistentDownloads = persistentDownloadsEnabled;
@@ -111,7 +103,7 @@
this.core = core;
this.assumeDownloadDDAIsAllowed = assumeDDADownloadAllowed;
this.assumeUploadDDAIsAllowed = assumeDDAUploadAllowed;
- clientsByName = new WeakHashMap<String, FCPClient>();
+ rebootClientsByName = new WeakHashMap<String, FCPClient>();
 		// This one is only used to get the default settings. Individual FCP conns
 		// will make their own.
@@ -119,12 +111,17 @@
defaultFetchContext = client.getFetchContext();
defaultInsertContext = client.getInsertContext(false);
-		globalClient = new FCPClient("Global Queue", this, null, true, null);
+		globalRebootClient = new FCPClient("Global Queue", null, true, null, ClientRequest.PERSIST_REBOOT, null, null);
 		logMINOR = Logger.shouldLog(Logger.MINOR, this);
+		persistentRoot = FCPPersistentRoot.create(node.nodeDBHandle, container);
+		globalForeverClient = persistentRoot.globalForeverClient;
+
 		if(enabled && enablePersistentDownloads) {
-			loadPersistentRequests();
+			Logger.error(this, "Persistent downloads enabled: attempting to migrate old persistent downloads to database...");
+			Logger.error(this, "Note that we will not write to downloads.dat.gz, we will read from it and rename it if migration is successful.");
+			loadPersistentRequests(container);
 		} else {
 			Logger.error(this, "Not loading persistent requests: enabled="+enabled+" enable persistent downloads="+enablePersistentDownloads);
 		}
@@ -328,6 +325,22 @@
}
}
+	static class PersistentDownloadsEnabledCallback extends BooleanCallback {
+
+		boolean enabled;
+
+		public Boolean get() {
+			return enabled;
+		}
+
+		public void set(Boolean set) throws InvalidConfigValueException {
+			// This option will be removed completely soon, so there is little
+			// point in translating it. FIXME remove.
+			if(set.booleanValue() != enabled) throw new InvalidConfigValueException("Cannot disable/enable persistent download loading support on the fly");
+		}
+
+	}
+
static class FCPAllowedHostsFullAccessCallback extends StringCallback {
private final NodeClientCore node;
@@ -348,24 +361,6 @@
}
}
-
-    static class PersistentDownloadsEnabledCallback extends BooleanCallback {
-
- FCPServer server;
-
- @Override
- public Boolean get() {
- return server.persistentDownloadsEnabled();
- }
-
- @Override
- public void set(Boolean set) {
- if(server.persistentDownloadsEnabled() != set)
- server.setPersistentDownloadsEnabled(set);
- }
-
- }
-
static class PersistentDownloadsFileCallback extends StringCallback {
FCPServer server;
@@ -383,27 +378,6 @@
}
}
- static class PersistentDownloadsIntervalCallback extends LongCallback {
-
- FCPServer server;
-
- @Override
- public Long get() {
- return server.persistenceInterval;
- }
-
- @Override
- public void set(Long value) {
- server.persistenceInterval = value;
- FCPServerPersister p = server.persister;
- if(p != null) {
- synchronized(p) {
- p.notifyAll();
- }
- }
- }
- }
-
    static class AssumeDDADownloadIsAllowedCallback extends BooleanCallback {
FCPServer server;
@@ -437,7 +411,7 @@
}
-    public static FCPServer maybeCreate(Node node, NodeClientCore core, Config config) throws IOException, InvalidConfigValueException {
+    public static FCPServer maybeCreate(Node node, NodeClientCore core, Config config, ObjectContainer container) throws IOException, InvalidConfigValueException {
        SubConfig fcpConfig = new SubConfig("fcp", config);
        short sortOrder = 0;
        fcpConfig.register("enabled", true, sortOrder++, true, false, "FcpServer.isEnabled", "FcpServer.isEnabledLong", new FCPEnabledCallback(core));
@@ -446,15 +420,14 @@
fcpConfig.register("bindTo", NetworkInterface.DEFAULT_BIND_TO,
sortOrder++, true, true, "FcpServer.bindTo", "FcpServer.bindToLong", new
FCPBindtoCallback(core));
fcpConfig.register("allowedHosts",
NetworkInterface.DEFAULT_BIND_TO, sortOrder++, true, true,
"FcpServer.allowedHosts", "FcpServer.allowedHostsLong", new
FCPAllowedHostsCallback(core));
fcpConfig.register("allowedHostsFullAccess",
NetworkInterface.DEFAULT_BIND_TO, sortOrder++, true, true,
"FcpServer.allowedHostsFullAccess", "FcpServer.allowedHostsFullAccessLong", new
FCPAllowedHostsFullAccessCallback(core));
- PersistentDownloadsEnabledCallback cb1;
PersistentDownloadsFileCallback cb2;
- PersistentDownloadsIntervalCallback cb3;
- fcpConfig.register("persistentDownloadsEnabled", true,
sortOrder++, true, true, "FcpServer.enablePersistentDownload",
"FcpServer.enablePersistentDownloadLong", cb1 = new
PersistentDownloadsEnabledCallback());
+ PersistentDownloadsEnabledCallback enabledCB = new
PersistentDownloadsEnabledCallback();
+ fcpConfig.register("persistentDownloadsEnabled", true,
sortOrder++, true, true, "FcpServer.enablePersistentDownload",
"FcpServer.enablePersistentDownloadLong", enabledCB);
fcpConfig.register("persistentDownloadsFile", "downloads.dat",
sortOrder++, true, false, "FcpServer.filenameToStorePData",
"FcpServer.filenameToStorePDataLong", cb2 = new
PersistentDownloadsFileCallback());
- fcpConfig.register("persistentDownloadsInterval", (5*60*1000),
sortOrder++, true, false, "FcpServer.intervalBetweenWrites",
"FcpServer.intervalBetweenWritesLong", cb3 = new
PersistentDownloadsIntervalCallback());
+ fcpConfig.register("persistentDownloadsInterval", (5*60*1000),
sortOrder++, true, false, "FcpServer.intervalBetweenWrites",
"FcpServer.intervalBetweenWritesLong", (IntCallback) null);
String persistentDownloadsDir =
fcpConfig.getString("persistentDownloadsFile");
- boolean persistentDownloadsEnabled =
fcpConfig.getBoolean("persistentDownloadsEnabled");
- long persistentDownloadsInterval =
fcpConfig.getLong("persistentDownloadsInterval");
+ boolean persistentDownloadsEnabled =
fcpConfig.getBoolean("persistentDownloadsEnabled");
+ enabledCB.enabled = persistentDownloadsEnabled;
AssumeDDADownloadIsAllowedCallback cb4;
AssumeDDAUploadIsAllowedCallback cb5;
@@ -465,12 +438,10 @@
ssl = fcpConfig.getBoolean("ssl");
}
-        FCPServer fcp = new FCPServer(fcpConfig.getString("bindTo"), fcpConfig.getString("allowedHosts"), fcpConfig.getString("allowedHostsFullAccess"), fcpConfig.getInt("port"), node, core, persistentDownloadsEnabled, persistentDownloadsDir, persistentDownloadsInterval, fcpConfig.getBoolean("enabled"), fcpConfig.getBoolean("assumeDownloadDDAIsAllowed"), fcpConfig.getBoolean("assumeUploadDDAIsAllowed"));
+        FCPServer fcp = new FCPServer(fcpConfig.getString("bindTo"), fcpConfig.getString("allowedHosts"), fcpConfig.getString("allowedHostsFullAccess"), fcpConfig.getInt("port"), node, core, persistentDownloadsEnabled, persistentDownloadsDir, fcpConfig.getBoolean("enabled"), fcpConfig.getBoolean("assumeDownloadDDAIsAllowed"), fcpConfig.getBoolean("assumeUploadDDAIsAllowed"), container);
if(fcp != null) {
- cb1.server = fcp;
cb2.server = fcp;
- cb3.server = fcp;
cb4.server = fcp;
cb5.server = fcp;
}
@@ -530,35 +501,14 @@
return L10n.getString("FcpServer."+key, pattern, value);
}
- public void setPersistentDownloadsEnabled(boolean set) {
- synchronized(this) {
- if(enablePersistentDownloads == set) return;
- enablePersistentDownloads = set;
- }
- synchronized(persistenceSync) {
- if(set) {
- if(!haveLoadedPersistentRequests)
- loadPersistentRequests();
- if(canStartPersister)
- startPersister();
- } else {
- killPersister();
- }
- }
- }
-
- public synchronized boolean persistentDownloadsEnabled() {
- return enablePersistentDownloads;
- }
-
-    public FCPClient registerClient(String name, NodeClientCore core, FCPConnectionHandler handler) {
+    public FCPClient registerRebootClient(String name, NodeClientCore core, FCPConnectionHandler handler) {
        FCPClient oldClient;
        synchronized(this) {
-            oldClient = clientsByName.get(name);
+            oldClient = rebootClientsByName.get(name);
            if(oldClient == null) {
                // Create new client
-                FCPClient client = new FCPClient(name, this, handler, false, null);
-                clientsByName.put(name, client);
+                FCPClient client = new FCPClient(name, handler, false, null, ClientRequest.PERSIST_REBOOT, null, null);
+                rebootClientsByName.put(name, client);
                return client;
            } else {
                FCPConnectionHandler oldConn = oldClient.getConnection();
@@ -566,8 +516,10 @@
if(oldConn == null) {
// Easy
oldClient.setConnection(handler);
+ return oldClient;
} else {
// Kill old connection
+ oldConn.setKilledDupe();
                    oldConn.outputHandler.queue(new CloseConnectionDuplicateClientNameMessage());
oldConn.close();
oldClient.setConnection(handler);
@@ -575,149 +527,25 @@
}
}
}
-        if(handler != null)
-            oldClient.queuePendingMessagesOnConnectionRestart(handler.outputHandler);
- return oldClient;
}
+
+    public FCPClient registerForeverClient(String name, NodeClientCore core, FCPConnectionHandler handler, ObjectContainer container) {
+        return persistentRoot.registerForeverClient(name, core, handler, this, container);
+    }
-    public void unregisterClient(FCPClient client) {
+    public void unregisterClient(FCPClient client, ObjectContainer container) {
+ if(client.persistenceType == ClientRequest.PERSIST_REBOOT) {
+ assert(container == null);
synchronized(this) {
String name = client.name;
- clientsByName.remove(name);
+ rebootClientsByName.remove(name);
}
- }
-
- class FCPServerPersister implements Runnable {
-
- private boolean killed;
- private boolean storeNow;
-
- public void force() {
- synchronized(this) {
- storeNow = true;
- notifyAll();
- }
- }
-
- void kill() {
- synchronized(this) {
- killed = true;
- notifyAll();
- }
- }
-
- public void run() {
- freenet.support.Logger.OSThread.logPID(this);
- while(true) {
- long startTime = System.currentTimeMillis();
- try {
- storePersistentRequests();
- } catch (Throwable t) {
- Logger.error(this, "Caught "+t, t);
- }
-                long delta = System.currentTimeMillis() - startTime;
-                synchronized(this) {
-                    long delay = Math.max(persistenceInterval, delta * 20);
-                    if(killed) return;
-                    startTime = System.currentTimeMillis();
-                    long now;
-                    while(((now = System.currentTimeMillis()) < startTime + delay) && !storeNow) {
-                        try {
-                            long wait = Math.max((startTime + delay) - now, Integer.MAX_VALUE);
-                            if(wait > 0)
-                                wait(Math.min(wait, 5000));
-                        } catch (InterruptedException e) {
- // Ignore
- }
- if(killed) return;
- }
- storeNow = false;
- }
- }
- }
-
- }
-
- public void forceStorePersistentRequests() {
- if(logMINOR) Logger.minor(this, "Forcing store persistent
requests");
- if(!enablePersistentDownloads) return;
- if(persister != null) {
- persister.force();
} else {
- if(canStartPersister)
- Logger.error(this, "Persister not running,
cannot store persistent requests");
+ persistentRoot.maybeUnregisterClient(client, container);
}
}
-
- /** Store all persistent requests to disk */
- private void storePersistentRequests() {
- logMINOR = Logger.shouldLog(Logger.MINOR, this);
- if(logMINOR) Logger.minor(this, "Storing persistent requests");
- ClientRequest[] persistentRequests = getPersistentRequests();
- if(logMINOR) Logger.minor(this, "Persistent requests count:
"+persistentRequests.length);
- LinkedList<Bucket> toFree = null;
- try {
- synchronized(persistenceSync) {
- toFree =
core.persistentTempBucketFactory.grabBucketsToFree();
-
- File compressedTemp = new
File(persistentDownloadsTempFile+".gz");
- File compressedFinal = new
File(persistentDownloadsFile.toString()+".gz");
- compressedTemp.delete();
-
- FileOutputStream fos = null;
- BufferedOutputStream bos = null;
- GZIPOutputStream gos = null;
- OutputStreamWriter osw = null;
- BufferedWriter w = null;
-
- try {
- fos = new
FileOutputStream(compressedTemp);
- bos = new BufferedOutputStream(fos);
- gos = new GZIPOutputStream(bos);
- osw = new OutputStreamWriter(gos,
"UTF-8");
- w = new BufferedWriter(osw);
-
w.write(Integer.toString(persistentRequests.length)+ '\n');
- for (ClientRequest persistentRequest :
persistentRequests)
- persistentRequest.write(w);
-
- w.flush();
- w.close();
- FileUtil.renameTo(compressedTemp,
compressedFinal);
- } catch (IOException e) {
- Logger.error(this, "Cannot write
persistent requests to disk: "+e);
- } finally {
- Closer.close(w);
- Closer.close(osw);
- Closer.close(gos);
- Closer.close(bos);
- Closer.close(fos);
- }
- }
- if(logMINOR) Logger.minor(this, "Stored persistent
requests");
- } finally {
- if(toFree != null) {
- long freedBuckets = 0;
- for (Bucket current : toFree) {
- try {
- current.free();
- freedBuckets++;
- } catch(Throwable t) {
- try {
-                        System.err.println("Caught " + t + " trying to free bucket " + current);
-                        t.printStackTrace();
-                    } catch(Throwable t1) { /* ignore */ }
- }
- }
- // Help it to be collected
- toFree.clear();
- toFree = null;
- if(logMINOR)
-                Logger.minor(this, "We have freed "+freedBuckets+" persistent buckets");
- }
- }
- }
- private void loadPersistentRequests() {
+ private void loadPersistentRequests(ObjectContainer container) {
Logger.normal(this, "Loading persistent requests...");
FileInputStream fis = null;
BufferedInputStream bis = null;
@@ -729,8 +557,7 @@
bis = new BufferedInputStream(gis);
Logger.normal(this, "Loading persistent requests from
"+file);
if (file.length() > 0) {
- loadPersistentRequests(bis);
- haveLoadedPersistentRequests = true;
+ loadPersistentRequests(bis, container);
            } else
                throw new IOException("File empty"); // If it's empty, try the temp file.
} catch (IOException e) {
@@ -743,8 +570,7 @@
try {
fis = new FileInputStream(file);
bis = new BufferedInputStream(fis);
- loadPersistentRequests(bis);
- haveLoadedPersistentRequests = true;
+ loadPersistentRequests(bis, container);
} catch (IOException e1) {
Logger.normal(this, "It's corrupted too : Not
reading any persistent requests from disk: "+e1);
return;
@@ -756,7 +582,7 @@
}
}
-    private void loadPersistentRequests(InputStream is) throws IOException {
+    private void loadPersistentRequests(InputStream is, ObjectContainer container) throws IOException {
        synchronized(persistenceSync) {
            InputStreamReader ris = new InputStreamReader(is, "UTF-8");
BufferedReader br = new BufferedReader(ris);
@@ -772,7 +598,7 @@
for(int i = 0; i < count; i++) {
                WrapperManager.signalStarting(20 * 60 * 1000); // 20 minutes per request; must be >ds lock timeout (10 minutes)
                System.out.println("Loading persistent request " + (i + 1) + " of " + count + "..."); // humans count from 1..
-                ClientRequest.readAndRegister(br, this);
+                ClientRequest.readAndRegister(br, this, container, core.clientContext);
            }
            Logger.normal(this, "Loaded "+count+" persistent requests");
}
@@ -783,37 +609,204 @@
}
}
- private ClientRequest[] getPersistentRequests() {
+ public ClientRequest[] getGlobalRequests(ObjectContainer container) {
List<ClientRequest> v = new ArrayList<ClientRequest>();
- synchronized(this) {
-            Iterator<FCPClient> i = clientsByName.values().iterator();
- while(i.hasNext()) {
- FCPClient client = (i.next());
- client.addPersistentRequests(v, true);
+ globalRebootClient.addPersistentRequests(v, false, null);
+ if(!container.ext().isActive(globalForeverClient)) {
+ Logger.error(this, "Somebody deactivated the global
queue!");
+ container.activate(globalForeverClient, 2);
+ }
+ globalForeverClient.addPersistentRequests(v, false, container);
+ return (ClientRequest[]) v.toArray(new ClientRequest[v.size()]);
+ }
+
+    public boolean removeGlobalRequestBlocking(final String identifier) throws MessageInvalidException {
+        if(!globalRebootClient.removeByIdentifier(identifier, true, this, null, core.clientContext)) {
+ final Object sync = new Object();
+ final MutableBoolean done = new MutableBoolean();
+ final MutableBoolean success = new MutableBoolean();
+ done.value = false;
+ core.clientContext.jobRunner.queue(new DBJob() {
+
+                public void run(ObjectContainer container, ClientContext context) {
+                    boolean succeeded = false;
+                    try {
+                        succeeded = globalForeverClient.removeByIdentifier(identifier, true, FCPServer.this, container, core.clientContext);
+                    } catch (Throwable t) {
+                        Logger.error(this, "Caught removing identifier "+identifier+": "+t, t);
+                    } finally {
+                        synchronized(sync) {
+                            success.value = succeeded;
+ done.value = true;
+ sync.notifyAll();
+ }
+ }
+ }
+
+ }, NativeThread.HIGH_PRIORITY, false);
+ synchronized(sync) {
+ while(!done.value) {
+ try {
+ sync.wait();
+ } catch (InterruptedException e) {
+ // Ignore
+ }
+ }
+ return success.value;
}
- globalClient.addPersistentRequests(v, true);
+ } else return true;
+ }
+
+ public boolean removeAllGlobalRequestsBlocking() {
+ globalRebootClient.removeAll(null, core.clientContext);
+
+ final Object sync = new Object();
+ final MutableBoolean done = new MutableBoolean();
+ final MutableBoolean success = new MutableBoolean();
+ done.value = false;
+ core.clientContext.jobRunner.queue(new DBJob() {
+
+            public void run(ObjectContainer container, ClientContext context) {
+                boolean succeeded = false;
+                try {
+                    globalForeverClient.removeAll(container, core.clientContext);
+                    succeeded = true;
+                } catch (Throwable t) {
+                    Logger.error(this, "Caught while processing panic: "+t, t);
+                    System.err.println("PANIC INCOMPLETE: CAUGHT "+t);
+                    t.printStackTrace();
+                    System.err.println("Your requests have not been deleted!");
+ } finally {
+ synchronized(sync) {
+ success.value = succeeded;
+ done.value = true;
+ sync.notifyAll();
+ }
+ }
+ }
+
+ }, NativeThread.HIGH_PRIORITY, false);
+ synchronized(sync) {
+ while(!done.value) {
+ try {
+ sync.wait();
+ } catch (InterruptedException e) {
+ // Ignore
+ }
+ }
+ return success.value;
}
- return v.toArray(new ClientRequest[v.size()]);
}
- public ClientRequest[] getGlobalRequests() {
- List<ClientRequest> v = new ArrayList<ClientRequest>();
- globalClient.addPersistentRequests(v, false);
- return v.toArray(new ClientRequest[v.size()]);
+    public void makePersistentGlobalRequestBlocking(final FreenetURI fetchURI, final String expectedMimeType, final String persistenceTypeString, final String returnTypeString) throws NotAllowedException, IOException {
+ class OutputWrapper {
+ NotAllowedException ne;
+ IOException ioe;
+ boolean done;
+ }
+
+ final OutputWrapper ow = new OutputWrapper();
+ core.clientContext.jobRunner.queue(new DBJob() {
+
+            public void run(ObjectContainer container, ClientContext context) {
+ NotAllowedException ne = null;
+ IOException ioe = null;
+ try {
+                    makePersistentGlobalRequest(fetchURI, expectedMimeType, persistenceTypeString, returnTypeString, container);
+ } catch (NotAllowedException e) {
+ ne = e;
+ } catch (IOException e) {
+ ioe = e;
+ } catch (Throwable t) {
+                    // Unexpected and severe, might even be OOM, just log it.
+                    Logger.error(this, "Failed to make persistent request: "+t, t);
+ } finally {
+ synchronized(ow) {
+ ow.ne = ne;
+ ow.ioe = ioe;
+ ow.done = true;
+ ow.notifyAll();
+ }
+ }
+ }
+
+ }, NativeThread.HIGH_PRIORITY, false);
+
+ synchronized(ow) {
+ while(true) {
+ if(!ow.done) {
+ try {
+ ow.wait();
+ } catch (InterruptedException e) {
+ // Ignore
+ }
+ continue;
+ }
+ if(ow.ioe != null) throw ow.ioe;
+ if(ow.ne != null) throw ow.ne;
+ return;
+ }
+ }
}
+
+    public boolean modifyGlobalRequestBlocking(final String identifier, final String newToken, final short newPriority) {
+        ClientRequest req = this.globalRebootClient.getRequest(identifier, null);
+ if(req != null) {
+ req.modifyRequest(newToken, newPriority, this, null);
+ return true;
+ } else {
+ class OutputWrapper {
+ boolean success;
+ boolean done;
+ }
+ final OutputWrapper ow = new OutputWrapper();
+ core.clientContext.jobRunner.queue(new DBJob() {
-    public void removeGlobalRequest(String identifier) throws MessageInvalidException {
-        globalClient.removeByIdentifier(identifier, true);
+                public void run(ObjectContainer container, ClientContext context) {
+                    boolean success = false;
+                    try {
+                        ClientRequest req = globalForeverClient.getRequest(identifier, container);
+                        container.activate(req, 1);
+                        if(req != null)
+                            req.modifyRequest(newToken, newPriority, FCPServer.this, container);
+ container.deactivate(req, 1);
+ success = true;
+ } finally {
+ synchronized(ow) {
+ ow.success = success;
+ ow.done = true;
+ ow.notifyAll();
+ }
+ }
+ }
+
+ }, NativeThread.HIGH_PRIORITY, false);
+
+ synchronized(ow) {
+ while(true) {
+ if(!ow.done) {
+ try {
+ ow.wait();
+                    } catch (InterruptedException e) {
+ // Ignore
+ }
+ continue;
+ }
+ return ow.success;
+ }
+ }
+ }
}
-
+
/**
* Create a persistent globally-queued request for a file.
* @param fetchURI The file to fetch.
* @param persistence The persistence type.
* @param returnType The return type.
* @throws NotAllowedException
+ * @throws IOException
*/
-    public void makePersistentGlobalRequest(FreenetURI fetchURI, String expectedMimeType, String persistenceTypeString, String returnTypeString) throws NotAllowedException {
+    public void makePersistentGlobalRequest(FreenetURI fetchURI, String expectedMimeType, String persistenceTypeString, String returnTypeString, ObjectContainer container) throws NotAllowedException, IOException {
        boolean persistence = persistenceTypeString.equalsIgnoreCase("reboot");
        short returnType = ClientGetMessage.parseReturnType(returnTypeString);
File returnFilename = null, returnTempFilename = null;
@@ -827,20 +820,20 @@
        // File returnFilename, File returnTempFilename) throws IdentifierCollisionException {
        try {
-            innerMakePersistentGlobalRequest(fetchURI, persistence, returnType, "FProxy:"+fetchURI.getPreferredFilename(), returnFilename, returnTempFilename);
+            innerMakePersistentGlobalRequest(fetchURI, persistence, returnType, "FProxy:"+fetchURI.getPreferredFilename(), returnFilename, returnTempFilename, container);
            return;
        } catch (IdentifierCollisionException ee) {
            try {
-                innerMakePersistentGlobalRequest(fetchURI, persistence, returnType, "FProxy:"+fetchURI.getDocName(), returnFilename, returnTempFilename);
+                innerMakePersistentGlobalRequest(fetchURI, persistence, returnType, "FProxy:"+fetchURI.getDocName(), returnFilename, returnTempFilename, container);
                return;
            } catch (IdentifierCollisionException e) {
                try {
-                    innerMakePersistentGlobalRequest(fetchURI, persistence, returnType, "FProxy:"+fetchURI.toString(false, false), returnFilename, returnTempFilename);
+                    innerMakePersistentGlobalRequest(fetchURI, persistence, returnType, "FProxy:"+fetchURI.toString(false, false), returnFilename, returnTempFilename, container);
                    return;
                } catch (IdentifierCollisionException e1) {
                    // FIXME maybe use DateFormat
                    try {
-                        innerMakePersistentGlobalRequest(fetchURI, persistence, returnType, "FProxy ("+System.currentTimeMillis()+ ')', returnFilename, returnTempFilename);
+                        innerMakePersistentGlobalRequest(fetchURI, persistence, returnType, "FProxy ("+System.currentTimeMillis()+ ')', returnFilename, returnTempFilename, container);
                        return;
                    } catch (IdentifierCollisionException e2) {
                        while(true) {
@@ -848,7 +841,7 @@
                            try {
                                core.random.nextBytes(buf);
                                String id = "FProxy:"+Base64.encode(buf);
-                                innerMakePersistentGlobalRequest(fetchURI, persistence, returnType, id, returnFilename, returnTempFilename);
+                                innerMakePersistentGlobalRequest(fetchURI, persistence, returnType, id, returnFilename, returnTempFilename, container);
                                return;
                            } catch (IdentifierCollisionException e3) {}
}
@@ -891,17 +884,14 @@
}
    private void innerMakePersistentGlobalRequest(FreenetURI fetchURI, boolean persistRebootOnly, short returnType, String id, File returnFilename,
-            File returnTempFilename) throws IdentifierCollisionException, NotAllowedException {
-        ClientGet cg =
-            new ClientGet(globalClient, fetchURI, defaultFetchContext.localRequestOnly,
+            File returnTempFilename, ObjectContainer container) throws IdentifierCollisionException, NotAllowedException, IOException {
+        final ClientGet cg =
+            new ClientGet(persistRebootOnly ? globalRebootClient : globalForeverClient, fetchURI, defaultFetchContext.localRequestOnly,
                defaultFetchContext.ignoreStore,
                QUEUE_MAX_RETRIES, QUEUE_MAX_RETRIES,
                QUEUE_MAX_DATA_SIZE, returnType,
                persistRebootOnly, id, Integer.MAX_VALUE,
-                RequestStarter.BULK_SPLITFILE_PRIORITY_CLASS, returnFilename, returnTempFilename);
-        // Register before starting, because it may complete immediately, and if it does,
-        // we may end up with it not being removable because it wasn't registered!
-        if(cg.isPersistentForever())
-            forceStorePersistentRequests();
-        cg.start();
+                RequestStarter.BULK_SPLITFILE_PRIORITY_CLASS, returnFilename, returnTempFilename, this);
+        cg.register(container, false, false);
+        cg.start(container, core.clientContext);
}
/**
@@ -909,20 +899,61 @@
* some time to start them.
*/
public void finishStart() {
- this.globalClient.finishStart();
+ node.clientCore.clientContext.jobRunner.queue(new DBJob() {
+
+            public void run(ObjectContainer container, ClientContext context) {
+                globalForeverClient.finishStart(container, context);
+ }
+
+ }, NativeThread.HIGH_PRIORITY-1, false);
- FCPClient[] clients;
+ final FCPClient[] clients;
synchronized(this) {
-            clients = clientsByName.values().toArray(new FCPClient[clientsByName.size()]);
+            clients = rebootClientsByName.values().toArray(new FCPClient[rebootClientsByName.size()]);
}
- for (FCPClient client : clients) {
- client.finishStart();
+ if(clients.length > 0) {
+            node.clientCore.clientContext.jobRunner.queue(new DBJob() {
+
+                public void run(ObjectContainer container, ClientContext context) {
+                    for (FCPClient client : clients) {
+                        container.activate(client, 1);
+                        System.err.println("Migrating client "+client.name);
+                        client.finishStart(container, context);
+ }
+ }
+
+ }, NativeThread.HIGH_PRIORITY-1, false);
}
- if(enablePersistentDownloads)
- startPersister();
- canStartPersister = true;
+ if(enablePersistentDownloads) {
+ boolean movedMain = false;
+ if(logMINOR) {
+ Logger.minor(this, "Persistent downloads file
should be "+persistentDownloadsFile);
+ Logger.minor(this, "Persistent downloads temp
file should be "+persistentDownloadsTempFile);
+ }
+ File from = new
File(persistentDownloadsFile.getPath()+".gz");
+ File fromTemp = new
File(persistentDownloadsTempFile.getPath()+".gz");
+ // Rename
+ if(from.exists()) {
+ File target = new
File(from.getPath()+".old.pre-db4o");
+ if(logMINOR)
+ Logger.minor(this, "Trying to move
"+persistentDownloadsFile+" to "+target);
+ if(from.renameTo(target)) {
+ Logger.error(this, "Successfully
migrated persistent downloads and renamed "+from.getName()+" to
"+target.getName());
+ movedMain = true;
+ }
+ }
+ if(fromTemp.exists()) {
+ File target = new
File(fromTemp.getPath()+".old.pre-db4o");
+ if(logMINOR)
+ Logger.minor(this, "Trying to move
"+fromTemp+" to "+target);
+ if(fromTemp.renameTo(target) && !movedMain)
+ Logger.error(this, "Successfully
migrated persistent downloads and renamed "+fromTemp.getName()+" to
"+target.getName());
+ }
+
+ }
+
hasFinishedStart = true;
}
@@ -932,9 +963,16 @@
*
* @return The global FCP client
*/
- public FCPClient getGlobalClient() {
- return globalClient;
+ public FCPClient getGlobalForeverClient() {
+ return globalForeverClient;
}
+
+    public ClientRequest getGlobalRequest(String identifier, ObjectContainer container) {
+        ClientRequest req = globalRebootClient.getRequest(identifier, null);
+        if(req == null)
+            req = globalForeverClient.getRequest(identifier, container);
+ return req;
+ }
protected boolean isDownloadDDAAlwaysAllowed() {
return assumeDownloadDDAIsAllowed;
@@ -949,8 +987,181 @@
}
public void setCompletionCallback(RequestCompletionCallback cb) {
- if(globalClient.setRequestCompletionCallback(cb) != null)
+ if(globalForeverClient.setRequestCompletionCallback(cb) != null)
Logger.error(this, "Replacing request completion
callback "+cb, new Exception("error"));
+ if(globalRebootClient.setRequestCompletionCallback(cb) != null)
+ Logger.error(this, "Replacing request completion
callback "+cb, new Exception("error"));
}
+
+    public void startBlocking(final ClientRequest req) throws IdentifierCollisionException {
+ if(req.persistenceType == ClientRequest.PERSIST_REBOOT) {
+ req.start(null, core.clientContext);
+ } else {
+ class OutputWrapper {
+ boolean done;
+ IdentifierCollisionException collided;
+ }
+ final OutputWrapper ow = new OutputWrapper();
+ core.clientContext.jobRunner.queue(new DBJob() {
+
+                public void run(ObjectContainer container, ClientContext context) {
+                    // Don't activate, it may not be stored yet.
+                    try {
+                        req.register(container, false, false);
+                        req.start(container, context);
+                    } catch (IdentifierCollisionException e) {
+ ow.collided = e;
+ } finally {
+ synchronized(ow) {
+ ow.done = true;
+ ow.notifyAll();
+ }
+ }
+ container.deactivate(req, 1);
+ }
+
+ }, NativeThread.HIGH_PRIORITY, false);
+
+ synchronized(ow) {
+ while(true) {
+ if(!ow.done) {
+ try {
+ ow.wait();
+                        } catch (InterruptedException e) {
+ // Ignore
+ }
+ } else {
+ if(ow.collided != null)
+ throw ow.collided;
+ return;
+ }
+ }
+ }
+ }
+ }
+ public boolean restartBlocking(final String identifier) {
+        ClientRequest req = globalRebootClient.getRequest(identifier, null);
+ if(req != null) {
+ req.restart(null, core.clientContext);
+ return true;
+ } else {
+ class OutputWrapper {
+ boolean done;
+ boolean success;
+ }
+ final OutputWrapper ow = new OutputWrapper();
+ core.clientContext.jobRunner.queue(new DBJob() {
+
+            public void run(ObjectContainer container, ClientContext context) {
+                boolean success = false;
+                try {
+                    ClientRequest req = globalForeverClient.getRequest(identifier, container);
+                    if(req != null) {
+                        req.restart(container, context);
+ success = true;
+ }
+ } finally {
+ synchronized(ow) {
+ ow.success = success;
+ ow.done = true;
+ ow.notifyAll();
+ }
+ }
+ }
+
+ }, NativeThread.HIGH_PRIORITY, false);
+
+ synchronized(ow) {
+ while(true) {
+ if(ow.done) return ow.success;
+ try {
+ ow.wait();
+ } catch (InterruptedException e) {
+ // Ignore
+ }
+ }
+ }
+ }
+ }
+
+
+
+    public TempFetchResult getCompletedRequestBlocking(final FreenetURI key) {
+        ClientGet get = globalRebootClient.getCompletedRequest(key, null);
+        if(get != null) {
+            // FIXME race condition with free() - arrange refcounting for the data to prevent this
+            return new TempFetchResult(new ClientMetadata(get.getMIMEType(null)), get.getBucket(null), false);
+ }
+
+ class OutputWrapper {
+ TempFetchResult result;
+ boolean done;
+ }
+
+ final OutputWrapper ow = new OutputWrapper();
+
+ core.clientContext.jobRunner.queue(new DBJob() {
+
+            public void run(ObjectContainer container, ClientContext context) {
+                TempFetchResult result = null;
+                try {
+                    ClientGet get = globalForeverClient.getCompletedRequest(key, container);
+                    container.activate(get, 1);
+                    if(get != null) {
+                        Bucket origData = get.getBucket(container);
+                        container.activate(origData, 5);
+                        boolean copied = false;
+                        Bucket newData;
+                        try {
+                            newData = origData.createShadow();
+                        } catch (IOException e) {
+                            Logger.error(this, "Caught error "+e+" trying to create shallow copy, copying data...", e);
+                            newData = null;
+                        }
+                        if(newData == null) {
+                            try {
+                                newData = core.tempBucketFactory.makeBucket(origData.size());
+                                BucketTools.copy(origData, newData);
+                            } catch (IOException e) {
+                                Logger.error(this, "Unable to copy data: "+e, e);
+                                result = null;
+                                return;
+                            }
+                            copied = true;
+                        }
+                        result = new TempFetchResult(new ClientMetadata(get.getMIMEType(container)), newData, copied);
+ }
+ container.deactivate(get, 1);
+ } finally {
+ synchronized(ow) {
+ ow.result = result;
+ ow.done = true;
+ ow.notifyAll();
+ }
+ }
+ }
+
+ }, NativeThread.HIGH_PRIORITY, false);
+
+ synchronized(ow) {
+ while(true) {
+ if(ow.done) {
+ return ow.result;
+ } else {
+ try {
+ ow.wait();
+ } catch (InterruptedException e) {
+ // Ignore
+ }
+ }
+ }
+ }
+ }
+
+ public boolean objectCanNew(ObjectContainer container) {
+ Logger.error(this, "Not storing FCPServer in database", new
Exception("error"));
+ return false;
+ }
+
}
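
The *Blocking helpers added to FCPServer above (removeGlobalRequestBlocking, removeAllGlobalRequestsBlocking, makePersistentGlobalRequestBlocking, modifyGlobalRequestBlocking, startBlocking, restartBlocking, getCompletedRequestBlocking) all follow one idiom: FCP threads must not touch the db4o container directly, so they queue a DBJob on the database thread and park on a shared monitor until that job publishes its outcome. A distilled sketch of the idiom follows; the BlockingResult class and runJobBlocking method names are invented for illustration and do not appear in the patch:

    // Sketch of the blocking-DBJob idiom used by the helpers above.
    boolean runJobBlocking(DBJobRunner jobRunner) {
        class BlockingResult {
            boolean done;
            boolean success;
        }
        final BlockingResult result = new BlockingResult();
        jobRunner.queue(new DBJob() {
            public void run(ObjectContainer container, ClientContext context) {
                boolean ok = false;
                try {
                    // ... work against persistent objects, on the database thread ...
                    ok = true;
                } finally {
                    // Always wake the caller, even if the job threw.
                    synchronized(result) {
                        result.success = ok;
                        result.done = true;
                        result.notifyAll();
                    }
                }
            }
        }, NativeThread.HIGH_PRIORITY, false);
        synchronized(result) {
            while(!result.done) {
                try {
                    result.wait();
                } catch (InterruptedException e) {
                    // Ignore and keep waiting, as the helpers above do.
                }
            }
            return result.success;
        }
    }

The wait/notify pair shares a single monitor (the result holder itself), and the finally block guarantees the waiter is released even when the job fails.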
Modified: trunk/freenet/src/freenet/node/fcp/FinishedCompressionMessage.java
===================================================================
--- trunk/freenet/src/freenet/node/fcp/FinishedCompressionMessage.java	2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/node/fcp/FinishedCompressionMessage.java	2009-04-01 20:34:09 UTC (rev 26322)
@@ -3,6 +3,8 @@
* http://www.gnu.org/ for further details of the GPL. */
package freenet.node.fcp;
+import com.db4o.ObjectContainer;
+
import freenet.client.events.FinishedCompressionEvent;
import freenet.node.Node;
import freenet.support.SimpleFieldSet;
@@ -44,4 +46,8 @@
        throw new MessageInvalidException(ProtocolErrorMessage.INVALID_MESSAGE, "FinishedCompression goes from server to client not the other way around", identifier, global);
}
+ public void removeFrom(ObjectContainer container) {
+ container.delete(this);
+ }
+
}
Modified: trunk/freenet/src/freenet/node/fcp/GenerateSSKMessage.java
===================================================================
--- trunk/freenet/src/freenet/node/fcp/GenerateSSKMessage.java	2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/node/fcp/GenerateSSKMessage.java	2009-04-01 20:34:09 UTC (rev 26322)
@@ -3,6 +3,8 @@
* http://www.gnu.org/ for further details of the GPL. */
package freenet.node.fcp;
+import com.db4o.ObjectContainer;
+
import freenet.keys.FreenetURI;
import freenet.keys.InsertableClientSSK;
import freenet.node.Node;
@@ -40,4 +42,8 @@
handler.outputHandler.queue(msg);
}
+ public void removeFrom(ObjectContainer container) {
+ container.delete(this);
+ }
+
}
Modified: trunk/freenet/src/freenet/node/fcp/GetConfig.java
===================================================================
--- trunk/freenet/src/freenet/node/fcp/GetConfig.java	2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/node/fcp/GetConfig.java	2009-04-01 20:34:09 UTC (rev 26322)
@@ -3,6 +3,8 @@
* http://www.gnu.org/ for further details of the GPL. */
package freenet.node.fcp;
+import com.db4o.ObjectContainer;
+
import freenet.node.Node;
import freenet.support.Fields;
import freenet.support.SimpleFieldSet;
@@ -51,5 +53,9 @@
}
        handler.outputHandler.queue(new ConfigData(node, withCurrent, withDefaults, withSortOrder, withExpertFlag, withForceWriteFlag, withShortDescription, withLongDescription, withDataTypes, identifier));
}
+
+ public void removeFrom(ObjectContainer container) {
+ container.delete(this);
+ }
}
Modified: trunk/freenet/src/freenet/node/fcp/GetFailedMessage.java
===================================================================
--- trunk/freenet/src/freenet/node/fcp/GetFailedMessage.java	2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/node/fcp/GetFailedMessage.java	2009-04-01 20:34:09 UTC (rev 26322)
@@ -5,6 +5,8 @@
import java.net.MalformedURLException;
+import com.db4o.ObjectContainer;
+
import freenet.client.FailureCodeTracker;
import freenet.client.FetchException;
import freenet.keys.FreenetURI;
@@ -137,4 +139,16 @@
        throw new MessageInvalidException(ProtocolErrorMessage.INVALID_MESSAGE, "GetFailed goes from server to client not the other way around", identifier, global);
}
+ public void removeFrom(ObjectContainer container) {
+ if(redirectURI != null) {
+ container.activate(redirectURI, 5);
+            redirectURI.removeFrom(container); // URI belongs to the parent which is also being removed.
+ }
+ if(tracker != null) {
+ container.activate(tracker, 5);
+ tracker.removeFrom(container);
+ }
+ container.delete(this);
+ }
+
}
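
Each FCP message class in this commit gains a removeFrom(ObjectContainer) hook for deleting the message from the database. Three shapes recur in the hunks above and below: messages with no persistent children simply delete themselves; messages owning persistent children, like GetFailedMessage here, activate and remove those children first; messages that must never be stored throw. A minimal sketch of the three variants, with hypothetical class names (only the method bodies mirror the patch):

    // Sketch only; these class names are invented to show the pattern.
    class PlainMessage {
        public void removeFrom(ObjectContainer container) {
            container.delete(this); // owns no persistent children
        }
    }
    class MessageWithChildren {
        FreenetURI redirectURI;
        public void removeFrom(ObjectContainer container) {
            if(redirectURI != null) {
                container.activate(redirectURI, 5); // load it before removal
                redirectURI.removeFrom(container);  // owned child goes first
            }
            container.delete(this);
        }
    }
    class NeverStoredMessage {
        public void removeFrom(ObjectContainer container) {
            throw new UnsupportedOperationException(); // never persisted
        }
    }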
Modified: trunk/freenet/src/freenet/node/fcp/GetNode.java
===================================================================
--- trunk/freenet/src/freenet/node/fcp/GetNode.java	2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/node/fcp/GetNode.java	2009-04-01 20:34:09 UTC (rev 26322)
@@ -3,6 +3,8 @@
* http://www.gnu.org/ for further details of the GPL. */
package freenet.node.fcp;
+import com.db4o.ObjectContainer;
+
import freenet.node.Node;
import freenet.support.Fields;
import freenet.support.SimpleFieldSet;
@@ -44,5 +46,9 @@
}
        handler.outputHandler.queue(new NodeData(node, giveOpennetRef, withPrivate, withVolatile, identifier));
}
+
+ public void removeFrom(ObjectContainer container) {
+ container.delete(this);
+ }
}
Modified: trunk/freenet/src/freenet/node/fcp/GetPluginInfo.java
===================================================================
--- trunk/freenet/src/freenet/node/fcp/GetPluginInfo.java	2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/node/fcp/GetPluginInfo.java	2009-04-01 20:34:09 UTC (rev 26322)
@@ -3,6 +3,8 @@
* http://www.gnu.org/ for further details of the GPL. */
package freenet.node.fcp;
+import com.db4o.ObjectContainer;
+
import freenet.node.Node;
import freenet.pluginmanager.PluginInfoWrapper;
import freenet.support.Fields;
@@ -52,5 +54,9 @@
            handler.outputHandler.queue(new PluginInfoMessage(pi, identifier, detailed));
}
}
+
+ public void removeFrom(ObjectContainer container) {
+ container.delete(this);
+ }
}
Modified: trunk/freenet/src/freenet/node/fcp/GetRequestStatusMessage.java
===================================================================
--- trunk/freenet/src/freenet/node/fcp/GetRequestStatusMessage.java	2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/node/fcp/GetRequestStatusMessage.java	2009-04-01 20:34:09 UTC (rev 26322)
@@ -3,9 +3,14 @@
* http://www.gnu.org/ for further details of the GPL. */
package freenet.node.fcp;
+import com.db4o.ObjectContainer;
+
+import freenet.client.async.ClientContext;
+import freenet.client.async.DBJob;
import freenet.node.Node;
import freenet.support.Fields;
import freenet.support.SimpleFieldSet;
+import freenet.support.io.NativeThread;
public class GetRequestStatusMessage extends FCPMessage {
@@ -33,19 +38,32 @@
}
@Override
- public void run(FCPConnectionHandler handler, Node node)
+ public void run(final FCPConnectionHandler handler, Node node)
throws MessageInvalidException {
-        ClientRequest req;
-        if(global) {
-            req = handler.server.globalClient.getRequest(identifier);
-        } else
-            req = handler.getClient().getRequest(identifier);
+        ClientRequest req = handler.getRebootRequest(global, handler, identifier);
        if(req == null) {
-            ProtocolErrorMessage msg = new ProtocolErrorMessage(ProtocolErrorMessage.NO_SUCH_IDENTIFIER, false, null, identifier, global);
-            handler.outputHandler.queue(msg);
+            node.clientCore.clientContext.jobRunner.queue(new DBJob() {
+
+                public void run(ObjectContainer container, ClientContext context) {
+                    ClientRequest req = handler.getForeverRequest(global, handler, identifier, container);
+                    container.activate(req, 1);
+                    if(req == null) {
+                        ProtocolErrorMessage msg = new ProtocolErrorMessage(ProtocolErrorMessage.NO_SUCH_IDENTIFIER, false, null, identifier, global);
+                        handler.outputHandler.queue(msg);
+                    } else {
+                        req.sendPendingMessages(handler.outputHandler, true, true, onlyData, container);
+ }
+ container.deactivate(req, 1);
+ }
+
+ }, NativeThread.NORM_PRIORITY, false);
} else {
-            req.sendPendingMessages(handler.outputHandler, true, true, onlyData);
+            req.sendPendingMessages(handler.outputHandler, true, true, onlyData, null);
}
}
+ public void removeFrom(ObjectContainer container) {
+ container.delete(this);
+ }
+
}
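
The rewritten run() above illustrates the lookup split this commit introduces: PERSIST_REBOOT requests live in memory and are answered directly on the FCP thread, while PERSIST_FOREVER requests live in the database and must be resolved inside a DBJob, bracketed by activate/deactivate so db4o only holds the object graph in memory while it is in use. A sketch of that bracketing, with a hypothetical lookup call (note the hunk above activates before its null check; the more defensive ordering is shown here):

    // Sketch of the activate/deactivate bracketing used inside the DBJobs.
    public void run(ObjectContainer container, ClientContext context) {
        ClientRequest req = lookupRequest(container); // hypothetical lookup
        if(req == null) return;
        container.activate(req, 1);       // pull fields into memory, depth 1
        try {
            // ... read or modify req ...
        } finally {
            container.deactivate(req, 1); // release once done
        }
    }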
Modified: trunk/freenet/src/freenet/node/fcp/IdentifierCollisionMessage.java
===================================================================
--- trunk/freenet/src/freenet/node/fcp/IdentifierCollisionMessage.java	2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/node/fcp/IdentifierCollisionMessage.java	2009-04-01 20:34:09 UTC (rev 26322)
@@ -3,6 +3,8 @@
* http://www.gnu.org/ for further details of the GPL. */
package freenet.node.fcp;
+import com.db4o.ObjectContainer;
+
import freenet.node.Node;
import freenet.support.SimpleFieldSet;
@@ -35,4 +37,8 @@
        throw new MessageInvalidException(ProtocolErrorMessage.INVALID_MESSAGE, "IdentifierCollision goes from server to client not the other way around", identifier, global);
}
+ public void removeFrom(ObjectContainer container) {
+ container.delete(this);
+ }
+
}
Modified: trunk/freenet/src/freenet/node/fcp/ListPeerMessage.java
===================================================================
--- trunk/freenet/src/freenet/node/fcp/ListPeerMessage.java	2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/node/fcp/ListPeerMessage.java	2009-04-01 20:34:09 UTC (rev 26322)
@@ -3,6 +3,8 @@
* http://www.gnu.org/ for further details of the GPL. */
package freenet.node.fcp;
+import com.db4o.ObjectContainer;
+
import freenet.node.Node;
import freenet.node.PeerNode;
import freenet.support.SimpleFieldSet;
@@ -48,4 +50,8 @@
        handler.outputHandler.queue(new PeerMessage(pn, true, true, identifier));
}
+ public void removeFrom(ObjectContainer container) {
+ throw new UnsupportedOperationException();
+ }
+
}
Modified: trunk/freenet/src/freenet/node/fcp/ListPeerNotesMessage.java
===================================================================
--- trunk/freenet/src/freenet/node/fcp/ListPeerNotesMessage.java	2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/node/fcp/ListPeerNotesMessage.java	2009-04-01 20:34:09 UTC (rev 26322)
@@ -3,6 +3,8 @@
* http://www.gnu.org/ for further details of the GPL. */
package freenet.node.fcp;
+import com.db4o.ObjectContainer;
+
import freenet.node.DarknetPeerNode;
import freenet.node.Node;
import freenet.node.PeerNode;
@@ -55,5 +57,9 @@
            handler.outputHandler.queue(new PeerNote(nodeIdentifier, noteText, Node.PEER_NOTE_TYPE_PRIVATE_DARKNET_COMMENT, identifier));
        handler.outputHandler.queue(new EndListPeerNotesMessage(nodeIdentifier, identifier));
}
+
+ public void removeFrom(ObjectContainer container) {
+ throw new UnsupportedOperationException();
+ }
}
Modified: trunk/freenet/src/freenet/node/fcp/ListPeersMessage.java
===================================================================
--- trunk/freenet/src/freenet/node/fcp/ListPeersMessage.java	2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/node/fcp/ListPeersMessage.java	2009-04-01 20:34:09 UTC (rev 26322)
@@ -3,6 +3,8 @@
* http://www.gnu.org/ for further details of the GPL. */
package freenet.node.fcp;
+import com.db4o.ObjectContainer;
+
import freenet.node.Node;
import freenet.node.PeerNode;
import freenet.support.Fields;
@@ -46,5 +48,9 @@
        handler.outputHandler.queue(new EndListPeersMessage(identifier));
}
+
+ public void removeFrom(ObjectContainer container) {
+ container.delete(this);
+ }
}
Modified: trunk/freenet/src/freenet/node/fcp/ListPersistentRequestsMessage.java
===================================================================
--- trunk/freenet/src/freenet/node/fcp/ListPersistentRequestsMessage.java	2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/node/fcp/ListPersistentRequestsMessage.java	2009-04-01 20:34:09 UTC (rev 26322)
@@ -3,8 +3,13 @@
* http://www.gnu.org/ for further details of the GPL. */
package freenet.node.fcp;
+import com.db4o.ObjectContainer;
+
+import freenet.client.async.ClientContext;
+import freenet.client.async.DBJob;
import freenet.node.Node;
import freenet.support.SimpleFieldSet;
+import freenet.support.io.NativeThread;
public class ListPersistentRequestsMessage extends FCPMessage {
@@ -25,15 +30,40 @@
}
@Override
- public void run(FCPConnectionHandler handler, Node node)
+ public void run(final FCPConnectionHandler handler, Node node)
throws MessageInvalidException {
-        handler.getClient().queuePendingMessagesOnConnectionRestart(handler.outputHandler);
-        handler.getClient().queuePendingMessagesFromRunningRequests(handler.outputHandler);
-        if(handler.getClient().watchGlobal) {
-            handler.server.globalClient.queuePendingMessagesOnConnectionRestart(handler.outputHandler);
-            handler.server.globalClient.queuePendingMessagesFromRunningRequests(handler.outputHandler);
+
+        FCPClient rebootClient = handler.getRebootClient();
+
+        rebootClient.queuePendingMessagesOnConnectionRestart(handler.outputHandler, null);
+        rebootClient.queuePendingMessagesFromRunningRequests(handler.outputHandler, null);
+        if(handler.getRebootClient().watchGlobal) {
+            FCPClient globalRebootClient = handler.server.globalRebootClient;
+            globalRebootClient.queuePendingMessagesOnConnectionRestart(handler.outputHandler, null);
+            globalRebootClient.queuePendingMessagesFromRunningRequests(handler.outputHandler, null);
        }
-        handler.outputHandler.queue(new EndListPersistentRequestsMessage());
+
+ node.clientCore.clientContext.jobRunner.queue(new DBJob() {
+
+            public void run(ObjectContainer container, ClientContext context) {
+                FCPClient foreverClient = handler.getForeverClient(container);
+                container.activate(foreverClient, 1);
+                foreverClient.queuePendingMessagesOnConnectionRestart(handler.outputHandler, container);
+                foreverClient.queuePendingMessagesFromRunningRequests(handler.outputHandler, container);
+                if(handler.getRebootClient().watchGlobal) {
+                    FCPClient globalForeverClient = handler.server.globalForeverClient;
+                    globalForeverClient.queuePendingMessagesOnConnectionRestart(handler.outputHandler, container);
+                    globalForeverClient.queuePendingMessagesFromRunningRequests(handler.outputHandler, container);
+                }
+                handler.outputHandler.queue(new EndListPersistentRequestsMessage());
+ container.deactivate(foreverClient, 1);
+ }
+
+ }, NativeThread.HIGH_PRIORITY-1, false);
}
+
+ public void removeFrom(ObjectContainer container) {
+ container.delete(this);
+ }
}
Modified: trunk/freenet/src/freenet/node/fcp/ModifyConfig.java
===================================================================
--- trunk/freenet/src/freenet/node/fcp/ModifyConfig.java	2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/node/fcp/ModifyConfig.java	2009-04-01 20:34:09 UTC (rev 26322)
@@ -3,6 +3,8 @@
* http://www.gnu.org/ for further details of the GPL. */
package freenet.node.fcp;
+import com.db4o.ObjectContainer;
+
import freenet.config.Config;
import freenet.config.Option;
import freenet.config.SubConfig;
@@ -69,4 +71,8 @@
node.clientCore.storeConfig();
        handler.outputHandler.queue(new ConfigData(node, true, false, false, false, false, false, false, false, identifier));
}
+
+ public void removeFrom(ObjectContainer container) {
+ throw new UnsupportedOperationException();
+ }
}
Modified: trunk/freenet/src/freenet/node/fcp/ModifyPeer.java
===================================================================
--- trunk/freenet/src/freenet/node/fcp/ModifyPeer.java	2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/node/fcp/ModifyPeer.java	2009-04-01 20:34:09 UTC (rev 26322)
@@ -3,6 +3,8 @@
* http://www.gnu.org/ for further details of the GPL. */
package freenet.node.fcp;
+import com.db4o.ObjectContainer;
+
import freenet.node.DarknetPeerNode;
import freenet.node.Node;
import freenet.node.PeerNode;
@@ -88,4 +90,8 @@
        handler.outputHandler.queue(new PeerMessage(pn, true, true, identifier));
}
+ public void removeFrom(ObjectContainer container) {
+ throw new UnsupportedOperationException();
+ }
+
}
Modified: trunk/freenet/src/freenet/node/fcp/ModifyPeerNote.java
===================================================================
--- trunk/freenet/src/freenet/node/fcp/ModifyPeerNote.java	2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/node/fcp/ModifyPeerNote.java	2009-04-01 20:34:09 UTC (rev 26322)
@@ -3,6 +3,8 @@
* http://www.gnu.org/ for further details of the GPL. */
package freenet.node.fcp;
+import com.db4o.ObjectContainer;
+
import freenet.node.DarknetPeerNode;
import freenet.node.FSParseException;
import freenet.node.Node;
@@ -82,4 +84,8 @@
        handler.outputHandler.queue(new PeerNote(nodeIdentifier, noteText, peerNoteType, identifier));
}
+ public void removeFrom(ObjectContainer container) {
+ throw new UnsupportedOperationException();
+ }
+
}
Modified: trunk/freenet/src/freenet/node/fcp/ModifyPersistentRequest.java
===================================================================
--- trunk/freenet/src/freenet/node/fcp/ModifyPersistentRequest.java	2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/node/fcp/ModifyPersistentRequest.java	2009-04-01 20:34:09 UTC (rev 26322)
@@ -3,11 +3,16 @@
* http://www.gnu.org/ for further details of the GPL. */
package freenet.node.fcp;
+import com.db4o.ObjectContainer;
+
+import freenet.client.async.ClientContext;
+import freenet.client.async.DBJob;
import freenet.node.Node;
import freenet.node.RequestStarter;
import freenet.support.Fields;
import freenet.support.Logger;
import freenet.support.SimpleFieldSet;
+import freenet.support.io.NativeThread;
/**
* FCP message: Modify a persistent request.
@@ -67,17 +72,35 @@
}
@Override
- public void run(FCPConnectionHandler handler, Node node)
+ public void run(final FCPConnectionHandler handler, Node node)
throws MessageInvalidException {
-        FCPClient client = global ? handler.server.globalClient : handler.getClient();
-        ClientRequest req = client.getRequest(identifier);
-        if(req==null){
-            Logger.error(this, "Huh ? the request is null!");
-            ProtocolErrorMessage msg = new ProtocolErrorMessage(ProtocolErrorMessage.NO_SUCH_IDENTIFIER, false, null, identifier, global);
-            handler.outputHandler.queue(msg);
-            return;
+
+        ClientRequest req = handler.getRebootRequest(global, handler, identifier);
+        if(req == null) {
+            node.clientCore.clientContext.jobRunner.queue(new DBJob() {
+
+                public void run(ObjectContainer container, ClientContext context) {
+                    ClientRequest req = handler.getForeverRequest(global, handler, identifier, container);
+                    container.activate(req, 1);
+                    if(req==null){
+                        Logger.error(this, "Huh ? the request is null!");
+                        ProtocolErrorMessage msg = new ProtocolErrorMessage(ProtocolErrorMessage.NO_SUCH_IDENTIFIER, false, null, identifier, global);
+                        handler.outputHandler.queue(msg);
+                        return;
+                    } else {
+                        req.modifyRequest(clientToken, priorityClass, handler.server, container);
+                    }
+                    container.deactivate(req, 1);
+                }
+
+            }, NativeThread.NORM_PRIORITY, false);
+        } else {
+            req.modifyRequest(clientToken, priorityClass, node.clientCore.getFCPServer(), null);
}
-
- req.modifyRequest(clientToken, priorityClass);
}
+
+ public void removeFrom(ObjectContainer container) {
+ container.delete(this);
+ }
+
}
Modified: trunk/freenet/src/freenet/node/fcp/NodeData.java
===================================================================
--- trunk/freenet/src/freenet/node/fcp/NodeData.java	2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/node/fcp/NodeData.java	2009-04-01 20:34:09 UTC (rev 26322)
@@ -3,6 +3,8 @@
* http://www.gnu.org/ for further details of the GPL. */
package freenet.node.fcp;
+import com.db4o.ObjectContainer;
+
import freenet.node.Node;
import freenet.support.SimpleFieldSet;
@@ -61,4 +63,8 @@
        throw new MessageInvalidException(ProtocolErrorMessage.INVALID_MESSAGE, "NodeData goes from server to client not the other way around", identifier, false);
}
+ public void removeFrom(ObjectContainer container) {
+ container.delete(this);
+ }
+
}
Modified: trunk/freenet/src/freenet/node/fcp/NodeHelloMessage.java
===================================================================
--- trunk/freenet/src/freenet/node/fcp/NodeHelloMessage.java	2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/node/fcp/NodeHelloMessage.java	2009-04-01 20:34:09 UTC (rev 26322)
@@ -3,6 +3,8 @@
* http://www.gnu.org/ for further details of the GPL. */
package freenet.node.fcp;
+import com.db4o.ObjectContainer;
+
import freenet.l10n.L10n;
import freenet.node.Node;
import freenet.node.NodeStarter;
@@ -58,4 +60,8 @@
        throw new MessageInvalidException(ProtocolErrorMessage.INVALID_MESSAGE, "NodeHello goes from server to client not the other way around", null, false);
}
+ public void removeFrom(ObjectContainer container) {
+ container.delete(this);
+ }
+
}
Modified: trunk/freenet/src/freenet/node/fcp/PeerMessage.java
===================================================================
--- trunk/freenet/src/freenet/node/fcp/PeerMessage.java	2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/node/fcp/PeerMessage.java	2009-04-01 20:34:09 UTC (rev 26322)
@@ -3,6 +3,8 @@
* http://www.gnu.org/ for further details of the GPL. */
package freenet.node.fcp;
+import com.db4o.ObjectContainer;
+
import freenet.node.Node;
import freenet.node.PeerNode;
import freenet.support.SimpleFieldSet;
@@ -53,4 +55,8 @@
        throw new MessageInvalidException(ProtocolErrorMessage.INVALID_MESSAGE, "Peer goes from server to client not the other way around", null, false);
}
+ public void removeFrom(ObjectContainer container) {
+ container.delete(this);
+ }
+
}
Modified: trunk/freenet/src/freenet/node/fcp/PeerNote.java
===================================================================
--- trunk/freenet/src/freenet/node/fcp/PeerNote.java	2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/node/fcp/PeerNote.java	2009-04-01 20:34:09 UTC (rev 26322)
@@ -5,6 +5,8 @@
import java.io.UnsupportedEncodingException;
+import com.db4o.ObjectContainer;
+
import freenet.node.Node;
import freenet.support.Base64;
import freenet.support.SimpleFieldSet;
@@ -50,4 +52,8 @@
        throw new MessageInvalidException(ProtocolErrorMessage.INVALID_MESSAGE, "PeerNote goes from server to client not the other way around", identifier, false);
}
+ public void removeFrom(ObjectContainer container) {
+ container.delete(this);
+ }
+
}
Modified: trunk/freenet/src/freenet/node/fcp/PeerRemoved.java
===================================================================
--- trunk/freenet/src/freenet/node/fcp/PeerRemoved.java	2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/node/fcp/PeerRemoved.java	2009-04-01 20:34:09 UTC (rev 26322)
@@ -3,6 +3,8 @@
* http://www.gnu.org/ for further details of the GPL. */
package freenet.node.fcp;
+import com.db4o.ObjectContainer;
+
import freenet.node.Node;
import freenet.support.SimpleFieldSet;
@@ -40,4 +42,8 @@
        throw new MessageInvalidException(ProtocolErrorMessage.INVALID_MESSAGE, "PeerRemoved goes from server to client not the other way around", identifier, false);
}
+ public void removeFrom(ObjectContainer container) {
+ container.delete(this);
+ }
+
}
Modified: trunk/freenet/src/freenet/node/fcp/PersistentGet.java
===================================================================
--- trunk/freenet/src/freenet/node/fcp/PersistentGet.java	2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/node/fcp/PersistentGet.java	2009-04-01 20:34:09 UTC (rev 26322)
@@ -5,6 +5,8 @@
import java.io.File;
+import com.db4o.ObjectContainer;
+
import freenet.keys.FreenetURI;
import freenet.node.Node;
import freenet.support.SimpleFieldSet;
@@ -88,4 +90,11 @@
        throw new MessageInvalidException(ProtocolErrorMessage.INVALID_MESSAGE, "PersistentGet goes from server to client not the other way around", identifier, global);
}
+ public void removeFrom(ObjectContainer container) {
+ uri.removeFrom(container);
+ container.delete(targetFile);
+ container.delete(tempFile);
+ container.delete(this);
+ }
+
}
Modified: trunk/freenet/src/freenet/node/fcp/PersistentPut.java
===================================================================
--- trunk/freenet/src/freenet/node/fcp/PersistentPut.java	2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/node/fcp/PersistentPut.java	2009-04-01 20:34:09 UTC (rev 26322)
@@ -5,6 +5,8 @@
import java.io.File;
+import com.db4o.ObjectContainer;
+
import freenet.keys.FreenetURI;
import freenet.node.Node;
import freenet.support.SimpleFieldSet;
@@ -93,4 +95,11 @@
        throw new MessageInvalidException(ProtocolErrorMessage.INVALID_MESSAGE, "PersistentPut goes from server to client not the other way around", identifier, global);
}
+ public void removeFrom(ObjectContainer container) {
+ uri.removeFrom(container);
+ container.delete(origFilename);
+ targetURI.removeFrom(container);
+ container.delete(this);
+ }
+
}
Modified: trunk/freenet/src/freenet/node/fcp/PersistentPutDir.java
===================================================================
--- trunk/freenet/src/freenet/node/fcp/PersistentPutDir.java	2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/node/fcp/PersistentPutDir.java	2009-04-01 20:34:09 UTC (rev 26322)
@@ -4,7 +4,10 @@
package freenet.node.fcp;
import java.util.HashMap;
+import java.util.Iterator;
+import com.db4o.ObjectContainer;
+
import freenet.client.async.ManifestElement;
import freenet.client.async.SimpleManifestPutter;
import freenet.keys.FreenetURI;
@@ -33,10 +36,11 @@
final boolean started;
final int maxRetries;
final boolean wasDiskPut;
+ private final SimpleFieldSet cached;
    public PersistentPutDir(String identifier, FreenetURI uri, int verbosity, short priorityClass,
            short persistenceType, boolean global, String defaultName, HashMap<String, Object> manifestElements,
-            String token, boolean started, int maxRetries, boolean wasDiskPut) {
+            String token, boolean started, int maxRetries, boolean wasDiskPut, ObjectContainer container) {
this.identifier = identifier;
this.uri = uri;
this.verbosity = verbosity;
@@ -49,10 +53,10 @@
this.started = started;
this.maxRetries = maxRetries;
this.wasDiskPut = wasDiskPut;
+ cached = generateFieldSet(container);
}
- @Override
- public SimpleFieldSet getFieldSet() {
+ private SimpleFieldSet generateFieldSet(ObjectContainer container) {
        SimpleFieldSet fs = new SimpleFieldSet(false); // false because this can get HUGE
fs.putSingle("Identifier", identifier);
fs.putSingle("URI", uri.toString(false, false));
@@ -78,6 +82,8 @@
subset.putSingle("TargetURI",
tempURI.toString());
} else {
Bucket data = e.getData();
+ if(persistenceType ==
ClientRequest.PERSIST_FOREVER)
+ container.activate(data, 1);
if(data instanceof DelayedFreeBucket) {
data =
((DelayedFreeBucket)data).getUnderlying();
}
@@ -87,7 +93,7 @@
// What to do with the bucket?
            // It is either a persistent encrypted bucket or a file bucket ...
if(data == null) {
- Logger.error(this, "Bucket already
freed: "+e.getData()+" for "+e+" for "+identifier);
+ Logger.error(this, "Bucket already
freed: "+e.getData()+" for "+e+" for "+e.getName()+" for "+identifier);
} else if(data instanceof FileBucket) {
subset.putSingle("UploadFrom", "disk");
subset.putSingle("Filename",
((FileBucket)data).getFile().getPath());
@@ -96,6 +102,8 @@
            } else {
                throw new IllegalStateException("Don't know what to do with bucket: "+data);
}
+            if(persistenceType == ClientRequest.PERSIST_FOREVER)
+ container.deactivate(data, 1);
}
files.put(num, subset);
}
@@ -109,6 +117,11 @@
}
@Override
+ public SimpleFieldSet getFieldSet() {
+ return cached;
+ }
+
+ @Override
public String getName() {
return name;
}
@@ -119,6 +132,23 @@
        throw new MessageInvalidException(ProtocolErrorMessage.INVALID_MESSAGE, "PersistentPut goes from server to client not the other way around", identifier, global);
}
+ public void removeFrom(ObjectContainer container) {
+ uri.removeFrom(container);
+ removeFrom(manifestElements, container);
+ cached.removeFrom(container);
+ container.delete(this);
+ }
+    private void removeFrom(HashMap manifestElements, ObjectContainer container) {
+        for(Iterator i=manifestElements.values().iterator();i.hasNext();) {
+ Object o = i.next();
+ if(o instanceof HashMap)
+ removeFrom((HashMap)o, container);
+ else
+ ((ManifestElement) o).removeFrom(container);
+ }
+ manifestElements.clear();
+ container.delete(manifestElements);
+ }
}
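
Two changes above are worth noting. The field set is now built once in the constructor, while an ObjectContainer is available to activate the data buckets, and getFieldSet() returns the cached copy, since (per the comment) the set can get huge. And removeFrom() recurses through the manifest, because directories are nested HashMaps with ManifestElement leaves. A hedged sketch of that walk, with Leaf standing in for ManifestElement:

    import java.util.HashMap;
    import java.util.Iterator;

    class ManifestWalk {
        // Hypothetical stand-in for ManifestElement.
        interface Leaf { void remove(); }

        @SuppressWarnings("unchecked")
        static void removeAll(HashMap<String, Object> manifest) {
            for (Iterator<Object> i = manifest.values().iterator(); i.hasNext();) {
                Object o = i.next();
                if (o instanceof HashMap)
                    removeAll((HashMap<String, Object>) o); // subdirectory
                else
                    ((Leaf) o).remove();                    // file entry
            }
            manifest.clear();
        }
    }
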
Modified:
trunk/freenet/src/freenet/node/fcp/PersistentRequestModifiedMessage.java
===================================================================
--- trunk/freenet/src/freenet/node/fcp/PersistentRequestModifiedMessage.java
2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/node/fcp/PersistentRequestModifiedMessage.java
2009-04-01 20:34:09 UTC (rev 26322)
@@ -3,6 +3,8 @@
* http://www.gnu.org/ for further details of the GPL. */
package freenet.node.fcp;
+import com.db4o.ObjectContainer;
+
import freenet.node.*;
import freenet.support.*;
@@ -51,4 +53,8 @@
public void run(FCPConnectionHandler handler, Node node) throws
MessageInvalidException {
throw new
MessageInvalidException(ProtocolErrorMessage.INVALID_MESSAGE,
"PersistentRequestModified goes from server to client not the other way
around", ident, global);
}
+
+ public void removeFrom(ObjectContainer container) {
+ container.delete(this);
+ }
}
Modified:
trunk/freenet/src/freenet/node/fcp/PersistentRequestRemovedMessage.java
===================================================================
--- trunk/freenet/src/freenet/node/fcp/PersistentRequestRemovedMessage.java
2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/node/fcp/PersistentRequestRemovedMessage.java
2009-04-01 20:34:09 UTC (rev 26322)
@@ -3,6 +3,8 @@
* http://www.gnu.org/ for further details of the GPL. */
package freenet.node.fcp;
+import com.db4o.ObjectContainer;
+
import freenet.node.*;
import freenet.support.*;
@@ -36,4 +38,8 @@
public void run(FCPConnectionHandler handler, Node node) throws
MessageInvalidException {
throw new
MessageInvalidException(ProtocolErrorMessage.INVALID_MESSAGE,
"PersistentRequestRemoved goes from server to client not the other way around",
ident, global);
}
+
+ public void removeFrom(ObjectContainer container) {
+ container.delete(this);
+ }
}
Modified: trunk/freenet/src/freenet/node/fcp/PluginInfoMessage.java
===================================================================
--- trunk/freenet/src/freenet/node/fcp/PluginInfoMessage.java 2009-04-01
20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/node/fcp/PluginInfoMessage.java 2009-04-01
20:34:09 UTC (rev 26322)
@@ -3,6 +3,8 @@
* http://www.gnu.org/ for further details of the GPL. */
package freenet.node.fcp;
+import com.db4o.ObjectContainer;
+
import freenet.node.Node;
import freenet.pluginmanager.PluginInfoWrapper;
import freenet.support.SimpleFieldSet;
@@ -57,4 +59,8 @@
throw new
MessageInvalidException(ProtocolErrorMessage.INVALID_MESSAGE, NAME + " goes
from server to client not the other way around", null, false);
}
+ public void removeFrom(ObjectContainer container) {
+ container.delete(this);
+ }
+
}
Modified: trunk/freenet/src/freenet/node/fcp/ProtocolErrorMessage.java
===================================================================
--- trunk/freenet/src/freenet/node/fcp/ProtocolErrorMessage.java
2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/node/fcp/ProtocolErrorMessage.java
2009-04-01 20:34:09 UTC (rev 26322)
@@ -3,6 +3,8 @@
* http://www.gnu.org/ for further details of the GPL. */
package freenet.node.fcp;
+import com.db4o.ObjectContainer;
+
import freenet.node.Node;
import freenet.support.Fields;
import freenet.support.Logger;
@@ -174,4 +176,8 @@
return "ProtocolError";
}
+ public void removeFrom(ObjectContainer container) {
+ container.delete(this);
+ }
+
}
Modified: trunk/freenet/src/freenet/node/fcp/PutFailedMessage.java
===================================================================
--- trunk/freenet/src/freenet/node/fcp/PutFailedMessage.java 2009-04-01
20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/node/fcp/PutFailedMessage.java 2009-04-01
20:34:09 UTC (rev 26322)
@@ -5,6 +5,8 @@
import java.net.MalformedURLException;
+import com.db4o.ObjectContainer;
+
import freenet.client.FailureCodeTracker;
import freenet.client.InsertException;
import freenet.keys.FreenetURI;
@@ -111,4 +113,13 @@
throw new
MessageInvalidException(ProtocolErrorMessage.INVALID_MESSAGE, "PutFailed goes
from server to client not the other way around", identifier, global);
}
+ public void removeFrom(ObjectContainer container) {
+ container.activate(this, 5); // everything
+ if(tracker != null)
+ tracker.removeFrom(container);
+ if(expectedURI != null)
+ expectedURI.removeFrom(container);
+ container.delete(this);
+ }
+
}
Modified: trunk/freenet/src/freenet/node/fcp/PutFetchableMessage.java
===================================================================
--- trunk/freenet/src/freenet/node/fcp/PutFetchableMessage.java 2009-04-01
20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/node/fcp/PutFetchableMessage.java 2009-04-01
20:34:09 UTC (rev 26322)
@@ -3,6 +3,8 @@
* http://www.gnu.org/ for further details of the GPL. */
package freenet.node.fcp;
+import com.db4o.ObjectContainer;
+
import freenet.keys.FreenetURI;
import freenet.node.Node;
import freenet.support.SimpleFieldSet;
@@ -40,4 +42,9 @@
throw new
MessageInvalidException(ProtocolErrorMessage.INVALID_MESSAGE, "PutFetchable
goes from server to client not the other way around", identifier, global);
}
+ public void removeFrom(ObjectContainer container) {
+ uri.removeFrom(container);
+ container.delete(this);
+ }
+
}
Modified: trunk/freenet/src/freenet/node/fcp/PutSuccessfulMessage.java
===================================================================
--- trunk/freenet/src/freenet/node/fcp/PutSuccessfulMessage.java
2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/node/fcp/PutSuccessfulMessage.java
2009-04-01 20:34:09 UTC (rev 26322)
@@ -3,6 +3,8 @@
* http://www.gnu.org/ for further details of the GPL. */
package freenet.node.fcp;
+import com.db4o.ObjectContainer;
+
import freenet.keys.FreenetURI;
import freenet.node.Node;
import freenet.support.SimpleFieldSet;
@@ -29,7 +31,7 @@
if(global) fs.putSingle("Global", "true");
// This is useful for simple clients.
if(uri != null)
- fs.putSingle("URI", uri.toString());
+ fs.putSingle("URI", uri.toString(false, false));
fs.put("StartupTime", startupTime);
fs.put("CompletionTime", completionTime);
return fs;
@@ -46,4 +48,9 @@
throw new
MessageInvalidException(ProtocolErrorMessage.INVALID_MESSAGE, "InsertSuccessful
goes from server to client not the other way around", identifier, global);
}
+ public void removeFrom(ObjectContainer container) {
+ uri.removeFrom(container);
+ container.delete(this);
+ }
+
}
Modified: trunk/freenet/src/freenet/node/fcp/RedirectDirPutFile.java
===================================================================
--- trunk/freenet/src/freenet/node/fcp/RedirectDirPutFile.java 2009-04-01
20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/node/fcp/RedirectDirPutFile.java 2009-04-01
20:34:09 UTC (rev 26322)
@@ -2,6 +2,8 @@
import java.net.MalformedURLException;
+import com.db4o.ObjectContainer;
+
import freenet.client.async.ManifestElement;
import freenet.keys.FreenetURI;
import freenet.support.Logger;
@@ -35,4 +37,9 @@
public ManifestElement getElement() {
return new ManifestElement(name, targetURI, getMIMEType());
}
+
+ public void removeFrom(ObjectContainer container) {
+ targetURI.removeFrom(container);
+ container.delete(this);
+ }
}
Modified: trunk/freenet/src/freenet/node/fcp/RemovePeer.java
===================================================================
--- trunk/freenet/src/freenet/node/fcp/RemovePeer.java 2009-04-01 20:12:14 UTC
(rev 26321)
+++ trunk/freenet/src/freenet/node/fcp/RemovePeer.java 2009-04-01 20:34:09 UTC
(rev 26322)
@@ -3,6 +3,8 @@
* http://www.gnu.org/ for further details of the GPL. */
package freenet.node.fcp;
+import com.db4o.ObjectContainer;
+
import freenet.node.Node;
import freenet.node.PeerNode;
import freenet.support.SimpleFieldSet;
@@ -50,4 +52,8 @@
handler.outputHandler.queue(new PeerRemoved(identity,
nodeIdentifier, identifier));
}
+ public void removeFrom(ObjectContainer container) {
+ throw new UnsupportedOperationException();
+ }
+
}
Modified: trunk/freenet/src/freenet/node/fcp/RemovePersistentRequest.java
===================================================================
--- trunk/freenet/src/freenet/node/fcp/RemovePersistentRequest.java
2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/node/fcp/RemovePersistentRequest.java
2009-04-01 20:34:09 UTC (rev 26322)
@@ -3,8 +3,13 @@
* http://www.gnu.org/ for further details of the GPL. */
package freenet.node.fcp;
+import com.db4o.ObjectContainer;
+
+import freenet.client.async.ClientContext;
+import freenet.client.async.DBJob;
import freenet.node.Node;
import freenet.support.*;
+import freenet.support.io.NativeThread;
/**
* Client telling node to remove a (completed or not) persistent request.
@@ -37,20 +42,33 @@
}
@Override
- public void run(FCPConnectionHandler handler, Node node)
+ public void run(final FCPConnectionHandler handler, Node node)
throws MessageInvalidException {
- FCPClient client = global ? handler.server.globalClient :
handler.getClient();
- ClientRequest req = client.getRequest(identifier);
- if(req==null){
- if(!global)
- req = handler.removeRequestByIdentifier(identifier,
true);
- if(req == null) {
- Logger.error(this, "Huh ? the request is null!");
- return;
- }
- } else {
- client.removeByIdentifier(identifier, true);
- }
-
+ ClientRequest req =
handler.removePersistentRebootRequest(global, identifier);
+ if(req == null && !global) {
+ req = handler.removeRequestByIdentifier(identifier,
true);
+ }
+ if(req == null) {
+ handler.server.core.clientContext.jobRunner.queue(new
DBJob() {
+
+ public void run(ObjectContainer container,
ClientContext context) {
+ try {
+ ClientRequest req =
handler.removePersistentForeverRequest(global, identifier, container);
+ if(req == null) {
+ Logger.error(this, "Huh ? the
request is null!");
+ }
+ } catch (MessageInvalidException e) {
+ FCPMessage err = new
ProtocolErrorMessage(e.protocolCode, false, e.getMessage(), e.ident, e.global);
+
handler.outputHandler.queue(err);
+ }
+ }
+
+ }, NativeThread.HIGH_PRIORITY, false);
+ }
}
+
+ public void removeFrom(ObjectContainer container) {
+ container.delete(this);
+ }
+
}
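
RemovePersistentRequest now removes in two stages: requests that persist only until restart are removed synchronously, and only on a miss is a DBJob queued so the database lookup runs on the serialized database thread. A simplified sketch of that shape; Job, JobRunner and the two maps are hypothetical stand-ins for the real DBJob machinery and request tables:

    import java.util.Map;

    class RemovalSketch {
        interface Job { void run(); }
        interface JobRunner { void queue(Job job, int priority); }

        void remove(final String identifier,
                    Map<String, ?> transientRequests,
                    final Map<String, ?> persistentRequests,
                    JobRunner databaseThread) {
            if (transientRequests.remove(identifier) != null)
                return; // reboot-persistence request: removed inline
            databaseThread.queue(new Job() {
                public void run() {
                    // forever-persistence request: touched only on the DB thread
                    if (persistentRequests.remove(identifier) == null)
                        System.err.println("No such request: " + identifier);
                }
            }, 10 /* high priority */);
        }
    }
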
Modified: trunk/freenet/src/freenet/node/fcp/RequestCompletionCallback.java
===================================================================
--- trunk/freenet/src/freenet/node/fcp/RequestCompletionCallback.java
2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/node/fcp/RequestCompletionCallback.java
2009-04-01 20:34:09 UTC (rev 26322)
@@ -1,20 +1,22 @@
package freenet.node.fcp;
+import com.db4o.ObjectContainer;
+
public interface RequestCompletionCallback {
/**
* Callback called when a request succeeds.
*/
- public void notifySuccess(ClientRequest req);
+ public void notifySuccess(ClientRequest req, ObjectContainer container);
/**
* Callback called when a request fails
*/
- public void notifyFailure(ClientRequest req);
+ public void notifyFailure(ClientRequest req, ObjectContainer container);
/**
* Callback when a request is removed
*/
- public void onRemove(ClientRequest req);
+ public void onRemove(ClientRequest req, ObjectContainer container);
}
Modified: trunk/freenet/src/freenet/node/fcp/SSKKeypairMessage.java
===================================================================
--- trunk/freenet/src/freenet/node/fcp/SSKKeypairMessage.java 2009-04-01
20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/node/fcp/SSKKeypairMessage.java 2009-04-01
20:34:09 UTC (rev 26322)
@@ -3,6 +3,8 @@
* http://www.gnu.org/ for further details of the GPL. */
package freenet.node.fcp;
+import com.db4o.ObjectContainer;
+
import freenet.keys.FreenetURI;
import freenet.node.Node;
import freenet.support.SimpleFieldSet;
@@ -38,6 +40,12 @@
public void run(FCPConnectionHandler handler, Node node) throws
MessageInvalidException {
throw new
MessageInvalidException(ProtocolErrorMessage.INVALID_MESSAGE, "SSKKeypair goes
from server to client not the other way around", identifier, false);
}
+
+ public void removeFrom(ObjectContainer container) {
+ insertURI.removeFrom(container);
+ requestURI.removeFrom(container);
+ container.delete(this);
+ }
Modified: trunk/freenet/src/freenet/node/fcp/ShutdownMessage.java
===================================================================
--- trunk/freenet/src/freenet/node/fcp/ShutdownMessage.java 2009-04-01
20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/node/fcp/ShutdownMessage.java 2009-04-01
20:34:09 UTC (rev 26322)
@@ -3,6 +3,8 @@
* http://www.gnu.org/ for further details of the GPL. */
package freenet.node.fcp;
+import com.db4o.ObjectContainer;
+
import freenet.node.Node;
import freenet.node.fcp.FCPMessage;
import freenet.support.SimpleFieldSet;
@@ -32,5 +34,9 @@
FCPMessage msg = new
ProtocolErrorMessage(ProtocolErrorMessage.SHUTTING_DOWN,true,"The node is
shutting down","Node",false);
handler.outputHandler.queue(msg);
node.exit("Received FCP shutdown message");
+ }
+
+ public void removeFrom(ObjectContainer container) {
+ container.delete(this);
}
}
\ No newline at end of file
Modified: trunk/freenet/src/freenet/node/fcp/SimpleProgressMessage.java
===================================================================
--- trunk/freenet/src/freenet/node/fcp/SimpleProgressMessage.java
2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/node/fcp/SimpleProgressMessage.java
2009-04-01 20:34:09 UTC (rev 26322)
@@ -3,6 +3,8 @@
* http://www.gnu.org/ for further details of the GPL. */
package freenet.node.fcp;
+import com.db4o.ObjectContainer;
+
import freenet.client.events.SplitfileProgressEvent;
import freenet.node.Node;
import freenet.support.SimpleFieldSet;
@@ -71,4 +73,10 @@
return event.finalizedTotal;
}
+ public void removeFrom(ObjectContainer container) {
+ container.activate(event, 1);
+ event.removeFrom(container);
+ container.delete(this);
+ }
+
}
Modified: trunk/freenet/src/freenet/node/fcp/StartedCompressionMessage.java
===================================================================
--- trunk/freenet/src/freenet/node/fcp/StartedCompressionMessage.java
2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/node/fcp/StartedCompressionMessage.java
2009-04-01 20:34:09 UTC (rev 26322)
@@ -3,6 +3,8 @@
* http://www.gnu.org/ for further details of the GPL. */
package freenet.node.fcp;
+import com.db4o.ObjectContainer;
+
import freenet.node.Node;
import freenet.support.SimpleFieldSet;
import freenet.support.compress.Compressor.COMPRESSOR_TYPE;
@@ -40,4 +42,8 @@
throw new
MessageInvalidException(ProtocolErrorMessage.INVALID_MESSAGE,
"StartedCompression goes from server to client not the other way around",
identifier, global);
}
+ public void removeFrom(ObjectContainer container) {
+ container.delete(this);
+ }
+
}
Modified: trunk/freenet/src/freenet/node/fcp/SubscribeUSK.java
===================================================================
--- trunk/freenet/src/freenet/node/fcp/SubscribeUSK.java 2009-04-01
20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/node/fcp/SubscribeUSK.java 2009-04-01
20:34:09 UTC (rev 26322)
@@ -3,6 +3,9 @@
* http://www.gnu.org/ for further details of the GPL. */
package freenet.node.fcp;
+import com.db4o.ObjectContainer;
+
+import freenet.client.async.ClientContext;
import freenet.client.async.USKCallback;
import freenet.keys.USK;
import freenet.node.NodeClientCore;
@@ -21,10 +24,10 @@
this.dontPoll = message.dontPoll;
this.identifier = message.identifier;
this.core = core;
- core.uskManager.subscribe(message.key, this, !message.dontPoll,
handler.getClient().lowLevelClient);
+ core.uskManager.subscribe(message.key, this, !message.dontPoll,
handler.getRebootClient().lowLevelClient);
}
- public void onFoundEdition(long l, USK key) {
+ public void onFoundEdition(long l, USK key, ObjectContainer container,
ClientContext context, boolean wasMetadata, short codec, byte[] data) {
if(handler.isClosed()) {
core.uskManager.unsubscribe(key, this, !dontPoll);
return;
Modified: trunk/freenet/src/freenet/node/fcp/SubscribeUSKMessage.java
===================================================================
--- trunk/freenet/src/freenet/node/fcp/SubscribeUSKMessage.java 2009-04-01
20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/node/fcp/SubscribeUSKMessage.java 2009-04-01
20:34:09 UTC (rev 26322)
@@ -5,6 +5,8 @@
import java.net.MalformedURLException;
+import com.db4o.ObjectContainer;
+
import freenet.keys.FreenetURI;
import freenet.keys.USK;
import freenet.node.Node;
@@ -69,4 +71,8 @@
handler.outputHandler.queue(reply);
}
+ public void removeFrom(ObjectContainer container) {
+ throw new UnsupportedOperationException();
+ }
+
}
Modified: trunk/freenet/src/freenet/node/fcp/SubscribedUSKMessage.java
===================================================================
--- trunk/freenet/src/freenet/node/fcp/SubscribedUSKMessage.java
2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/node/fcp/SubscribedUSKMessage.java
2009-04-01 20:34:09 UTC (rev 26322)
@@ -3,6 +3,8 @@
* http://www.gnu.org/ for further details of the GPL. */
package freenet.node.fcp;
+import com.db4o.ObjectContainer;
+
import freenet.node.Node;
import freenet.support.SimpleFieldSet;
@@ -45,4 +47,10 @@
public void run(FCPConnectionHandler handler, Node node) throws
MessageInvalidException {
throw new
MessageInvalidException(ProtocolErrorMessage.INVALID_MESSAGE, name + " goes
from server to client not the other way around", name, false);
}
+
+ @Override
+ public void removeFrom(ObjectContainer container) {
+ message.removeFrom(container);
+ container.delete(this);
+ }
}
\ No newline at end of file
Modified: trunk/freenet/src/freenet/node/fcp/SubscribedUSKUpdate.java
===================================================================
--- trunk/freenet/src/freenet/node/fcp/SubscribedUSKUpdate.java 2009-04-01
20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/node/fcp/SubscribedUSKUpdate.java 2009-04-01
20:34:09 UTC (rev 26322)
@@ -3,6 +3,8 @@
* http://www.gnu.org/ for further details of the GPL. */
package freenet.node.fcp;
+import com.db4o.ObjectContainer;
+
import freenet.keys.USK;
import freenet.node.Node;
import freenet.support.SimpleFieldSet;
@@ -41,4 +43,8 @@
throw new
MessageInvalidException(ProtocolErrorMessage.INVALID_MESSAGE,
"SubscribedUSKUpdate goes from server to client not the other way around",
identifier, false);
}
+ public void removeFrom(ObjectContainer container) {
+ throw new UnsupportedOperationException();
+ }
+
}
Modified: trunk/freenet/src/freenet/node/fcp/TestDDACompleteMessage.java
===================================================================
--- trunk/freenet/src/freenet/node/fcp/TestDDACompleteMessage.java
2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/node/fcp/TestDDACompleteMessage.java
2009-04-01 20:34:09 UTC (rev 26322)
@@ -6,6 +6,8 @@
import java.io.File;
import java.io.IOException;
+import com.db4o.ObjectContainer;
+
import freenet.node.Node;
import freenet.node.fcp.FCPConnectionHandler.DDACheckJob;
import freenet.support.Logger;
@@ -79,4 +81,8 @@
public void run(FCPConnectionHandler handler, Node node) throws
MessageInvalidException {
throw new
MessageInvalidException(ProtocolErrorMessage.INVALID_MESSAGE, name + " goes
from server to client not the other way around", name, false);
}
+
+ public void removeFrom(ObjectContainer container) {
+ throw new UnsupportedOperationException();
+ }
}
Modified: trunk/freenet/src/freenet/node/fcp/TestDDAReplyMessage.java
===================================================================
--- trunk/freenet/src/freenet/node/fcp/TestDDAReplyMessage.java 2009-04-01
20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/node/fcp/TestDDAReplyMessage.java 2009-04-01
20:34:09 UTC (rev 26322)
@@ -3,6 +3,8 @@
* http://www.gnu.org/ for further details of the GPL. */
package freenet.node.fcp;
+import com.db4o.ObjectContainer;
+
import freenet.node.Node;
import freenet.node.fcp.FCPConnectionHandler.DDACheckJob;
import freenet.support.SimpleFieldSet;
@@ -54,4 +56,8 @@
public void run(FCPConnectionHandler handler, Node node) throws
MessageInvalidException {
throw new
MessageInvalidException(ProtocolErrorMessage.INVALID_MESSAGE, name + " goes
from server to client not the other way around", name, false);
}
+
+ public void removeFrom(ObjectContainer container) {
+ throw new UnsupportedOperationException();
+ }
}
Modified: trunk/freenet/src/freenet/node/fcp/TestDDARequestMessage.java
===================================================================
--- trunk/freenet/src/freenet/node/fcp/TestDDARequestMessage.java
2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/node/fcp/TestDDARequestMessage.java
2009-04-01 20:34:09 UTC (rev 26322)
@@ -3,6 +3,8 @@
* http://www.gnu.org/ for further details of the GPL. */
package freenet.node.fcp;
+import com.db4o.ObjectContainer;
+
import freenet.node.Node;
import freenet.node.fcp.FCPConnectionHandler.DDACheckJob;
import freenet.support.SimpleFieldSet;
@@ -62,4 +64,8 @@
TestDDAReplyMessage reply = new TestDDAReplyMessage(job);
handler.outputHandler.queue(reply);
}
+
+ public void removeFrom(ObjectContainer container) {
+ throw new UnsupportedOperationException();
+ }
}
Modified: trunk/freenet/src/freenet/node/fcp/TestDDAResponseMessage.java
===================================================================
--- trunk/freenet/src/freenet/node/fcp/TestDDAResponseMessage.java
2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/node/fcp/TestDDAResponseMessage.java
2009-04-01 20:34:09 UTC (rev 26322)
@@ -3,6 +3,8 @@
* http://www.gnu.org/ for further details of the GPL. */
package freenet.node.fcp;
+import com.db4o.ObjectContainer;
+
import freenet.node.Node;
import freenet.node.fcp.FCPConnectionHandler.DDACheckJob;
import freenet.support.SimpleFieldSet;
@@ -59,4 +61,8 @@
TestDDACompleteMessage reply = new
TestDDACompleteMessage(handler, job, readContent);
handler.outputHandler.queue(reply);
}
+
+ public void removeFrom(ObjectContainer container) {
+ throw new UnsupportedOperationException();
+ }
}
Modified: trunk/freenet/src/freenet/node/fcp/URIGeneratedMessage.java
===================================================================
--- trunk/freenet/src/freenet/node/fcp/URIGeneratedMessage.java 2009-04-01
20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/node/fcp/URIGeneratedMessage.java 2009-04-01
20:34:09 UTC (rev 26322)
@@ -3,6 +3,8 @@
* http://www.gnu.org/ for further details of the GPL. */
package freenet.node.fcp;
+import com.db4o.ObjectContainer;
+
import freenet.keys.FreenetURI;
import freenet.node.Node;
import freenet.support.SimpleFieldSet;
@@ -39,4 +41,10 @@
throw new
MessageInvalidException(ProtocolErrorMessage.INVALID_MESSAGE, "URIGenerated
goes from server to client not the other way around", identifier, false);
}
+ public void removeFrom(ObjectContainer container) {
+ container.activate(uri, 5);
+ uri.removeFrom(container);
+ container.delete(this);
+ }
+
}
Modified: trunk/freenet/src/freenet/node/fcp/UnknownNodeIdentifierMessage.java
===================================================================
--- trunk/freenet/src/freenet/node/fcp/UnknownNodeIdentifierMessage.java
2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/node/fcp/UnknownNodeIdentifierMessage.java
2009-04-01 20:34:09 UTC (rev 26322)
@@ -3,6 +3,8 @@
* http://www.gnu.org/ for further details of the GPL. */
package freenet.node.fcp;
+import com.db4o.ObjectContainer;
+
import freenet.node.Node;
import freenet.support.SimpleFieldSet;
@@ -36,4 +38,8 @@
throw new
MessageInvalidException(ProtocolErrorMessage.INVALID_MESSAGE,
"UnknownNodeIdentifier goes from server to client not the other way around",
nodeIdentifier, false);
}
+ public void removeFrom(ObjectContainer container) {
+ container.delete(this);
+ }
+
}
Modified: trunk/freenet/src/freenet/node/fcp/UnknownPeerNoteTypeMessage.java
===================================================================
--- trunk/freenet/src/freenet/node/fcp/UnknownPeerNoteTypeMessage.java
2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/node/fcp/UnknownPeerNoteTypeMessage.java
2009-04-01 20:34:09 UTC (rev 26322)
@@ -3,6 +3,8 @@
* http://www.gnu.org/ for further details of the GPL. */
package freenet.node.fcp;
+import com.db4o.ObjectContainer;
+
import freenet.node.Node;
import freenet.support.SimpleFieldSet;
@@ -36,4 +38,8 @@
throw new
MessageInvalidException(ProtocolErrorMessage.INVALID_MESSAGE,
"UnknownPeerNoteType goes from server to client not the other way around",
identifier, false);
}
+ public void removeFrom(ObjectContainer container) {
+ container.delete(this);
+ }
+
}
Modified: trunk/freenet/src/freenet/node/fcp/WatchGlobal.java
===================================================================
--- trunk/freenet/src/freenet/node/fcp/WatchGlobal.java 2009-04-01 20:12:14 UTC
(rev 26321)
+++ trunk/freenet/src/freenet/node/fcp/WatchGlobal.java 2009-04-01 20:34:09 UTC
(rev 26322)
@@ -3,9 +3,14 @@
* http://www.gnu.org/ for further details of the GPL. */
package freenet.node.fcp;
+import com.db4o.ObjectContainer;
+
+import freenet.client.async.ClientContext;
+import freenet.client.async.DBJob;
import freenet.node.Node;
import freenet.support.Fields;
import freenet.support.SimpleFieldSet;
+import freenet.support.io.NativeThread;
public class WatchGlobal extends FCPMessage {
@@ -40,9 +45,24 @@
}
@Override
- public void run(FCPConnectionHandler handler, Node node)
+ public void run(final FCPConnectionHandler handler, Node node)
throws MessageInvalidException {
- handler.getClient().setWatchGlobal(enabled, verbosityMask);
+ handler.getRebootClient().setWatchGlobal(enabled,
verbosityMask, node.clientCore.getFCPServer(), null);
+ handler.server.core.clientContext.jobRunner.queue(new DBJob() {
+
+ public void run(ObjectContainer container,
ClientContext context) {
+ FCPClient client =
handler.getForeverClient(container);
+ container.activate(client, 1);
+ client.setWatchGlobal(enabled, verbosityMask,
handler.server, container);
+ container.deactivate(client, 1);
+ }
+
+ }, NativeThread.HIGH_PRIORITY, false);
+
}
+ public void removeFrom(ObjectContainer container) {
+ container.delete(this);
+ }
+
}
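
WatchGlobal illustrates the companion pattern: the transient reboot client is updated inline, while the persistent forever client is updated inside a DBJob, bracketed by activate() and deactivate() so that only the needed part of the object graph is loaded and it is released again afterwards. A hedged sketch of the bracket; Client stands in for FCPClient, and store() is the db4o 7.x name (earlier releases call it set()):

    import com.db4o.ObjectContainer;

    class WatchBracket {
        // Hypothetical stand-in for FCPClient.
        static class Client {
            boolean watchGlobal;
            int verbosityMask;
        }

        static void setWatch(ObjectContainer container, Client client,
                             boolean enabled, int mask) {
            container.activate(client, 1);   // load fields to depth 1
            client.watchGlobal = enabled;
            client.verbosityMask = mask;
            container.store(client);         // persist the change (db4o 7.x)
            container.deactivate(client, 1); // let the object be unloaded again
        }
    }
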
Modified: trunk/freenet/src/freenet/node/simulator/RealNodeBusyNetworkTest.java
===================================================================
--- trunk/freenet/src/freenet/node/simulator/RealNodeBusyNetworkTest.java
2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/node/simulator/RealNodeBusyNetworkTest.java
2009-04-01 20:34:09 UTC (rev 26322)
@@ -144,7 +144,7 @@
}
long totalRunningRequests = 0;
for(int j=0;j<nodes.length;j++) {
- totalRunningRequests +=
nodes[j].clientCore.countQueuedRequests();
+ totalRunningRequests +=
nodes[j].clientCore.countTransientQueuedRequests();
}
System.err.println("Running requests: "+totalRunningRequests);
}
@@ -154,7 +154,7 @@
while(true) {
long totalRunningRequests = 0;
for(int i=0;i<nodes.length;i++) {
- totalRunningRequests +=
nodes[i].clientCore.countQueuedRequests();
+ totalRunningRequests +=
nodes[i].clientCore.countTransientQueuedRequests();
}
System.err.println("Running requests: "+totalRunningRequests);
if(totalRunningRequests == 0) break;
Modified: trunk/freenet/src/freenet/node/updater/NodeUpdateManager.java
===================================================================
--- trunk/freenet/src/freenet/node/updater/NodeUpdateManager.java
2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/node/updater/NodeUpdateManager.java
2009-04-01 20:34:09 UTC (rev 26322)
@@ -6,6 +6,8 @@
import org.tanukisoftware.wrapper.WrapperManager;
+import com.db4o.ObjectContainer;
+
import freenet.config.Config;
import freenet.config.InvalidConfigValueException;
import freenet.config.SubConfig;
@@ -975,4 +977,10 @@
protected long getStartedFetchingNextExtJarTimestamp() {
return startedFetchingNextExtJar;
}
+
+ public boolean objectCanNew(ObjectContainer container) {
+ Logger.error(this, "Not storing NodeUpdateManager in database",
new Exception("error"));
+ return false;
+ }
+
}
Modified: trunk/freenet/src/freenet/node/updater/NodeUpdater.java
===================================================================
--- trunk/freenet/src/freenet/node/updater/NodeUpdater.java 2009-04-01
20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/node/updater/NodeUpdater.java 2009-04-01
20:34:09 UTC (rev 26322)
@@ -12,18 +12,22 @@
import java.util.zip.ZipEntry;
import java.util.zip.ZipInputStream;
+import com.db4o.ObjectContainer;
+
import freenet.client.FetchContext;
import freenet.client.FetchException;
import freenet.client.FetchResult;
import freenet.client.InsertException;
import freenet.client.async.BaseClientPutter;
import freenet.client.async.ClientCallback;
+import freenet.client.async.ClientContext;
import freenet.client.async.ClientGetter;
import freenet.client.async.USKCallback;
import freenet.keys.FreenetURI;
import freenet.keys.USK;
import freenet.node.Node;
import freenet.node.NodeClientCore;
+import freenet.node.RequestClient;
import freenet.node.RequestStarter;
import freenet.node.Ticker;
import freenet.node.Version;
@@ -33,7 +37,7 @@
import freenet.support.io.Closer;
import freenet.support.io.FileBucket;
-public class NodeUpdater implements ClientCallback, USKCallback {
+public class NodeUpdater implements ClientCallback, USKCallback, RequestClient
{
static private boolean logMINOR;
private FetchContext ctx;
@@ -86,14 +90,14 @@
try {
// because of UoM, this version is actually worth
having as well
USK myUsk =
USK.create(URI.setSuggestedEdition(currentVersion));
- ctx.uskManager.subscribe(myUsk, this, true, this);
+ core.uskManager.subscribe(myUsk, this, true, this);
} catch(MalformedURLException e) {
Logger.error(this, "The auto-update URI isn't valid and
can't be used");
manager.blow("The auto-update URI isn't valid and can't
be used");
}
}
-
- public void onFoundEdition(long l, USK key) {
+
+ public void onFoundEdition(long l, USK key, ObjectContainer container,
ClientContext context, boolean wasMetadata, short codec, byte[] data) {
logMINOR = Logger.shouldLog(Logger.MINOR, this);
if(logMINOR)
Logger.minor(this, "Found edition " + l);
@@ -170,7 +174,7 @@
File.createTempFile(blobFilenamePrefix + availableVersion + "-", ".fblob.tmp",
manager.node.clientCore.getPersistentTempDir());
FreenetURI uri =
URI.setSuggestedEdition(availableVersion);
uri = uri.sskForUSK();
- cg = new ClientGetter(this,
core.requestStarters.chkFetchScheduler, core.requestStarters.sskFetchScheduler,
+ cg = new ClientGetter(this,
uri, ctx,
RequestStarter.IMMEDIATE_SPLITFILE_PRIORITY_CLASS,
this, null, new
FileBucket(tempBlobFile, false, false, false, false, false));
toStart = cg;
@@ -185,7 +189,7 @@
}
if(toStart != null)
try {
- toStart.start();
+ node.clientCore.clientContext.start(toStart);
} catch(FetchException e) {
Logger.error(this, "Error while starting the
fetching: " + e, e);
synchronized(this) {
@@ -193,7 +197,7 @@
}
}
if(cancelled != null)
- cancelled.cancel();
+ cancelled.cancel(null, core.clientContext);
}
File getBlobFile(int availableVersion) {
@@ -225,7 +229,7 @@
System.err.println("Written " + (extUpdate ? "ext" : "main") +
" jar to " + fNew);
}
- public void onSuccess(FetchResult result, ClientGetter state) {
+ public void onSuccess(FetchResult result, ClientGetter state,
ObjectContainer container) {
onSuccess(result, state, tempBlobFile, fetchingVersion);
}
@@ -339,7 +343,7 @@
private static final String RECOMMENDED_EXT_PREFIX =
"Recommended-Ext-Version: ";
private static final int MAX_MANIFEST_SIZE = 1024*1024;
- public void onFailure(FetchException e, ClientGetter state) {
+ public void onFailure(FetchException e, ClientGetter state,
ObjectContainer container) {
logMINOR = Logger.shouldLog(Logger.MINOR, this);
if(!isRunning)
return;
@@ -376,15 +380,15 @@
}
}
- public void onSuccess(BaseClientPutter state) {
+ public void onSuccess(BaseClientPutter state, ObjectContainer
container) {
// Impossible
}
- public void onFailure(InsertException e, BaseClientPutter state) {
+ public void onFailure(InsertException e, BaseClientPutter state,
ObjectContainer container) {
// Impossible
}
- public void onGeneratedURI(FreenetURI uri, BaseClientPutter state) {
+ public void onGeneratedURI(FreenetURI uri, BaseClientPutter state,
ObjectContainer container) {
// Impossible
}
@@ -399,11 +403,11 @@
synchronized(this) {
isRunning = false;
USK myUsk =
USK.create(URI.setSuggestedEdition(currentVersion));
- ctx.uskManager.unsubscribe(myUsk, this, true);
+ core.uskManager.unsubscribe(myUsk, this, true);
c = cg;
cg = null;
}
- c.cancel();
+ c.cancel(null, core.clientContext);
} catch(Exception e) {
Logger.minor(this, "Cannot kill NodeUpdater", e);
}
@@ -413,7 +417,7 @@
return URI;
}
- public void onMajorProgress() {
+ public void onMajorProgress(ObjectContainer container) {
// Ignore
}
@@ -421,7 +425,7 @@
return fetchedVersion > currentVersion;
}
- public void onFetchable(BaseClientPutter state) {
+ public void onFetchable(BaseClientPutter state, ObjectContainer
container) {
// Ignore, we don't insert
}
@@ -469,6 +473,10 @@
return RequestStarter.INTERACTIVE_PRIORITY_CLASS;
}
+ public boolean persistent() {
+ return false;
+ }
+
public void setMinMax(int requiredExt, int recommendedExt) {
int callFinishedFound = -1;
synchronized(this) {
@@ -491,4 +499,14 @@
if(callFinishedFound > -1)
finishOnFoundEdition(callFinishedFound);
}
+
+ public boolean objectCanNew(ObjectContainer container) {
+ Logger.error(this, "Not storing NodeUpdater in database", new
Exception("error"));
+ return false;
+ }
+
+ public void removeFrom(ObjectContainer container) {
+ throw new UnsupportedOperationException();
+ }
+
}
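
The objectCanNew(ObjectContainer) methods added to NodeUpdateManager and NodeUpdater rely on a db4o convention: callback methods are looked up by name on classes about to be stored, and returning false from objectCanNew vetoes the store. Here it serves purely as a tripwire against a stray reference pulling these transient singletons into the database. A minimal sketch, assuming that callback convention:

    import com.db4o.ObjectContainer;

    class NeverStored {
        // db4o finds this method by name before inserting the object;
        // returning false blocks the insert.
        public boolean objectCanNew(ObjectContainer container) {
            // Loud failure: this object is transient by design, so any
            // attempt to persist it is a bug worth a stack trace.
            new Exception("NeverStored leaked into the database").printStackTrace();
            return false;
        }
    }
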
Modified: trunk/freenet/src/freenet/node/updater/RevocationChecker.java
===================================================================
--- trunk/freenet/src/freenet/node/updater/RevocationChecker.java
2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/node/updater/RevocationChecker.java
2009-04-01 20:34:09 UTC (rev 26322)
@@ -3,6 +3,8 @@
import java.io.File;
import java.io.IOException;
+import com.db4o.ObjectContainer;
+
import freenet.client.FetchContext;
import freenet.client.FetchException;
import freenet.client.FetchResult;
@@ -12,6 +14,7 @@
import freenet.client.async.ClientGetter;
import freenet.keys.FreenetURI;
import freenet.node.NodeClientCore;
+import freenet.node.RequestClient;
import freenet.node.RequestStarter;
import freenet.support.Logger;
import freenet.support.io.FileBucket;
@@ -21,7 +24,7 @@
* Fetches the revocation key. Each time it starts, it will try to fetch it
until it has 3 DNFs. If it ever finds it, it will
* be immediately fed to the NodeUpdateManager.
*/
-public class RevocationChecker implements ClientCallback {
+public class RevocationChecker implements ClientCallback, RequestClient {
public final static int REVOCATION_DNF_MIN = 3;
@@ -104,17 +107,17 @@
} catch (IOException e) {
Logger.error(this, "Cannot
record revocation fetch (therefore cannot pass it on to peers)!: "+e, e);
}
- cg = revocationGetter = new
ClientGetter(this, core.requestStarters.chkFetchScheduler,
-
core.requestStarters.sskFetchScheduler, manager.revocationURI, ctxRevocation,
+ cg = revocationGetter = new
ClientGetter(this,
+ manager.revocationURI,
ctxRevocation,
aggressive ?
RequestStarter.MAXIMUM_PRIORITY_CLASS :
RequestStarter.IMMEDIATE_SPLITFILE_PRIORITY_CLASS,
this, null, tmpBlobFile
== null ? null : new FileBucket(tmpBlobFile, false, false, false, false,
false));
if(logMINOR) Logger.minor(this, "Queued
another revocation fetcher (count="+revocationDNFCounter+")");
}
}
if(toCancel != null)
- toCancel.cancel();
+ toCancel.cancel(null, core.clientContext);
if(cg != null) {
- cg.start();
+ core.clientContext.start(cg);
if(logMINOR) Logger.minor(this, "Started
revocation fetcher");
}
} catch (FetchException e) {
@@ -138,7 +141,7 @@
start(wasAggressive);
}
- public void onSuccess(FetchResult result, ClientGetter state) {
+ public void onSuccess(FetchResult result, ClientGetter state,
ObjectContainer container) {
onSuccess(result, state, tmpBlobFile);
}
@@ -171,7 +174,7 @@
FileUtil.renameTo(tmpBlobFile, blobFile);
}
- public void onFailure(FetchException e, ClientGetter state) {
+ public void onFailure(FetchException e, ClientGetter state,
ObjectContainer container) {
onFailure(e, state, tmpBlobFile);
}
@@ -212,34 +215,34 @@
start(wasAggressive, false);
}
- public void onSuccess(BaseClientPutter state) {
+ public void onSuccess(BaseClientPutter state, ObjectContainer
container) {
// TODO Auto-generated method stub
}
- public void onFailure(InsertException e, BaseClientPutter state) {
+ public void onFailure(InsertException e, BaseClientPutter state,
ObjectContainer container) {
// TODO Auto-generated method stub
}
- public void onGeneratedURI(FreenetURI uri, BaseClientPutter state) {
+ public void onGeneratedURI(FreenetURI uri, BaseClientPutter state,
ObjectContainer container) {
// TODO Auto-generated method stub
}
- public void onMajorProgress() {
+ public void onMajorProgress(ObjectContainer container) {
// TODO Auto-generated method stub
}
- public void onFetchable(BaseClientPutter state) {
+ public void onFetchable(BaseClientPutter state, ObjectContainer
container) {
// TODO Auto-generated method stub
}
public void kill() {
if(revocationGetter != null)
- revocationGetter.cancel();
+ revocationGetter.cancel(null, core.clientContext);
}
public long getBlobSize() {
@@ -253,4 +256,12 @@
return null;
}
+ public boolean persistent() {
+ return false;
+ }
+
+ public void removeFrom(ObjectContainer container) {
+ throw new UnsupportedOperationException();
+ }
+
}
Modified: trunk/freenet/src/freenet/node/updater/UpdateOverMandatoryManager.java
===================================================================
--- trunk/freenet/src/freenet/node/updater/UpdateOverMandatoryManager.java
2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/node/updater/UpdateOverMandatoryManager.java
2009-04-01 20:34:09 UTC (rev 26322)
@@ -16,6 +16,8 @@
import java.util.regex.Matcher;
import java.util.regex.Pattern;
+import com.db4o.ObjectContainer;
+
import freenet.client.FetchContext;
import freenet.client.FetchException;
import freenet.client.FetchResult;
@@ -40,6 +42,7 @@
import freenet.node.Node;
import freenet.node.NodeStarter;
import freenet.node.PeerNode;
+import freenet.node.RequestClient;
import freenet.node.RequestStarter;
import freenet.node.Version;
import freenet.node.useralerts.AbstractUserAlert;
@@ -58,7 +61,7 @@
* do about them.
* @author toad
*/
-public class UpdateOverMandatoryManager {
+public class UpdateOverMandatoryManager implements RequestClient {
final NodeUpdateManager updateManager;
/** Set of PeerNode's which say (or said before they disconnected)
@@ -914,9 +917,9 @@
// Fetch our revocation key from the datastore plus the binary
blob
- FetchContext tempContext =
updateManager.node.clientCore.makeClient((short) 0, true).getFetchContext();
+ FetchContext seedContext =
updateManager.node.clientCore.makeClient((short) 0, true).getFetchContext();
+ FetchContext tempContext = new FetchContext(seedContext,
FetchContext.IDENTICAL_MASK, true, blocks);
tempContext.localRequestOnly = true;
- tempContext.blocks = blocks;
File f;
FileBucket b = null;
@@ -935,7 +938,7 @@
ClientCallback myCallback = new ClientCallback() {
- public void onFailure(FetchException e, ClientGetter
state) {
+ public void onFailure(FetchException e, ClientGetter
state, ObjectContainer container) {
if(e.mode == FetchException.CANCELLED) {
// Eh?
Logger.error(this, "Cancelled fetch
from store/blob of revocation certificate from " + source.userToString());
@@ -961,45 +964,43 @@
}
}
- public void onFailure(InsertException e,
BaseClientPutter state) {
+ public void onFailure(InsertException e,
BaseClientPutter state, ObjectContainer container) {
// Ignore, not possible
}
- public void onFetchable(BaseClientPutter state) {
+ public void onFetchable(BaseClientPutter state,
ObjectContainer container) {
// Irrelevant
}
- public void onGeneratedURI(FreenetURI uri,
BaseClientPutter state) {
+ public void onGeneratedURI(FreenetURI uri,
BaseClientPutter state, ObjectContainer container) {
// Ignore, not possible
}
- public void onMajorProgress() {
+ public void onMajorProgress(ObjectContainer container) {
// Ignore
}
- public void onSuccess(FetchResult result, ClientGetter
state) {
+ public void onSuccess(FetchResult result, ClientGetter
state, ObjectContainer container) {
System.err.println("Got revocation certificate
from " + source.userToString());
updateManager.revocationChecker.onSuccess(result, state, cleanedBlobFile);
temp.delete();
insertBlob(updateManager.revocationChecker.getBlobFile(), "revocation");
}
- public void onSuccess(BaseClientPutter state) {
+ public void onSuccess(BaseClientPutter state,
ObjectContainer container) {
// Ignore, not possible
}
};
ClientGetter cg = new ClientGetter(myCallback,
-
updateManager.node.clientCore.requestStarters.chkFetchScheduler,
-
updateManager.node.clientCore.requestStarters.sskFetchScheduler,
updateManager.revocationURI, tempContext, (short) 0,
this, null, cleanedBlob);
try {
- cg.start();
+ updateManager.node.clientCore.clientContext.start(cg);
} catch(FetchException e1) {
System.err.println("Failed to decode UOM blob: " + e1);
e1.printStackTrace();
- myCallback.onFailure(e1, cg);
+ myCallback.onFailure(e1, cg, null);
}
}
@@ -1007,31 +1008,31 @@
protected void insertBlob(final File blob, final String type) {
ClientCallback callback = new ClientCallback() {
- public void onFailure(FetchException e, ClientGetter
state) {
+ public void onFailure(FetchException e, ClientGetter
state, ObjectContainer container) {
// Ignore, can't happen
}
-
- public void onFailure(InsertException e,
BaseClientPutter state) {
+
+ public void onFailure(InsertException e,
BaseClientPutter state, ObjectContainer container) {
Logger.error(this, "Failed to insert "+type+"
binary blob: " + e, e);
}
-
- public void onFetchable(BaseClientPutter state) {
+
+ public void onFetchable(BaseClientPutter state,
ObjectContainer container) {
// Ignore
}
-
- public void onGeneratedURI(FreenetURI uri,
BaseClientPutter state) {
+
+ public void onGeneratedURI(FreenetURI uri,
BaseClientPutter state, ObjectContainer container) {
// Ignore
}
-
- public void onMajorProgress() {
+
+ public void onMajorProgress(ObjectContainer container) {
// Ignore
}
-
- public void onSuccess(FetchResult result, ClientGetter
state) {
+
+ public void onSuccess(FetchResult result, ClientGetter
state, ObjectContainer container) {
// Ignore, can't happen
}
-
- public void onSuccess(BaseClientPutter state) {
+
+ public void onSuccess(BaseClientPutter state,
ObjectContainer container) {
// All done. Cool.
Logger.normal(this, "Inserted "+type+" binary
blob");
}
@@ -1039,11 +1040,9 @@
FileBucket bucket = new FileBucket(blob, true, false, false,
false, false);
ClientPutter putter = new ClientPutter(callback, bucket,
FreenetURI.EMPTY_CHK_URI, null,
updateManager.node.clientCore.makeClient(RequestStarter.INTERACTIVE_PRIORITY_CLASS).getInsertContext(true),
-
updateManager.node.clientCore.requestStarters.chkPutScheduler,
-
updateManager.node.clientCore.requestStarters.sskPutScheduler,
RequestStarter.INTERACTIVE_PRIORITY_CLASS, false,
false, this, null, null, true);
try {
- putter.start(false);
+
updateManager.node.clientCore.clientContext.start(putter, false);
} catch(InsertException e1) {
Logger.error(this, "Failed to start insert of "+type+"
binary blob: " + e1, e1);
}
@@ -1415,9 +1414,9 @@
// Fetch the jar from the datastore plus the binary blob
- FetchContext tempContext =
updateManager.node.clientCore.makeClient((short) 0, true).getFetchContext();
+ FetchContext seedContext =
updateManager.node.clientCore.makeClient((short) 0, true).getFetchContext();
+ FetchContext tempContext = new FetchContext(seedContext,
FetchContext.IDENTICAL_MASK, true, blocks);
tempContext.localRequestOnly = true;
- tempContext.blocks = blocks;
File f;
FileBucket b = null;
@@ -1437,7 +1436,7 @@
ClientCallback myCallback = new ClientCallback() {
- public void onFailure(FetchException e, ClientGetter
state) {
+ public void onFailure(FetchException e, ClientGetter
state, ObjectContainer container) {
if(e.mode == FetchException.CANCELLED) {
// Eh?
Logger.error(this, "Cancelled fetch
from store/blob of main jar (" + version + ") from " + source.userToString());
@@ -1454,23 +1453,23 @@
}
}
- public void onFailure(InsertException e,
BaseClientPutter state) {
+ public void onFailure(InsertException e,
BaseClientPutter state, ObjectContainer container) {
// Ignore, not possible
}
- public void onFetchable(BaseClientPutter state) {
+ public void onFetchable(BaseClientPutter state,
ObjectContainer container) {
// Irrelevant
}
- public void onGeneratedURI(FreenetURI uri,
BaseClientPutter state) {
+ public void onGeneratedURI(FreenetURI uri,
BaseClientPutter state, ObjectContainer container) {
// Ignore, not possible
}
- public void onMajorProgress() {
+ public void onMajorProgress(ObjectContainer container) {
// Ignore
}
- public void onSuccess(FetchResult result, ClientGetter
state) {
+ public void onSuccess(FetchResult result, ClientGetter
state, ObjectContainer container) {
System.err.println("Got main jar version " +
version + " from " + source.userToString());
if(result.size() == 0) {
System.err.println("Ignoring because 0
bytes long");
@@ -1487,20 +1486,18 @@
insertBlob(mainUpdater.getBlobFile(version),
"main jar");
}
- public void onSuccess(BaseClientPutter state) {
+ public void onSuccess(BaseClientPutter state,
ObjectContainer container) {
// Ignore, not possible
}
};
ClientGetter cg = new ClientGetter(myCallback,
-
updateManager.node.clientCore.requestStarters.chkFetchScheduler,
-
updateManager.node.clientCore.requestStarters.sskFetchScheduler,
uri, tempContext, (short) 0, this, null, cleanedBlob);
try {
- cg.start();
+ updateManager.node.clientCore.clientContext.start(cg);
} catch(FetchException e1) {
- myCallback.onFailure(e1, cg);
+ myCallback.onFailure(e1, cg, null);
}
}
@@ -1538,9 +1535,9 @@
// Fetch the jar from the datastore plus the binary blob
- FetchContext tempContext =
updateManager.node.clientCore.makeClient((short) 0, true).getFetchContext();
+ FetchContext seedContext =
updateManager.node.clientCore.makeClient((short) 0, true).getFetchContext();
+ FetchContext tempContext = new FetchContext(seedContext,
FetchContext.IDENTICAL_MASK, true, blocks);
tempContext.localRequestOnly = true;
- tempContext.blocks = blocks;
File f;
FileBucket b = null;
@@ -1560,7 +1557,7 @@
ClientCallback myCallback = new ClientCallback() {
- public void onFailure(FetchException e, ClientGetter
state) {
+ public void onFailure(FetchException e, ClientGetter
state, ObjectContainer container) {
if(e.mode == FetchException.CANCELLED) {
// Eh?
Logger.error(this, "Cancelled fetch
from store/blob of ext jar (" + version + ") from " + source.userToString());
@@ -1577,23 +1574,23 @@
}
}
- public void onFailure(InsertException e,
BaseClientPutter state) {
+ public void onFailure(InsertException e,
BaseClientPutter state, ObjectContainer container) {
// Ignore, not possible
}
- public void onFetchable(BaseClientPutter state) {
+ public void onFetchable(BaseClientPutter state,
ObjectContainer container) {
// Irrelevant
}
- public void onGeneratedURI(FreenetURI uri,
BaseClientPutter state) {
+ public void onGeneratedURI(FreenetURI uri,
BaseClientPutter state, ObjectContainer container) {
// Ignore, not possible
}
- public void onMajorProgress() {
+ public void onMajorProgress(ObjectContainer container) {
// Ignore
}
- public void onSuccess(FetchResult result, ClientGetter
state) {
+ public void onSuccess(FetchResult result, ClientGetter
state, ObjectContainer container) {
System.err.println("Got ext jar version " +
version + " from " + source.userToString());
if(result.size() == 0) {
System.err.println("Ignoring because 0
bytes long");
@@ -1610,21 +1607,19 @@
insertBlob(extUpdater.getBlobFile(version),
"ext jar");
}
- public void onSuccess(BaseClientPutter state) {
+ public void onSuccess(BaseClientPutter state,
ObjectContainer container) {
// Ignore, not possible
}
};
ClientGetter cg = new ClientGetter(myCallback,
-
updateManager.node.clientCore.requestStarters.chkFetchScheduler,
-
updateManager.node.clientCore.requestStarters.sskFetchScheduler,
- uri, tempContext, (short) 0, this, null, cleanedBlob);
+ uri, tempContext, (short) 0, this, null,
cleanedBlob);
- try {
- cg.start();
- } catch(FetchException e1) {
- myCallback.onFailure(e1, cg);
- }
+ try {
+
updateManager.node.clientCore.clientContext.start(cg);
+ } catch(FetchException e1) {
+ myCallback.onFailure(e1, cg, null);
+ }
}
@@ -1699,4 +1694,12 @@
return !gotError;
}
+
+ public boolean persistent() {
+ return false;
+ }
+
+ public void removeFrom(ObjectContainer container) {
+ throw new UnsupportedOperationException();
+ }
}
Modified: trunk/freenet/src/freenet/pluginmanager/PluginReplySender.java
===================================================================
--- trunk/freenet/src/freenet/pluginmanager/PluginReplySender.java
2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/pluginmanager/PluginReplySender.java
2009-04-01 20:34:09 UTC (rev 26322)
@@ -17,14 +17,14 @@
identifier = identifier2;
}
- public void send(SimpleFieldSet params) {
+ public void send(SimpleFieldSet params) throws PluginNotFoundException {
send(params, (Bucket)null);
}
- public void send(SimpleFieldSet params, byte[] data) {
+ public void send(SimpleFieldSet params, byte[] data) throws
PluginNotFoundException {
send(params, new ArrayBucket(data));
}
- public abstract void send(SimpleFieldSet params, Bucket bucket);
+ public abstract void send(SimpleFieldSet params, Bucket bucket) throws
PluginNotFoundException;
}
Modified: trunk/freenet/src/freenet/pluginmanager/PluginReplySenderFCP.java
===================================================================
--- trunk/freenet/src/freenet/pluginmanager/PluginReplySenderFCP.java
2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/pluginmanager/PluginReplySenderFCP.java
2009-04-01 20:34:09 UTC (rev 26322)
@@ -22,9 +22,11 @@
}
@Override
- public void send(SimpleFieldSet params, Bucket bucket) {
+ public void send(SimpleFieldSet params, Bucket bucket) throws
PluginNotFoundException {
+ // Like in Linux everything is a file, in PluginTalker
everything is a plugin, so it throws PluginNotFoundException
+ // instead of FCP connection errors.
+ if (handler.isClosed()) throw new PluginNotFoundException("FCP
connection closed");
FCPPluginReply reply = new FCPPluginReply(pluginname,
identifier, params, bucket);
handler.outputHandler.queue(reply);
}
-
}
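
With send() now declared to throw PluginNotFoundException, plugin code replying over FCP has to handle a closed connection explicitly. A hedged usage sketch; the Status field is illustrative, while SimpleFieldSet, putSingle() and send() are as shown in the diff:

    import freenet.pluginmanager.PluginNotFoundException;
    import freenet.pluginmanager.PluginReplySender;
    import freenet.support.SimpleFieldSet;

    class ReplySketch {
        void reply(PluginReplySender sender) {
            SimpleFieldSet fs = new SimpleFieldSet(true); // SimpleFieldSet(boolean), as above
            fs.putSingle("Status", "ok");                 // illustrative field
            try {
                sender.send(fs);
            } catch (PluginNotFoundException e) {
                // connection closed or plugin gone; nothing left to reply to
            }
        }
    }
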
Modified: trunk/freenet/src/freenet/pluginmanager/PluginTalker.java
===================================================================
--- trunk/freenet/src/freenet/pluginmanager/PluginTalker.java 2009-04-01
20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/pluginmanager/PluginTalker.java 2009-04-01
20:34:09 UTC (rev 26322)
@@ -58,7 +58,13 @@
node.executor.execute(new Runnable() {
public void run() {
- plugin.handle(replysender, plugparams, data2,
access);
+
+ try {
+ plugin.handle(replysender, plugparams,
data2, access);
+ } catch (Throwable t) {
+ Logger.error(this, "Caught error while
executing FCP plugin handler: " + t.getMessage(), t);
+ }
+
}
}, "FCPPlugin talk runner for " + this);
Deleted: trunk/freenet/src/freenet/support/BinaryBloomFilter.java
===================================================================
--- trunk/freenet/src/freenet/support/BinaryBloomFilter.java 2009-04-01
20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/support/BinaryBloomFilter.java 2009-04-01
20:34:09 UTC (rev 26322)
@@ -1,81 +0,0 @@
-/* This code is part of Freenet. It is distributed under the GNU General
- * Public License, version 2 (or at your option any later version). See
- * http://www.gnu.org/ for further details of the GPL. */
-package freenet.support;
-
-import java.io.File;
-import java.io.IOException;
-import java.io.RandomAccessFile;
-import java.nio.ByteBuffer;
-import java.nio.channels.FileChannel.MapMode;
-
-/**
- * @author sdiz
- */
-public class BinaryBloomFilter extends BloomFilter {
- /**
- * Constructor
- *
- * @param length
- * length in bits
- */
- protected BinaryBloomFilter(int length, int k) {
- super(length, k);
- filter = ByteBuffer.allocate(this.length / 8);
- }
-
- /**
- * Constructor
- *
- * @param file
- * disk file
- * @param length
- * length in bits
- * @throws IOException
- */
- protected BinaryBloomFilter(File file, int length, int k) throws
IOException {
- super(length, k);
- if (!file.exists() || file.length() != length / 8)
- needRebuild = true;
-
- RandomAccessFile raf = new RandomAccessFile(file, "rw");
- raf.setLength(length / 8);
- filter = raf.getChannel().map(MapMode.READ_WRITE, 0, length /
8).load();
- }
-
- @Override
- public void removeKey(byte[] key) {
- // ignore
- }
-
- @Override
- protected boolean getBit(int offset) {
- return (filter.get(offset / 8) & (1 << (offset % 8))) != 0;
- }
-
- @Override
- protected void setBit(int offset) {
- byte b = filter.get(offset / 8);
- b |= 1 << (offset % 8);
- filter.put(offset / 8, b);
- }
-
- @Override
- protected void unsetBit(int offset) {
- // NO-OP
- }
-
- @Override
- public void fork(int k) {
- lock.writeLock().lock();
- try {
- File tempFile = File.createTempFile("bloom-", ".tmp");
- tempFile.deleteOnExit();
- forkedFilter = new BinaryBloomFilter(tempFile, length,
k);
- } catch (IOException e) {
- forkedFilter = new BinaryBloomFilter(length, k);
- } finally {
- lock.writeLock().unlock();
- }
- }
-}
Copied: trunk/freenet/src/freenet/support/BinaryBloomFilter.java (from rev
26320, branches/db4o/freenet/src/freenet/support/BinaryBloomFilter.java)
===================================================================
--- trunk/freenet/src/freenet/support/BinaryBloomFilter.java
(rev 0)
+++ trunk/freenet/src/freenet/support/BinaryBloomFilter.java 2009-04-01
20:34:09 UTC (rev 26322)
@@ -0,0 +1,86 @@
+/* This code is part of Freenet. It is distributed under the GNU General
+ * Public License, version 2 (or at your option any later version). See
+ * http://www.gnu.org/ for further details of the GPL. */
+package freenet.support;
+
+import java.io.File;
+import java.io.IOException;
+import java.io.RandomAccessFile;
+import java.nio.ByteBuffer;
+import java.nio.channels.FileChannel.MapMode;
+
+/**
+ * @author sdiz
+ */
+public class BinaryBloomFilter extends BloomFilter {
+ /**
+ * Constructor
+ *
+ * @param length
+ * length in bits
+ */
+ protected BinaryBloomFilter(int length, int k) {
+ super(length, k);
+ filter = ByteBuffer.allocate(this.length / 8);
+ }
+
+ /**
+ * Constructor
+ *
+ * @param file
+ * disk file
+ * @param length
+ * length in bits
+ * @throws IOException
+ */
+ protected BinaryBloomFilter(File file, int length, int k) throws
IOException {
+ super(length, k);
+ if (!file.exists() || file.length() != length / 8)
+ needRebuild = true;
+
+ RandomAccessFile raf = new RandomAccessFile(file, "rw");
+ raf.setLength(length / 8);
+ filter = raf.getChannel().map(MapMode.READ_WRITE, 0, length /
8).load();
+ }
+
+ public BinaryBloomFilter(ByteBuffer slice, int length, int k) {
+ super(length, k);
+ filter = slice;
+ }
+
+ @Override
+ public void removeKey(byte[] key) {
+ // ignore
+ }
+
+ @Override
+ protected boolean getBit(int offset) {
+ return (filter.get(offset / 8) & (1 << (offset % 8))) != 0;
+ }
+
+ @Override
+ protected void setBit(int offset) {
+ byte b = filter.get(offset / 8);
+ b |= 1 << (offset % 8);
+ filter.put(offset / 8, b);
+ }
+
+ @Override
+ protected void unsetBit(int offset) {
+ // NO-OP
+ }
+
+ @Override
+ public void fork(int k) {
+ lock.writeLock().lock();
+ try {
+ File tempFile = File.createTempFile("bloom-", ".tmp");
+ tempFile.deleteOnExit();
+ forkedFilter = new BinaryBloomFilter(tempFile, length,
k);
+ } catch (IOException e) {
+ forkedFilter = new BinaryBloomFilter(length, k);
+ } finally {
+ lock.writeLock().unlock();
+ }
+ }
+}
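
The only functional addition in the re-copied BinaryBloomFilter is the ByteBuffer constructor, which lets one backing buffer, for example a single mapped file, be carved into slices that each back an independent filter. A sketch of the slicing with illustrative sizes; only java.nio is involved:

    import java.nio.ByteBuffer;

    class SliceSketch {
        public static void main(String[] args) {
            ByteBuffer whole = ByteBuffer.allocate(2 * 1024); // room for two 1 KiB filters

            whole.position(0);
            whole.limit(1024);
            ByteBuffer first = whole.slice();  // bits 0..8191

            whole.position(1024);
            whole.limit(2048);
            ByteBuffer second = whole.slice(); // bits 8192..16383

            // each slice would then back its own filter, e.g.
            // new BinaryBloomFilter(first, 8192, k)
            System.out.println(first.capacity() + " / " + second.capacity());
        }
    }
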
Property changes on: trunk/freenet/src/freenet/support/BinaryBloomFilter.java
___________________________________________________________________
Added: svn:mergeinfo
+
Modified: trunk/freenet/src/freenet/support/BitArray.java
===================================================================
--- trunk/freenet/src/freenet/support/BitArray.java 2009-04-01 20:12:14 UTC
(rev 26321)
+++ trunk/freenet/src/freenet/support/BitArray.java 2009-04-01 20:34:09 UTC
(rev 26322)
@@ -32,6 +32,11 @@
private final int _size;
private final byte[] _bits;
+ public BitArray(byte[] data) {
+ _bits = data;
+ _size = data.length*8;
+ }
+
/**
* This constructor does not check for unacceptable sizes, and should
only be used on trusted data.
*/
Deleted: trunk/freenet/src/freenet/support/BloomFilter.java
===================================================================
--- trunk/freenet/src/freenet/support/BloomFilter.java 2009-04-01 20:12:14 UTC
(rev 26321)
+++ trunk/freenet/src/freenet/support/BloomFilter.java 2009-04-01 20:34:09 UTC
(rev 26322)
@@ -1,205 +0,0 @@
-package freenet.support;
-
-import java.io.File;
-import java.io.IOException;
-import java.nio.ByteBuffer;
-import java.nio.MappedByteBuffer;
-import java.util.Random;
-import java.util.concurrent.locks.Lock;
-import java.util.concurrent.locks.ReadWriteLock;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
-
-import org.spaceroots.mantissa.random.MersenneTwister;
-
-public abstract class BloomFilter {
- protected ByteBuffer filter;
-
- /** Number of hash functions */
- protected final int k;
- protected final int length;
-
- protected ReadWriteLock lock = new ReentrantReadWriteLock();
-
- public static BloomFilter createFilter(int length, int k, boolean
counting) {
- if (length == 0)
- return new NullBloomFilter(length, k);
- if (counting)
- return new CountingBloomFilter(length, k);
- else
- return new BinaryBloomFilter(length, k);
- }
-
- public static BloomFilter createFilter(File file, int length, int k,
boolean counting) throws IOException {
- if (length == 0)
- return new NullBloomFilter(length, k);
- if (counting)
- return new CountingBloomFilter(file, length, k);
- else
- return new BinaryBloomFilter(file, length, k);
- }
-
- protected BloomFilter(int length, int k) {
- if (length % 8 != 0)
- length -= length % 8;
-
- this.length = length;
- this.k = k;
- }
-
- //-- Core
- public void addKey(byte[] key) {
- Random hashes = getHashes(key);
- lock.writeLock().lock();
- try {
- for (int i = 0; i < k; i++)
- setBit(hashes.nextInt(length));
- } finally {
- lock.writeLock().unlock();
- }
-
- if (forkedFilter != null)
- forkedFilter.addKey(key);
- }
-
- // add to the forked filter only
- public void addKeyForked(byte[] key) {
- if (forkedFilter != null)
- forkedFilter.addKey(key);
- }
-
- public boolean checkFilter(byte[] key) {
- Random hashes = getHashes(key);
- lock.readLock().lock();
- try {
- for (int i = 0; i < k; i++)
- if (!getBit(hashes.nextInt(length)))
- return false;
- } finally {
- lock.readLock().unlock();
- }
- return true;
- }
-
- public void removeKey(byte[] key) {
- Random hashes = getHashes(key);
- lock.writeLock().lock();
- try {
- for (int i = 0; i < k; i++)
- unsetBit(hashes.nextInt(length));
- } finally {
- lock.writeLock().unlock();
- }
-
- if (forkedFilter != null)
- forkedFilter.removeKey(key);
- }
-
- //-- Bits and Hashes
- protected abstract boolean getBit(int offset);
-
- protected abstract void setBit(int offset);
-
- protected abstract void unsetBit(int offset);
-
- protected Random getHashes(byte[] key) {
- return new MersenneTwister(key);
- }
-
- //-- Fork & Merge
- protected BloomFilter forkedFilter;
-
- /**
- * Create an empty, in-memory copy of bloom filter. New updates are
written to both filters.
- * This is written back to disk on #merge()
- */
- public abstract void fork(int k);
-
- public void merge() {
- lock.writeLock().lock();
- try {
- if (forkedFilter == null)
- return;
-
- Lock forkedLock = forkedFilter.lock.writeLock();
- forkedLock.lock();
- try {
- filter.position(0);
- forkedFilter.filter.position(0);
-
- filter.put(forkedFilter.filter);
-
- filter.position(0);
- forkedFilter.close();
- forkedFilter = null;
- } finally {
- forkedLock.unlock();
- }
- } finally {
- lock.writeLock().unlock();
- }
- }
-
- public void discard() {
- lock.writeLock().lock();
- try {
- if (forkedFilter == null)
- return;
- forkedFilter.close();
- forkedFilter = null;
- } finally {
- lock.writeLock().unlock();
- }
- }
-
- //-- Misc.
- /**
- * Calculate optimal K value
- *
- * @param filterLength
- * filter length in bits
- * @param maxKey
- * @return optimal K
- */
- public static int optimialK(int filterLength, long maxKey) {
- long k = Math.round(Math.log(2) * filterLength / maxKey);
-
- if (k > 64)
- k = 64;
- if (k < 1)
- k = 1;
-
- return (int) k;
- }
-
- public int getK() {
- return k;
- }
-
- protected boolean needRebuild;
-
- public boolean needRebuild() {
- boolean _needRebuild = needRebuild;
- needRebuild = false;
- return _needRebuild;
-
- }
-
- public void force() {
- if (filter instanceof MappedByteBuffer) {
- ((MappedByteBuffer) filter).force();
- }
- }
-
- public void close() {
- if (filter != null) {
- force();
- }
- filter = null;
- forkedFilter = null;
- }
-
- @Override
- protected void finalize() {
- close();
- }
-}
Copied: trunk/freenet/src/freenet/support/BloomFilter.java (from rev 26320, branches/db4o/freenet/src/freenet/support/BloomFilter.java)
===================================================================
--- trunk/freenet/src/freenet/support/BloomFilter.java
(rev 0)
+++ trunk/freenet/src/freenet/support/BloomFilter.java 2009-04-01 20:34:09 UTC
(rev 26322)
@@ -0,0 +1,212 @@
+package freenet.support;
+
+import java.io.File;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.nio.MappedByteBuffer;
+import java.util.Random;
+import java.util.concurrent.locks.Lock;
+import java.util.concurrent.locks.ReadWriteLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+
+import org.spaceroots.mantissa.random.MersenneTwister;
+
+public abstract class BloomFilter {
+ protected ByteBuffer filter;
+
+ /** Number of hash functions */
+ protected final int k;
+ protected final int length;
+
+ protected ReadWriteLock lock = new ReentrantReadWriteLock();
+
+	public static BloomFilter createFilter(int length, int k, boolean counting) {
+ if (length == 0)
+ return new NullBloomFilter(length, k);
+ if (counting)
+ return new CountingBloomFilter(length, k);
+ else
+ return new BinaryBloomFilter(length, k);
+ }
+
+	public static BloomFilter createFilter(File file, int length, int k, boolean counting) throws IOException {
+ if (length == 0)
+ return new NullBloomFilter(length, k);
+ if (counting)
+ return new CountingBloomFilter(file, length, k);
+ else
+ return new BinaryBloomFilter(file, length, k);
+ }
+
+ protected BloomFilter(int length, int k) {
+ if (length % 8 != 0)
+ length -= length % 8;
+
+ this.length = length;
+ this.k = k;
+ }
+
+ //-- Core
+ public void addKey(byte[] key) {
+ Random hashes = getHashes(key);
+ lock.writeLock().lock();
+ try {
+ for (int i = 0; i < k; i++)
+ setBit(hashes.nextInt(length));
+ } finally {
+ lock.writeLock().unlock();
+ }
+
+ if (forkedFilter != null)
+ forkedFilter.addKey(key);
+ }
+
+ // add to the forked filter only
+ public void addKeyForked(byte[] key) {
+ if (forkedFilter != null)
+ forkedFilter.addKey(key);
+ }
+
+ public boolean checkFilter(byte[] key) {
+ Random hashes = getHashes(key);
+ lock.readLock().lock();
+ try {
+ for (int i = 0; i < k; i++)
+ if (!getBit(hashes.nextInt(length)))
+ return false;
+ } finally {
+ lock.readLock().unlock();
+ }
+ return true;
+ }
+
+ public void removeKey(byte[] key) {
+ Random hashes = getHashes(key);
+ lock.writeLock().lock();
+ try {
+ for (int i = 0; i < k; i++)
+ unsetBit(hashes.nextInt(length));
+ } finally {
+ lock.writeLock().unlock();
+ }
+
+ if (forkedFilter != null)
+ forkedFilter.removeKey(key);
+ }
+
+ //-- Bits and Hashes
+ protected abstract boolean getBit(int offset);
+
+ protected abstract void setBit(int offset);
+
+ protected abstract void unsetBit(int offset);
+
+	// Weird implementations should override
+ public void unsetAll() {
+ int x = filter.limit();
+ for(int i=0;i<x;i++)
+ filter.put(i, (byte)0);
+ }
+
+ protected Random getHashes(byte[] key) {
+ return new MersenneTwister(key);
+ }
+
+ //-- Fork & Merge
+ protected BloomFilter forkedFilter;
+
+ /**
+	 * Create an empty, in-memory copy of the bloom filter. New updates are written to both filters.
+	 * This is written back to disk on #merge().
+ */
+ public abstract void fork(int k);
+
+ public void merge() {
+ lock.writeLock().lock();
+ try {
+ if (forkedFilter == null)
+ return;
+
+ Lock forkedLock = forkedFilter.lock.writeLock();
+ forkedLock.lock();
+ try {
+ filter.position(0);
+ forkedFilter.filter.position(0);
+
+ filter.put(forkedFilter.filter);
+
+ filter.position(0);
+ forkedFilter.close();
+ forkedFilter = null;
+ } finally {
+ forkedLock.unlock();
+ }
+ } finally {
+ lock.writeLock().unlock();
+ }
+ }
+
+ public void discard() {
+ lock.writeLock().lock();
+ try {
+ if (forkedFilter == null)
+ return;
+ forkedFilter.close();
+ forkedFilter = null;
+ } finally {
+ lock.writeLock().unlock();
+ }
+ }
+
+ //-- Misc.
+ /**
+ * Calculate optimal K value
+ *
+ * @param filterLength
+ * filter length in bits
+	 * @param maxKey expected maximum number of keys
+ * @return optimal K
+ */
+ public static int optimialK(int filterLength, long maxKey) {
+ long k = Math.round(Math.log(2) * filterLength / maxKey);
+
+ if (k > 64)
+ k = 64;
+ if (k < 1)
+ k = 1;
+
+ return (int) k;
+ }
+
+ public int getK() {
+ return k;
+ }
+
+ protected boolean needRebuild;
+
+ public boolean needRebuild() {
+ boolean _needRebuild = needRebuild;
+ needRebuild = false;
+ return _needRebuild;
+
+ }
+
+ public void force() {
+ if (filter instanceof MappedByteBuffer) {
+ ((MappedByteBuffer) filter).force();
+ }
+ }
+
+ public void close() {
+ if (filter != null) {
+ force();
+ }
+ filter = null;
+ forkedFilter = null;
+ }
+
+ @Override
+ protected void finalize() {
+ close();
+ }
+}
Property changes on: trunk/freenet/src/freenet/support/BloomFilter.java
___________________________________________________________________
Added: svn:mergeinfo
+
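
A short usage sketch of the factory API in the copied BloomFilter above. The class and method names are exactly those added in this commit; only the surrounding main() is hypothetical, and freenet.support plus the mantissa MersenneTwister must be on the classpath:

	public class BloomUsageSketch {
		public static void main(String[] args) {
			int lengthInBits = 1 << 16;                        // must be a multiple of 8
			int k = BloomFilter.optimialK(lengthInBits, 5000); // ~ ln(2) * bits / keys, clamped to [1, 64]
			BloomFilter filter = BloomFilter.createFilter(lengthInBits, k, false); // false = binary, true = counting
			byte[] key = new byte[] { 1, 2, 3, 4 };
			filter.addKey(key);
			// checkFilter() may return false positives, but never false negatives:
			System.out.println(filter.checkFilter(key));       // always true after addKey()
		}
	}
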
Deleted: trunk/freenet/src/freenet/support/CountingBloomFilter.java
===================================================================
--- trunk/freenet/src/freenet/support/CountingBloomFilter.java 2009-04-01
20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/support/CountingBloomFilter.java 2009-04-01
20:34:09 UTC (rev 26322)
@@ -1,102 +0,0 @@
-/* This code is part of Freenet. It is distributed under the GNU General
- * Public License, version 2 (or at your option any later version). See
- * http://www.gnu.org/ for further details of the GPL. */
-package freenet.support;
-
-import java.io.File;
-import java.io.IOException;
-import java.io.RandomAccessFile;
-import java.nio.ByteBuffer;
-import java.nio.channels.FileChannel.MapMode;
-
-/**
- * @author sdiz
- */
-public class CountingBloomFilter extends BloomFilter {
- /**
- * Constructor
- *
- * @param length
- * length in bits
- */
- protected CountingBloomFilter(int length, int k) {
- super(length, k);
- filter = ByteBuffer.allocate(this.length / 4);
- }
-
- /**
- * Constructor
- *
- * @param file
- * disk file
- * @param length
- * length in bits
- * @throws IOException
- */
- protected CountingBloomFilter(File file, int length, int k) throws
IOException {
- super(length, k);
- int fileLength = length / 4;
- if (!file.exists() || file.length() != fileLength)
- needRebuild = true;
-
- RandomAccessFile raf = new RandomAccessFile(file, "rw");
- raf.setLength(fileLength);
- filter = raf.getChannel().map(MapMode.READ_WRITE, 0,
fileLength).load();
- }
-
- public CountingBloomFilter(int length, int k, byte[] buffer) {
- super(length, k);
- assert(buffer.length == length / 4);
- filter = ByteBuffer.wrap(buffer);
- }
-
- @Override
- public boolean getBit(int offset) {
- byte b = filter.get(offset / 4);
- byte v = (byte) ((b >>> offset % 4 * 2) & 3);
-
- return v != 0;
- }
-
- @Override
- public void setBit(int offset) {
- byte b = filter.get(offset / 4);
- byte v = (byte) ((b >>> offset % 4 * 2) & 3);
-
- if (v == 3)
- return; // overflow
-
- b &= ~(3 << offset % 4 * 2); // unset bit
- b |= (v + 1) << offset % 4 * 2; // set bit
-
- filter.put(offset / 4, b);
- }
-
- @Override
- public void unsetBit(int offset) {
- byte b = filter.get(offset / 4);
- byte v = (byte) ((b >>> offset % 4 * 2) & 3);
-
- if (v == 0 || v == 3)
- return; // overflow / underflow
-
- b &= ~(3 << offset % 4 * 2); // unset bit
- b |= (v - 1) << offset % 4 * 2; // set bit
-
- filter.put(offset / 4, b);
- }
-
- @Override
- public void fork(int k) {
- lock.writeLock().lock();
- try {
- File tempFile = File.createTempFile("bloom-", ".tmp");
- tempFile.deleteOnExit();
- forkedFilter = new CountingBloomFilter(tempFile,
length, k);
- } catch (IOException e) {
- forkedFilter = new CountingBloomFilter(length, k);
- } finally {
- lock.writeLock().unlock();
- }
- }
-}
Copied: trunk/freenet/src/freenet/support/CountingBloomFilter.java (from rev 26320, branches/db4o/freenet/src/freenet/support/CountingBloomFilter.java)
===================================================================
--- trunk/freenet/src/freenet/support/CountingBloomFilter.java
(rev 0)
+++ trunk/freenet/src/freenet/support/CountingBloomFilter.java 2009-04-01
20:34:09 UTC (rev 26322)
@@ -0,0 +1,112 @@
+/* This code is part of Freenet. It is distributed under the GNU General
+ * Public License, version 2 (or at your option any later version). See
+ * http://www.gnu.org/ for further details of the GPL. */
+package freenet.support;
+
+import java.io.File;
+import java.io.IOException;
+import java.io.RandomAccessFile;
+import java.nio.ByteBuffer;
+import java.nio.channels.FileChannel.MapMode;
+
+/**
+ * @author sdiz
+ */
+public class CountingBloomFilter extends BloomFilter {
+
+ private boolean warnOnRemoveFromEmpty;
+
+ public void setWarnOnRemoveFromEmpty() {
+ warnOnRemoveFromEmpty = true;
+ }
+
+ /**
+ * Constructor
+ *
+ * @param length
+ * length in bits
+ */
+ public CountingBloomFilter(int length, int k) {
+ super(length, k);
+ filter = ByteBuffer.allocate(this.length / 4);
+ }
+
+ /**
+ * Constructor
+ *
+ * @param file
+ * disk file
+ * @param length
+ * length in bits
+ * @throws IOException
+ */
+	protected CountingBloomFilter(File file, int length, int k) throws IOException {
+ super(length, k);
+ int fileLength = length / 4;
+ if (!file.exists() || file.length() != fileLength)
+ needRebuild = true;
+
+ RandomAccessFile raf = new RandomAccessFile(file, "rw");
+ raf.setLength(fileLength);
+		filter = raf.getChannel().map(MapMode.READ_WRITE, 0, fileLength).load();
+ }
+
+ public CountingBloomFilter(int length, int k, byte[] buffer) {
+ super(length, k);
+ assert(buffer.length == length / 4);
+ filter = ByteBuffer.wrap(buffer);
+ }
+
+ @Override
+ public boolean getBit(int offset) {
+ byte b = filter.get(offset / 4);
+ byte v = (byte) ((b >>> offset % 4 * 2) & 3);
+
+ return v != 0;
+ }
+
+ @Override
+ public void setBit(int offset) {
+ byte b = filter.get(offset / 4);
+ byte v = (byte) ((b >>> offset % 4 * 2) & 3);
+
+ if (v == 3)
+ return; // overflow
+
+ b &= ~(3 << offset % 4 * 2); // unset bit
+ b |= (v + 1) << offset % 4 * 2; // set bit
+
+ filter.put(offset / 4, b);
+ }
+
+ @Override
+ public void unsetBit(int offset) {
+ byte b = filter.get(offset / 4);
+ byte v = (byte) ((b >>> offset % 4 * 2) & 3);
+
+ if (v == 0 && warnOnRemoveFromEmpty)
+			Logger.error(this, "Unsetting bit but already unset - probable double remove, can cause false negatives, is very bad!", new Exception("error"));
+
+ if (v == 0 || v == 3)
+ return; // overflow / underflow
+
+ b &= ~(3 << offset % 4 * 2); // unset bit
+ b |= (v - 1) << offset % 4 * 2; // set bit
+
+ filter.put(offset / 4, b);
+ }
+
+ @Override
+ public void fork(int k) {
+ lock.writeLock().lock();
+ try {
+ File tempFile = File.createTempFile("bloom-", ".tmp");
+ tempFile.deleteOnExit();
+			forkedFilter = new CountingBloomFilter(tempFile, length, k);
+ } catch (IOException e) {
+ forkedFilter = new CountingBloomFilter(length, k);
+ } finally {
+ lock.writeLock().unlock();
+ }
+ }
+}
Property changes on: trunk/freenet/src/freenet/support/CountingBloomFilter.java
___________________________________________________________________
Added: svn:mergeinfo
+
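
The counting variant above packs a 2-bit saturating counter per slot, four to a byte, hence the length/4 bytes of storage. A counter sticks at 3 ("overflow") so that an over-full slot is never decremented back down into a false negative. A standalone sketch of the shift arithmetic used by setBit/unsetBit (hypothetical class name):

	public class CounterSketch {
		public static void main(String[] args) {
			byte b = 0;                    // one byte holds four 2-bit counters
			int offset = 5;                // slot 5 -> bits 2-3 of its byte
			int shift = offset % 4 * 2;    // bit position of the 2-bit field
			for (int n = 0; n < 5; n++) {  // five increments; the counter saturates at 3
				byte v = (byte) ((b >>> shift) & 3);
				if (v == 3) break;         // overflow: stick at 3 rather than wrap
				b &= ~(3 << shift);        // clear the field
				b |= (v + 1) << shift;     // write back v+1
			}
			System.out.println((b >>> shift) & 3); // prints 3
		}
	}
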
Copied: trunk/freenet/src/freenet/support/DebuggingHashMap.java (from rev 26320, branches/db4o/freenet/src/freenet/support/DebuggingHashMap.java)
===================================================================
--- trunk/freenet/src/freenet/support/DebuggingHashMap.java
(rev 0)
+++ trunk/freenet/src/freenet/support/DebuggingHashMap.java 2009-04-01
20:34:09 UTC (rev 26322)
@@ -0,0 +1,63 @@
+package freenet.support;
+
+import java.util.HashMap;
+
+import com.db4o.ObjectContainer;
+
+
+@SuppressWarnings("serial")
+public class DebuggingHashMap<K extends Object, V extends Object> extends HashMap<K, V> {
+
+ private static volatile boolean logMINOR;
+
+ static {
+ Logger.registerLogThresholdCallback(new LogThresholdCallback() {
+
+ @Override
+ public void shouldUpdate() {
+ logMINOR = Logger.shouldLog(Logger.MINOR, this);
+ }
+ });
+ }
+
+ public boolean objectCanUpdate(ObjectContainer container) {
+ if(logMINOR)
+			Logger.minor(this, "objectCanUpdate() on DebuggingHashMap "+this+" stored="+container.ext().isStored(this)+" active="+container.ext().isActive(this)+" size="+size(), new Exception("debug"));
+ return true;
+ }
+
+ public boolean objectCanNew(ObjectContainer container) {
+ if(logMINOR)
+			Logger.minor(this, "objectCanNew() on DebuggingHashMap "+this+" stored="+container.ext().isStored(this)+" active="+container.ext().isActive(this)+" size="+size(), new Exception("debug"));
+ return true;
+ }
+
+ public void objectOnUpdate(ObjectContainer container) {
+ if(logMINOR)
+			Logger.minor(this, "objectOnUpdate() on DebuggingHashMap "+this+" stored="+container.ext().isStored(this)+" active="+container.ext().isActive(this)+" size="+size(), new Exception("debug"));
+ }
+
+ public void objectOnNew(ObjectContainer container) {
+ if(logMINOR)
+			Logger.minor(this, "objectOnNew() on DebuggingHashMap "+this+" stored="+container.ext().isStored(this)+" active="+container.ext().isActive(this)+" size="+size(), new Exception("debug"));
+ }
+
+ private transient boolean activating = false;
+
+ public boolean objectCanActivate(ObjectContainer container) {
+ if(logMINOR)
+			Logger.minor(this, "objectCanActivate() on DebuggingHashMap stored="+container.ext().isStored(this)+" active="+container.ext().isActive(this)+" size="+size(), new Exception("debug"));
+
+		/** FIXME: This was an attempt to ensure we always activate to depth 2. It didn't work. :( */
+
+// if(activating) {
+// activating = false;
+// return true;
+// }
+// activating = true;
+// container.activate(this, 2);
+// return false;
+ return true;
+ }
+
+}
\ No newline at end of file
Deleted: trunk/freenet/src/freenet/support/NullBloomFilter.java
===================================================================
--- trunk/freenet/src/freenet/support/NullBloomFilter.java 2009-04-01
20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/support/NullBloomFilter.java 2009-04-01
20:34:09 UTC (rev 26322)
@@ -1,59 +0,0 @@
-/* This code is part of Freenet. It is distributed under the GNU General
- * Public License, version 2 (or at your option any later version). See
- * http://www.gnu.org/ for further details of the GPL. */
-package freenet.support;
-
-/**
- * @author sdiz
- */
-public class NullBloomFilter extends BloomFilter {
- protected NullBloomFilter(int length, int k) {
- super(length, k);
- }
-
- @Override
- public boolean checkFilter(byte[] key) {
- return true;
- }
-
- @Override
- public void addKey(byte[] key) {
- // ignore
- }
-
- @Override
- public void removeKey(byte[] key) {
- // ignore
- }
-
- @Override
- protected boolean getBit(int offset) {
- // ignore
- return true;
- }
-
- @Override
- protected void setBit(int offset) {
- // ignore
- }
-
- @Override
- protected void unsetBit(int offset) {
- // ignore
- }
-
- @Override
- public void fork(int k) {
- return;
- }
-
- @Override
- public void discard() {
- return;
- }
-
- @Override
- public void merge() {
- return;
- }
-}
Copied: trunk/freenet/src/freenet/support/NullBloomFilter.java (from rev 26320, branches/db4o/freenet/src/freenet/support/NullBloomFilter.java)
===================================================================
--- trunk/freenet/src/freenet/support/NullBloomFilter.java
(rev 0)
+++ trunk/freenet/src/freenet/support/NullBloomFilter.java 2009-04-01
20:34:09 UTC (rev 26322)
@@ -0,0 +1,59 @@
+/* This code is part of Freenet. It is distributed under the GNU General
+ * Public License, version 2 (or at your option any later version). See
+ * http://www.gnu.org/ for further details of the GPL. */
+package freenet.support;
+
+/**
+ * @author sdiz
+ */
+public class NullBloomFilter extends BloomFilter {
+ protected NullBloomFilter(int length, int k) {
+ super(length, k);
+ }
+
+ @Override
+ public boolean checkFilter(byte[] key) {
+ return true;
+ }
+
+ @Override
+ public void addKey(byte[] key) {
+ // ignore
+ }
+
+ @Override
+ public void removeKey(byte[] key) {
+ // ignore
+ }
+
+ @Override
+ protected boolean getBit(int offset) {
+ // ignore
+ return true;
+ }
+
+ @Override
+ protected void setBit(int offset) {
+ // ignore
+ }
+
+ @Override
+ protected void unsetBit(int offset) {
+ // ignore
+ }
+
+ @Override
+ public void fork(int k) {
+ return;
+ }
+
+ @Override
+ public void discard() {
+ return;
+ }
+
+ @Override
+ public void merge() {
+ return;
+ }
+}
Copied: trunk/freenet/src/freenet/support/NullObject.java (from rev 26320, branches/db4o/freenet/src/freenet/support/NullObject.java)
===================================================================
--- trunk/freenet/src/freenet/support/NullObject.java
(rev 0)
+++ trunk/freenet/src/freenet/support/NullObject.java 2009-04-01 20:34:09 UTC
(rev 26322)
@@ -0,0 +1,15 @@
+package freenet.support;
+
+/**
+ * A null Object. Used where Object would be used, but can't be, because of db4o's inability
+ * to store raw Objects. Usually this is used for synchronization in dual-use (persistent or
+ * not) classes.
+ *
+ * See http://tracker.db4o.com/browse/COR-1314
+ * @author toad
+ */
+public class NullObject {
+
+ // Nothing
+
+}
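
A sketch of the intended use (hypothetical class, not in this commit): a field that would normally be declared as a plain Object lock becomes a NullObject so that db4o can store the enclosing instance.

	public class DualUseCounter {
		private final NullObject lockObj = new NullObject(); // storable by db4o, unlike a raw Object
		private int count;

		public void increment() {
			synchronized (lockObj) { // behaves exactly like an Object monitor
				count++;
			}
		}
	}
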
Copied: trunk/freenet/src/freenet/support/PrioritizedSerialExecutor.java (from rev 26320, branches/db4o/freenet/src/freenet/support/PrioritizedSerialExecutor.java)
===================================================================
--- trunk/freenet/src/freenet/support/PrioritizedSerialExecutor.java
(rev 0)
+++ trunk/freenet/src/freenet/support/PrioritizedSerialExecutor.java
2009-04-01 20:34:09 UTC (rev 26322)
@@ -0,0 +1,268 @@
+package freenet.support;
+
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.LinkedList;
+import java.util.Map;
+
+import freenet.node.PrioRunnable;
+import freenet.support.io.NativeThread;
+
+public class PrioritizedSerialExecutor implements Executor {
+
+ private final LinkedList<Runnable>[] jobs;
+ private final int priority;
+ private final int defaultPriority;
+ private boolean waiting;
+ private final boolean invertOrder;
+	private final Map<String, Long> timeByJobClasses = new HashMap<String, Long>();
+
+ private String name;
+ private Executor realExecutor;
+ private boolean running;
+
+ private static final int NEWJOB_TIMEOUT = 5*60*1000;
+
+ private final Runner runner = new Runner();
+
+ class Runner implements PrioRunnable {
+
+ Thread current;
+
+ public int getPriority() {
+ return priority;
+ }
+
+ public void run() {
+ long lastDumped = System.currentTimeMillis();
+ synchronized(jobs) {
+ if(current != null) {
+ if(current.isAlive()) {
+						Logger.error(this, "Already running a thread for "+this+" !!", new Exception("error"));
+ return;
+ }
+ }
+ current = Thread.currentThread();
+ }
+ try {
+ while(true) {
+					boolean logMINOR = Logger.shouldLog(Logger.MINOR, this);
+ Runnable job = null;
+ synchronized(jobs) {
+ job = checkQueue();
+ if(job == null) {
+ waiting = true;
+ try {
+							//NB: notify only on adding work or this quits early.
+							jobs.wait(NEWJOB_TIMEOUT);
+						} catch (InterruptedException e) {
+ // Ignore
+ }
+ waiting=false;
+ job = checkQueue();
+ if(job == null) {
+ running=false;
+ return;
+ }
+ }
+ }
+ try {
+					if(logMINOR)
+						Logger.minor(this, "Running job "+job);
+					long start = System.currentTimeMillis();
+					job.run();
+					long end = System.currentTimeMillis();
+					if(logMINOR) {
+						Logger.minor(this, "Job "+job+" took "+(end-start)+"ms");
+						synchronized(timeByJobClasses) {
+							String name = job.toString();
+							if(name.indexOf('@') > 0)
+								name = name.substring(0, name.indexOf('@'));
+							Long l = timeByJobClasses.get(name);
+							if(l != null) {
+								l = new Long(l.longValue() + (end-start));
+							} else {
+								l = new Long(end-start);
+							}
+							timeByJobClasses.put(name, l);
+							if(logMINOR) {
+								Logger.minor(this, "Total for class "+name+" : "+l);
+								if(System.currentTimeMillis() > (lastDumped + 60*1000)) {
+									Iterator i = timeByJobClasses.entrySet().iterator();
+									while(i.hasNext()) {
+										Map.Entry e = (Map.Entry) i.next();
+										Logger.minor(this, "Class "+e.getKey()+" : total time "+e.getValue());
+									}
+									lastDumped = System.currentTimeMillis();
+								}
+							}
+						}
+					}
+				} catch (Throwable t) {
+					Logger.error(this, "Caught "+t, t);
+					Logger.error(this, "While running "+job+" on "+this);
+ }
+ }
+ } finally {
+ synchronized(jobs) {
+ current = null;
+ running = false;
+ }
+ }
+ }
+
+ private Runnable checkQueue() {
+ if(!invertOrder) {
+ for(int i=0;i<jobs.length;i++) {
+ if(!jobs[i].isEmpty()) {
+					if(Logger.shouldLog(Logger.MINOR, this))
+						Logger.minor(this, "Chosen job at priority "+i);
+					return (Runnable) jobs[i].removeFirst();
+ }
+ }
+ } else {
+ for(int i=jobs.length-1;i>=0;i--) {
+ if(!jobs[i].isEmpty()) {
+					if(Logger.shouldLog(Logger.MINOR, this))
+						Logger.minor(this, "Chosen job at priority "+i);
+					return (Runnable) jobs[i].removeFirst();
+ }
+ }
+ }
+ return null;
+ }
+
+ };
+
+ /**
+ *
+ * @param priority
+ * @param internalPriorityCount
+ * @param defaultPriority
+	 * @param invertOrder Set if the priorities are thread priorities. Unset if they are request priorities. D'oh!
+	 */
+	public PrioritizedSerialExecutor(int priority, int internalPriorityCount, int defaultPriority, boolean invertOrder) {
+ jobs = new LinkedList[internalPriorityCount];
+ for(int i=0;i<jobs.length;i++)
+ jobs[i] = new LinkedList<Runnable>();
+ this.priority = priority;
+ this.defaultPriority = defaultPriority;
+ this.invertOrder = invertOrder;
+ }
+
+ public void start(Executor realExecutor, String name) {
+ this.realExecutor=realExecutor;
+ this.name=name;
+ synchronized (jobs) {
+ boolean empty = true;
+ for(int i=0;i<jobs.length;i++) {
+ if(!jobs[i].isEmpty()) {
+ empty = false;
+ break;
+ }
+ }
+ if(!empty)
+				reallyStart(Logger.shouldLog(Logger.MINOR, this));
+ }
+ }
+
+ private void reallyStart(boolean logMINOR) {
+ synchronized(jobs) {
+ if(running) {
+				Logger.error(this, "Not reallyStart()ing: ALREADY RUNNING", new Exception("error"));
+				return;
+			}
+			running=true;
+			if(logMINOR) Logger.minor(this, "Starting thread... "+name+" : "+runner, new Exception("debug"));
+ realExecutor.execute(runner, name);
+ }
+ }
+
+ public void execute(Runnable job, String jobName) {
+ int prio = defaultPriority;
+ if(job instanceof PrioRunnable)
+ prio = ((PrioRunnable) job).getPriority();
+ execute(job, prio, jobName);
+ }
+
+ public void execute(Runnable job, int prio, String jobName) {
+ boolean logMINOR = Logger.shouldLog(Logger.MINOR, this);
+ synchronized(jobs) {
+ if(logMINOR)
+				Logger.minor(this, "Running "+jobName+" : "+job+" priority "+prio+" running="+running+" waiting="+waiting);
+ jobs[prio].addLast(job);
+ jobs.notifyAll();
+ if(!running && realExecutor != null) {
+ reallyStart(logMINOR);
+ }
+ }
+ }
+
+ public void executeNoDupes(Runnable job, int prio, String jobName) {
+ boolean logMINOR = Logger.shouldLog(Logger.MINOR, this);
+ synchronized(jobs) {
+ if(logMINOR)
+				Logger.minor(this, "Running "+jobName+" : "+job+" priority "+prio+" running="+running+" waiting="+waiting);
+			if(jobs[prio].contains(job)) {
+				if(logMINOR)
+					Logger.minor(this, "Not adding duplicate job "+job);
+ return;
+ }
+ jobs[prio].addLast(job);
+ jobs.notifyAll();
+ if(!running && realExecutor != null) {
+ reallyStart(logMINOR);
+ }
+ }
+ }
+
+ public void execute(Runnable job, String jobName, boolean fromTicker) {
+ execute(job, jobName);
+ }
+
+ public int[] runningThreads() {
+ int[] retval = new int[NativeThread.JAVA_PRIORITY_RANGE+1];
+ if (running)
+ retval[priority] = 1;
+ return retval;
+ }
+
+ public int[] waitingThreads() {
+ int[] retval = new int[NativeThread.JAVA_PRIORITY_RANGE+1];
+ synchronized(jobs) {
+ if(waiting)
+ retval[priority] = 1;
+ }
+ return retval;
+ }
+
+ public boolean onThread() {
+ Thread running = Thread.currentThread();
+ synchronized(jobs) {
+ if(runner == null) return false;
+ return runner.current == running;
+ }
+ }
+
+ public int[] runningJobs() {
+ int[] retval = new int[jobs.length];
+ synchronized(jobs) {
+ for(int i=0;i<retval.length;i++)
+ retval[i] = jobs[i].size();
+ }
+ return retval;
+ }
+
+ public int getQueueSize(int priority) {
+ synchronized(jobs) {
+ return jobs[priority].size();
+ }
+ }
+
+ public int getWaitingThreadsCount() {
+ synchronized(jobs) {
+ return (waiting ? 1 : 0);
+ }
+ }
+
+}
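
Usage sketch for the executor above (constructor, start() and execute() signatures are from this commit; the underlying `realExec' Executor and the numeric priorities are assumptions): jobs run strictly one at a time on a lazily started thread that exits after five idle minutes, and queued jobs drain in priority order.

	PrioritizedSerialExecutor serial =
		new PrioritizedSerialExecutor(5 /* thread priority */, 10 /* internal priorities */, 3 /* default */, false);
	serial.start(realExec, "Serial worker"); // realExec: some freenet.support.Executor
	serial.execute(new Runnable() {
		public void run() {
			// runs after everything queued at priorities 0..2 (invertOrder == false)
		}
	}, 3, "example job");
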
Modified: trunk/freenet/src/freenet/support/RandomGrabArray.java
===================================================================
--- trunk/freenet/src/freenet/support/RandomGrabArray.java 2009-04-01
20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/support/RandomGrabArray.java 2009-04-01
20:34:09 UTC (rev 26322)
@@ -1,63 +1,137 @@
package freenet.support;
-import java.util.HashSet;
+import com.db4o.ObjectContainer;
-import freenet.crypt.RandomSource;
+import freenet.client.async.ClientContext;
/**
* An array which supports very fast remove-and-return-a-random-element.
*/
-public class RandomGrabArray {
+public class RandomGrabArray implements RemoveRandom {
- /** Array of items. Non-null's followed by null's. */
- private RandomGrabArrayItem[] reqs;
+ private class Block {
+ RandomGrabArrayItem[] reqs;
+ }
+
+	/** Array of items. Non-null's followed by null's.
+	 * We used to have a Set so we could check quickly whether something is in the set.
+	 * We got rid of this because for persistent requests it is vastly faster to just run the
+	 * loop and check ==, and for non-persistent requests it doesn't matter much. */
+ private Block[] blocks;
/** Index of first null item. */
private int index;
- /** Random source */
- private RandomSource rand;
-	/** What do we already have? FIXME: Replace with a Bloom filter or something (to save
-	 * RAM), or rewrite the whole class as a custom hashset maybe based on the classpath
-	 * HashSet. Note that removeRandom() is *the* common operation, so MUST BE FAST.
- */
- private HashSet<RandomGrabArrayItem> contents;
private final static int MIN_SIZE = 32;
+ private final static int BLOCK_SIZE = 1024;
+ private final boolean persistent;
+ private final int hashCode;
+ private final RemoveRandomParent parent;
- public RandomGrabArray(RandomSource rand) {
- this.reqs = new RandomGrabArrayItem[MIN_SIZE];
+	public RandomGrabArray(boolean persistent, ObjectContainer container, RemoveRandomParent parent) {
+ this.blocks = new Block[] { new Block() };
+ blocks[0].reqs = new RandomGrabArrayItem[MIN_SIZE];
+ this.persistent = persistent;
index = 0;
- this.rand = rand;
- contents = new HashSet<RandomGrabArrayItem>();
+ this.hashCode = super.hashCode();
+ this.parent = parent;
}
- public void add(RandomGrabArrayItem req) {
+ public int hashCode() {
+ return hashCode;
+ }
+
+ public void add(RandomGrabArrayItem req, ObjectContainer container) {
+		if(req.persistent() != persistent) throw new IllegalArgumentException("req.persistent()="+req.persistent()+" but array.persistent="+persistent+" item="+req+" array="+this);
boolean logMINOR = Logger.shouldLog(Logger.MINOR, this);
- if(req.isEmpty()) {
+ if(req.isEmpty(container)) {
if(logMINOR) Logger.minor(this, "Is finished already:
"+req);
return;
}
- req.setParentGrabArray(this);
+ req.setParentGrabArray(this, container);
synchronized(this) {
- if(contents.contains(req)) {
- if(logMINOR) Logger.minor(this, "Already
contains "+req+" : "+this+" size now "+index);
+ int x = 0;
+ int size = 0;
+ if(blocks.length == 1 && index < BLOCK_SIZE) {
+ if(persistent) container.activate(blocks[0], 1);
+ for(int i=0;i<index;i++) {
+ if(blocks[0].reqs[i] == req) {
+ if(persistent)
container.deactivate(blocks[0], 1);
+ return;
+ }
+ }
+ if(index >= blocks[0].reqs.length) {
+				int newSize = Math.min(BLOCK_SIZE, blocks[0].reqs.length*2);
+				RandomGrabArrayItem[] newReqs = new RandomGrabArrayItem[newSize];
+ System.arraycopy(blocks[0].reqs, 0,
newReqs, 0, blocks[0].reqs.length);
+ blocks[0].reqs = newReqs;
+ }
+ blocks[0].reqs[index++] = req;
+ if(persistent) {
+ container.store(blocks[0]);
+ container.store(this);
+ container.deactivate(blocks[0], 1);
+ }
return;
}
- contents.add(req);
- if(index >= reqs.length) {
- RandomGrabArrayItem[] r = new
RandomGrabArrayItem[reqs.length*2];
- System.arraycopy(reqs, 0, r, 0, reqs.length);
- reqs = r;
+ int targetBlock = index / BLOCK_SIZE;
+ for(int i=0;i<blocks.length;i++) {
+ Block block = blocks[i];
+ if(persistent) container.activate(block, 1);
+ if(i != (blocks.length - 1) &&
block.reqs.length != BLOCK_SIZE) {
+ Logger.error(this, "Block "+i+" of
"+blocks.length+" is wrong size: "+block.reqs.length+" should be "+BLOCK_SIZE);
+ }
+ for(int j=0;j<block.reqs.length;j++) {
+ if(x >= index) break;
+ if(block.reqs[j] == req) {
+ if(logMINOR) Logger.minor(this,
"Already contains "+req+" : "+this+" size now "+index);
+ if(persistent)
container.deactivate(block, 1);
+ return;
+ }
+ if(block.reqs[j] == null) {
+ Logger.error(this,
"reqs["+i+"."+j+"] = null on "+this);
+ }
+ x++;
+ }
+ if(persistent && i != targetBlock)
container.deactivate(block, 1);
}
- reqs[index++] = req;
+ int oldBlockLen = blocks.length;
+ if(blocks.length <= targetBlock) {
+ if(logMINOR)
+ Logger.minor(this, "Adding blocks on
"+this);
+ Block[] newBlocks = new Block[targetBlock + 1];
+ System.arraycopy(blocks, 0, newBlocks, 0,
blocks.length);
+ for(int i=blocks.length;i<newBlocks.length;i++)
{
+ newBlocks[i] = new Block();
+ newBlocks[i].reqs = new
RandomGrabArrayItem[BLOCK_SIZE];
+ }
+ blocks = newBlocks;
+ } else {
+ if(persistent)
+ container.activate(blocks[targetBlock],
1);
+ }
+ Block target = blocks[targetBlock];
+ target.reqs[index++ % BLOCK_SIZE] = req;
+ if(persistent) {
+ for(int i=oldBlockLen;i<blocks.length;i++)
+ container.store(blocks[i]);
+ container.store(this);
+ container.store(target);
+ for(int i=oldBlockLen;i<blocks.length;i++)
+ container.deactivate(blocks[i], 1);
+ }
if(logMINOR) Logger.minor(this, "Added: "+req+" to
"+this+" size now "+index);
}
}
-	public RandomGrabArrayItem removeRandom(RandomGrabArrayItemExclusionList excluding) {
+	public RandomGrabArrayItem removeRandom(RandomGrabArrayItemExclusionList excluding, ObjectContainer container, ClientContext context) {
RandomGrabArrayItem ret, oret;
boolean logMINOR = Logger.shouldLog(Logger.MINOR, this);
+ if(logMINOR) Logger.minor(this, "removeRandom() on "+this+"
index="+index);
synchronized(this) {
+ int lastActiveBlock = -1;
+ /** Must be less than BLOCK_SIZE */
final int MAX_EXCLUDED = 10;
int excluded = 0;
+ boolean changedMe = false;
while(true) {
if(index == 0) {
if(logMINOR) Logger.minor(this, "All
null on "+this);
@@ -66,147 +140,385 @@
if(index < MAX_EXCLUDED) {
// Optimise the common case of not many
items, and avoid some spurious errors.
int random = -1;
+ if(persistent)
container.activate(blocks[0], 1);
+ RandomGrabArrayItem[] reqs =
blocks[0].reqs;
while(true) {
int exclude = 0;
int valid = 0;
int validIndex = -1;
int target = 0;
int chosenIndex = -1;
+ RandomGrabArrayItem chosenItem
= null;
+ RandomGrabArrayItem validItem =
null;
for(int i=0;i<index;i++) {
+ // Compact the array.
RandomGrabArrayItem
item = reqs[i];
+ if(persistent)
+
container.activate(item, 1);
if(item == null) {
continue;
- } else
if(item.isEmpty()) {
+ } else
if(item.isEmpty(container)) {
+ changedMe =
true;
+ // We are doing
compaction here. We don't need to swap with the end; we write valid ones to the
target location.
reqs[i] = null;
-
contents.remove(item);
+
item.setParentGrabArray(null, container);
+ if(persistent)
+
container.deactivate(item, 1);
continue;
}
if(i != target) {
+ changedMe =
true;
reqs[i] = null;
reqs[target] =
item;
- }
+ } // else the request
can happily stay where it is
target++;
-
if(excluding.exclude(item)) {
+
if(excluding.exclude(item, container, context)) {
exclude++;
} else {
if(valid ==
random) { // Picked on previous round
chosenIndex = target-1;
+
chosenItem = item;
}
- validIndex =
target-1;
+ if(validIndex
== -1) {
+ // Take
the first valid item
+
validIndex = target-1;
+
validItem = item;
+ }
valid++;
}
+ if(persistent && item
!= chosenItem && item != validItem) {
+ if(logMINOR)
+
Logger.minor(this, "Deactivating "+item);
+
container.deactivate(item, 1);
+
if(container.ext().isActive(item))
+
Logger.error(this, "Still active after deactivation: "+item);
+ else
if(logMINOR)
+
Logger.minor(this, "Deactivated: "+item);
+ }
}
- index = target;
+ if(index != target) {
+ changedMe = true;
+ index = target;
+ }
// We reach this point if 1)
the random number we picked last round is invalid because an item became
cancelled or excluded
// or 2) we are on the first
round anyway.
- if(chosenIndex >= 0) {
- ret = reqs[chosenIndex];
- if(ret.canRemove()) {
-
contents.remove(ret);
- if(chosenIndex
!= index-1) {
-
reqs[chosenIndex] = reqs[index-1];
- }
- index--;
-
ret.setParentGrabArray(null);
+ if(chosenItem != null) {
+ if(persistent &&
validItem != null && validItem != chosenItem)
+
container.deactivate(validItem, 1);
+ changedMe = true;
+ ret = chosenItem;
+ assert(ret ==
reqs[chosenIndex]);
+ if(logMINOR)
Logger.minor(this, "Chosen random item "+ret+" out of "+valid+" total "+index);
+ if(persistent &&
changedMe) {
+
container.store(blocks[0]);
+
container.store(this);
}
- if(logMINOR)
Logger.minor(this, "Chosen random item "+ret+" out of "+valid);
return ret;
}
if(valid == 0 && exclude == 0) {
index = 0;
- if(logMINOR)
Logger.minor(this, "No valid or excluded items");
+ if(persistent) {
+
container.store(blocks[0]);
+
container.store(this);
+ }
+ if(logMINOR)
Logger.minor(this, "No valid or excluded items total "+index);
return null;
} else if(valid == 0) {
- if(logMINOR)
Logger.minor(this, "No valid items, "+exclude+" excluded items");
+ if(persistent &&
changedMe) {
+
container.store(blocks[0]);
+
container.store(this);
+ }
+ if(logMINOR)
Logger.minor(this, "No valid items, "+exclude+" excluded items total "+index);
return null;
} else if(valid == 1) {
- ret = reqs[validIndex];
- if(ret.canRemove()) {
-
contents.remove(ret);
- if(validIndex
!= index-1) {
-
reqs[validIndex] = reqs[index-1];
- }
- index--;
- if(logMINOR)
Logger.minor(this, "No valid or excluded items after removing "+ret);
-
ret.setParentGrabArray(null);
- } else {
- if(logMINOR)
Logger.minor(this, "No valid or excluded items apart from "+ret);
+ ret = validItem;
+ assert(ret ==
reqs[validIndex]);
+ if(logMINOR)
Logger.minor(this, "No valid or excluded items apart from "+ret+" total
"+index);
+ if(persistent &&
changedMe) {
+
container.store(blocks[0]);
+
container.store(this);
}
return ret;
} else {
- random =
rand.nextInt(valid);
+ random =
context.fastWeakRandom.nextInt(valid);
}
}
}
- int i = rand.nextInt(index);
- ret = reqs[i];
+ int i = context.fastWeakRandom.nextInt(index);
+ int blockNo = i / BLOCK_SIZE;
+ if(persistent && blockNo != lastActiveBlock) {
+ if(lastActiveBlock != -1)
+
container.deactivate(blocks[lastActiveBlock], 1);
+ lastActiveBlock = blockNo;
+ container.activate(blocks[blockNo], 1);
+ }
+ ret = blocks[blockNo].reqs[i % BLOCK_SIZE];
if(ret == null) {
Logger.error(this, "reqs["+i+"] =
null");
- index--;
- if(i != index) {
- reqs[i] = reqs[index];
- reqs[index] = null;
- }
+ remove(blockNo, i, container);
+ changedMe = true;
continue;
}
+ if(persistent)
+ container.activate(ret, 1);
oret = ret;
- if(ret.isEmpty()) {
+ if(ret.isEmpty(container)) {
if(logMINOR) Logger.minor(this, "Not
returning because cancelled: "+ret);
ret = null;
+ // Will be removed in the do{} loop
+ // Tell it that it's been removed first.
+ oret.setParentGrabArray(null,
container);
}
- if(ret != null && excluding.exclude(ret)) {
+ if(ret != null && excluding.exclude(ret,
container, context)) {
excluded++;
+ if(persistent)
+ container.deactivate(ret, 1);
if(excluded > MAX_EXCLUDED) {
-					Logger.error(this, "Remove random returning null because "+excluded+" excluded items, length = "+index, new Exception("error"));
+					Logger.normal(this, "Remove random returning null because "+excluded+" excluded items, length = "+index, new Exception("error"));
+ if(persistent && changedMe)
+ container.store(this);
return null;
}
continue;
}
- if(ret != null && !ret.canRemove()) {
+ if(ret != null) {
if(logMINOR) Logger.minor(this,
"Returning (cannot remove): "+ret+" of "+index);
+ if(persistent && changedMe)
+ container.store(this);
return ret;
}
+ // Remove an element.
do {
- reqs[i] = reqs[--index];
- reqs[index] = null;
- if(oret != null)
- contents.remove(oret);
- oret = reqs[i];
- // May as well check whether that is
cancelled too.
- } while (index > i && (oret == null ||
oret.isEmpty()));
+ changedMe = true;
+ remove(blockNo, i, container);
+ if(persistent && oret != null && ret ==
null) // if ret != null we will return it
+ container.deactivate(oret, 1);
+ oret = blocks[blockNo].reqs[i %
BLOCK_SIZE];
+ // Check for nulls, but don't check for
cancelled, since we'd have to activate.
+ } while (index > i && oret == null);
// Shrink array
- if((index < reqs.length / 4) && (reqs.length >
MIN_SIZE)) {
+ if(blocks.length == 1 && index <
blocks[0].reqs.length / 4) {
+ changedMe = true;
// Shrink array
int newSize = Math.max(index * 2,
MIN_SIZE);
RandomGrabArrayItem[] r = new
RandomGrabArrayItem[newSize];
- System.arraycopy(reqs, 0, r, 0,
r.length);
- reqs = r;
+ System.arraycopy(blocks[0].reqs, 0, r,
0, r.length);
+ blocks[0].reqs = r;
+ if(persistent)
+ container.store(this);
+ } else if(blocks.length > 1 &&
+ (((index + (BLOCK_SIZE/2)) /
BLOCK_SIZE) + 1) <
+ blocks.length) {
+ if(logMINOR)
+ Logger.minor(this, "Shrinking
blocks on "+this);
+ Block[] newBlocks = new Block[((index +
(BLOCK_SIZE/2)) / BLOCK_SIZE) + 1];
+ System.arraycopy(blocks, 0, newBlocks,
0, newBlocks.length);
+ if(persistent) {
+ container.store(this);
+ for(int
x=newBlocks.length;x<blocks.length;x++)
+
container.delete(blocks[x]);
+ }
+ blocks = newBlocks;
}
- if((ret != null) && !ret.isEmpty()) break;
+ if(ret != null) break;
}
}
if(logMINOR) Logger.minor(this, "Returning "+ret+" of "+index);
- ret.setParentGrabArray(null);
+ ret.setParentGrabArray(null, container);
+ if(persistent)
+ container.store(this);
return ret;
}
- public void remove(RandomGrabArrayItem it) {
+ /**
+	 * blockNo is assumed to be already active. The last block is assumed not to be.
+ */
+ private void remove(int blockNo, int i, ObjectContainer container) {
+ index--;
+ int endBlock = index / BLOCK_SIZE;
+ if(blocks.length == 1 || blockNo == endBlock) {
+ RandomGrabArrayItem[] items = blocks[blockNo].reqs;
+ int idx = index % BLOCK_SIZE;
+ items[i % BLOCK_SIZE] = items[idx];
+ items[idx] = null;
+ if(persistent)
+ container.store(blocks[blockNo]);
+ } else {
+ RandomGrabArrayItem[] toItems = blocks[blockNo].reqs;
+ if(persistent) container.activate(blocks[endBlock], 1);
+ RandomGrabArrayItem[] endItems = blocks[endBlock].reqs;
+ toItems[i % BLOCK_SIZE] = endItems[index % BLOCK_SIZE];
+ endItems[index % BLOCK_SIZE] = null;
+ if(persistent) {
+ container.store(blocks[blockNo]);
+ container.store(blocks[endBlock]);
+ container.deactivate(blocks[endBlock], 1);
+ }
+ }
+ }
+
+ /**
+	 * FIXME: This does not remove from higher level structures! This will only
+	 * be removed from the SectoredRGA the next time the request selection loop
+	 * runs - and if there are higher priority requests, and hard priorities are
+	 * enabled, it may not reach this.
+ * @param it
+ * @param container
+ */
+ public void remove(RandomGrabArrayItem it, ObjectContainer container) {
+ if(Logger.shouldLog(Logger.MINOR, this))
+ Logger.minor(this, "Removing "+it+" from "+this);
+ boolean matched = false;
+ boolean empty = false;
synchronized(this) {
- if(!contents.contains(it)) return;
- contents.remove(it);
- for(int i=0;i<index;i++) {
- if(reqs[i] == null) continue;
- if((reqs[i] == it) || reqs[i].equals(it)) {
- reqs[i] = reqs[--index];
- reqs[index] = null;
- break;
+ if(blocks.length == 1) {
+ Block block = blocks[0];
+ if(persistent)
+ container.activate(block, 1);
+ for(int i=0;i<index;i++) {
+ if(block.reqs[i] == it) {
+ block.reqs[i] =
block.reqs[--index];
+ block.reqs[index] = null;
+ matched = true;
+ if(persistent)
+ container.store(block);
+ break;
+ }
}
+ if(index == 0) empty = true;
+ if(persistent)
+ container.deactivate(block, 1);
+ } else {
+ int x = 0;
+ for(int i=0;i<blocks.length;i++) {
+ Block block = blocks[i];
+ if(persistent)
+ container.activate(block, 1);
+ for(int j=0;j<block.reqs.length;j++) {
+ if(x >= index) break;
+ x++;
+ if(block.reqs[i] == it) {
+ int pullFrom = --index;
+ int idx = pullFrom %
BLOCK_SIZE;
+ int endBlock = pullFrom
/ BLOCK_SIZE;
+ if(i == endBlock) {
+ block.reqs[j] =
block.reqs[idx];
+ block.reqs[idx]
= null;
+ } else {
+ Block fromBlock
= blocks[endBlock];
+ if(persistent)
+
container.activate(fromBlock, 1);
+ block.reqs[j] =
fromBlock.reqs[idx];
+
fromBlock.reqs[idx] = null;
+ if(persistent) {
+
container.store(fromBlock);
+
container.deactivate(fromBlock, 1);
+ }
+ }
+ if(persistent)
+
container.store(block);
+ matched = true;
+ break;
+ }
+ }
+ if(persistent)
+ container.deactivate(block, 1);
+ }
+ if(index == 0) empty = true;
}
}
- it.setParentGrabArray(null);
+ if(it.getParentGrabArray() == this)
+ it.setParentGrabArray(null, container);
+ else
+ Logger.error(this, "Removing item "+it+" from "+this+"
but RGA is "+it.getParentGrabArray(), new Exception("debug"));
+ if(!matched) return;
+ if(persistent) {
+ container.store(this);
+ }
+ if(empty && parent != null) {
+ boolean active = true;
+ if(persistent) active =
container.ext().isActive(parent);
+ if(!active) container.activate(parent, 1);
+ parent.maybeRemove(this, container);
+ if(!active) container.deactivate(parent, 1);
+ }
}
public synchronized boolean isEmpty() {
return index == 0;
}
+
+ public boolean persistent() {
+ return persistent;
+ }
+
+ public boolean contains(RandomGrabArrayItem item, ObjectContainer
container) {
+ synchronized(this) {
+ if(blocks.length == 1) {
+ Block block = blocks[0];
+ if(persistent)
+ container.activate(block, 1);
+ for(int i=0;i<index;i++) {
+ if(block.reqs[i] == item) {
+ if(persistent)
+
container.deactivate(block, 1);
+ return true;
+ }
+ }
+ if(persistent)
+ container.deactivate(block, 1);
+ } else {
+ int x = 0;
+ for(int i=0;i<blocks.length;i++) {
+ Block block = blocks[i];
+ if(persistent)
+ container.activate(block, 1);
+ for(int j=0;j<block.reqs.length;j++) {
+ if(x >= index) break;
+ x++;
+ if(block.reqs[i] == item) {
+ if(persistent)
+
container.deactivate(block, 1);
+ return true;
+ }
+ }
+ if(persistent)
+ container.deactivate(block, 1);
+ }
+ }
+ }
+ return false;
+ }
+
+ public synchronized int size() {
+ return index;
+ }
+
+ public synchronized RandomGrabArrayItem get(int idx, ObjectContainer
container) {
+ int blockNo = idx / BLOCK_SIZE;
+ if(persistent)
+ container.activate(blocks[blockNo], 1);
+ RandomGrabArrayItem item = blocks[blockNo].reqs[idx %
BLOCK_SIZE];
+ if(persistent)
+ container.deactivate(blocks[blockNo], 1);
+ return item;
+ }
+
+
+ public void removeFrom(ObjectContainer container) {
+ if(blocks != null) {
+ for(Block block : blocks) {
+ container.activate(block, 1);
+ for(RandomGrabArrayItem item : block.reqs) {
+ if(item != null) {
+ Logger.error(this, "VALID ITEM
WHILE DELETING BLOCK: "+item+" on "+this);
+ return;
+ }
+ }
+ container.delete(block);
+ }
+ }
+ container.delete(this);
+ }
}
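
The rewrite above replaces RandomGrabArray's single flat array with fixed 1024-slot blocks, so db4o can activate one Block at a time instead of the whole structure: item i lives at blocks[i / BLOCK_SIZE].reqs[i % BLOCK_SIZE], and removal swaps the last live item into the freed slot. A persistence-free sketch of that swap-remove (hypothetical names, not Freenet code):

	public class BlockRemoveSketch {
		static final int BLOCK_SIZE = 1024;

		// Remove slot i from a list of 'index' items stored in fixed-size blocks,
		// by moving the last live item into the hole; returns the new length.
		static int remove(Object[][] blocks, int index, int i) {
			index--;                                        // last live item is at 'index'
			blocks[i / BLOCK_SIZE][i % BLOCK_SIZE] = blocks[index / BLOCK_SIZE][index % BLOCK_SIZE];
			blocks[index / BLOCK_SIZE][index % BLOCK_SIZE] = null;
			return index;
		}

		public static void main(String[] args) {
			Object[][] blocks = { new Object[BLOCK_SIZE] };
			blocks[0][0] = "a"; blocks[0][1] = "b"; blocks[0][2] = "c";
			int len = remove(blocks, 3, 0);                 // remove "a"; "c" fills slot 0
			System.out.println(blocks[0][0] + " " + len);   // prints "c 2"
		}
	}
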
Modified: trunk/freenet/src/freenet/support/RandomGrabArrayItem.java
===================================================================
--- trunk/freenet/src/freenet/support/RandomGrabArrayItem.java 2009-04-01
20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/support/RandomGrabArrayItem.java 2009-04-01
20:34:09 UTC (rev 26322)
@@ -1,5 +1,7 @@
package freenet.support;
+import com.db4o.ObjectContainer;
+
public interface RandomGrabArrayItem {
 	/** If true, will be automatically removed from the RGA, and not returned.
@@ -10,21 +12,19 @@
*
 	 * LOCKING: Should hold as few locks as possible as this needs to be called while
 	 * holding the RGA lock(s). */
- public boolean isEmpty();
+ public boolean isEmpty(ObjectContainer container);
- /** Can this item be removed from the queue after it has been handled?
- * Called immediately after finding a request to remove.
-	 * If returns false, the item will remain in the queue and may be chosen again.
-	 * Note that in the case of SendableGet's, this is called before chooseKey(), so
-	 * it needs to return true if there are less than two requests on this object. */
- public boolean canRemove();
-
 	/** Does this RandomGrabArrayItem support remembering where it is registered? */
public boolean knowsParentGrabArray();
 	/** Notify the item that it has been registered on a specific RandomGrabArray */
- public void setParentGrabArray(RandomGrabArray parent);
+	public void setParentGrabArray(RandomGrabArray parent, ObjectContainer container);
/** If the item remembers its parent RandomGrabArray, return it */
public RandomGrabArray getParentGrabArray();
+
+ /** This must be the same as the value passed into the RGA constructor.
+	 * If the user doesn't implement persistence, simply return false here and
+	 * pass false into the constructor. */
+ public boolean persistent();
}
Modified:
trunk/freenet/src/freenet/support/RandomGrabArrayItemExclusionList.java
===================================================================
--- trunk/freenet/src/freenet/support/RandomGrabArrayItemExclusionList.java
2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/support/RandomGrabArrayItemExclusionList.java
2009-04-01 20:34:09 UTC (rev 26322)
@@ -3,11 +3,15 @@
* http://www.gnu.org/ for further details of the GPL. */
package freenet.support;
+import com.db4o.ObjectContainer;
+
+import freenet.client.async.ClientContext;
+
public interface RandomGrabArrayItemExclusionList {
/**
* Whether this item can be returned right now.
*/
- public boolean exclude(RandomGrabArrayItem item);
+	public boolean exclude(RandomGrabArrayItem item, ObjectContainer container, ClientContext context);
}
Modified: trunk/freenet/src/freenet/support/RandomGrabArrayWithClient.java
===================================================================
--- trunk/freenet/src/freenet/support/RandomGrabArrayWithClient.java
2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/support/RandomGrabArrayWithClient.java
2009-04-01 20:34:09 UTC (rev 26322)
@@ -1,13 +1,13 @@
package freenet.support;
-import freenet.crypt.RandomSource;
+import com.db4o.ObjectContainer;
public class RandomGrabArrayWithClient extends RandomGrabArray implements
RemoveRandomWithObject {
final Object client;
- public RandomGrabArrayWithClient(Object client, RandomSource rand) {
- super(rand);
+	public RandomGrabArrayWithClient(Object client, boolean persistent, ObjectContainer container, RemoveRandomParent parent) {
+ super(persistent, container, parent);
this.client = client;
}
Modified: trunk/freenet/src/freenet/support/RemoveRandom.java
===================================================================
--- trunk/freenet/src/freenet/support/RemoveRandom.java 2009-04-01 20:12:14 UTC
(rev 26321)
+++ trunk/freenet/src/freenet/support/RemoveRandom.java 2009-04-01 20:34:09 UTC
(rev 26322)
@@ -1,8 +1,16 @@
package freenet.support;
+import com.db4o.ObjectContainer;
+
+import freenet.client.async.ClientContext;
+
public interface RemoveRandom {
/** Remove and return a random RandomGrabArrayItem. Should be fast. */
-	public RandomGrabArrayItem removeRandom(RandomGrabArrayItemExclusionList excluding);
+	public RandomGrabArrayItem removeRandom(RandomGrabArrayItemExclusionList excluding, ObjectContainer container, ClientContext context);
+
+ /** Just for consistency checking */
+ public boolean persistent();
+ public void removeFrom(ObjectContainer container);
}
Copied: trunk/freenet/src/freenet/support/RemoveRandomParent.java (from rev 26320, branches/db4o/freenet/src/freenet/support/RemoveRandomParent.java)
===================================================================
--- trunk/freenet/src/freenet/support/RemoveRandomParent.java
(rev 0)
+++ trunk/freenet/src/freenet/support/RemoveRandomParent.java 2009-04-01
20:34:09 UTC (rev 26322)
@@ -0,0 +1,13 @@
+package freenet.support;
+
+import com.db4o.ObjectContainer;
+
+public interface RemoveRandomParent {
+
+ /** If the specified RemoveRandom is empty, remove it.
+	 * LOCKING: Must be called with no locks held, particularly no locks on the
+	 * RemoveRandom, because we take locks in order!
+ */
+ public void maybeRemove(RemoveRandom r, ObjectContainer container);
+
+}
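
The LOCKING comment above is the whole point of this interface: the parent locks itself first and may then lock the child, so a child that calls maybeRemove() while still holding its own lock risks a lock-order deadlock. A sketch of a safe caller (hypothetical, not in this commit):

	// Inside a child RemoveRandom, after a removal may have emptied it:
	boolean nowEmpty;
	synchronized (this) {                 // decide emptiness under our own lock...
		nowEmpty = isEmpty();
	}                                     // ...then release it before notifying upward
	if (nowEmpty && parent != null)
		parent.maybeRemove(this, container); // called with no locks held
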
Modified: trunk/freenet/src/freenet/support/RemoveRandomWithObject.java
===================================================================
--- trunk/freenet/src/freenet/support/RemoveRandomWithObject.java
2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/support/RemoveRandomWithObject.java
2009-04-01 20:34:09 UTC (rev 26322)
@@ -1,9 +1,13 @@
package freenet.support;
+import com.db4o.ObjectContainer;
+
public interface RemoveRandomWithObject extends RemoveRandom {
public Object getObject();
public boolean isEmpty();
+
+ public void removeFrom(ObjectContainer container);
}
Modified: trunk/freenet/src/freenet/support/SectoredRandomGrabArray.java
===================================================================
--- trunk/freenet/src/freenet/support/SectoredRandomGrabArray.java
2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/support/SectoredRandomGrabArray.java
2009-04-01 20:34:09 UTC (rev 26322)
@@ -1,70 +1,126 @@
package freenet.support;
-import java.util.HashMap;
+import com.db4o.ObjectContainer;
-import freenet.crypt.RandomSource;
+import freenet.client.async.ClientContext;
/**
 * Like RandomGrabArray, but there is an equal chance of any given client's requests being returned.
*/
-public class SectoredRandomGrabArray implements RemoveRandom {
+public class SectoredRandomGrabArray implements RemoveRandom, RemoveRandomParent {
-	private final HashMap<Object, RemoveRandomWithObject> grabArraysByClient;
+ /*
+ * Yes, this is O(n). No, I don't care.
+ *
+	 * Using a Db4oMap results in stuff getting reactivated during the commit
+	 * phase, and not deactivated. This makes keeping stuff that shouldn't be
+	 * activated deactivated impossible, resulting in more memory usage, more
+	 * full GCs, more object churn, and hence more CPU usage. Also Db4oMap is
+	 * deprecated.
+ *
+ * Using a HashMap populated in objectOnActivate() doesn't work either,
+ * because it ends up comparing deactivated clients with activated ones.
+	 * This will result in NPEs, and unnecessary code complexity to fix them.
+ *
+ * IMHO it's not worth bothering with a hashtable if it's less than 1000
+ * or so items anyway. If size does become a problem we will need to
+ * implement our own activation aware hashtable class, which stores the
+	 * full hashCode, and matches on == object identity, so that we don't need
+ * to activate on comparison.
+ */
private RemoveRandomWithObject[] grabArrays;
- private final RandomSource rand;
+ private Object[] grabClients;
+ private final boolean persistent;
+ private final RemoveRandomParent parent;
- public SectoredRandomGrabArray(RandomSource rand) {
- this.rand = rand;
-		this.grabArraysByClient = new HashMap<Object, RemoveRandomWithObject>();
+	public SectoredRandomGrabArray(boolean persistent, ObjectContainer container, RemoveRandomParent parent) {
+ this.persistent = persistent;
+ grabClients = new Object[0];
grabArrays = new RemoveRandomWithObject[0];
+ this.parent = parent;
}
/**
* Add directly to a RandomGrabArrayWithClient under us. */
- public synchronized void add(Object client, RandomGrabArrayItem item) {
+	public synchronized void add(Object client, RandomGrabArrayItem item, ObjectContainer container) {
+		if(item.persistent() != persistent) throw new IllegalArgumentException("item.persistent()="+item.persistent()+" but array.persistent="+persistent+" item="+item+" array="+this);
boolean logMINOR = Logger.shouldLog(Logger.MINOR, this);
RandomGrabArrayWithClient rga;
- if(!grabArraysByClient.containsKey(client)) {
+ int clientIndex = haveClient(client);
+ if(clientIndex == -1) {
if(logMINOR)
Logger.minor(this, "Adding new RGAWithClient
for "+client+" on "+this+" for "+item);
- rga = new RandomGrabArrayWithClient(client, rand);
-			RemoveRandomWithObject[] newArrays = new RemoveRandomWithObject[grabArrays.length+1];
-			System.arraycopy(grabArrays, 0, newArrays, 0, grabArrays.length);
- newArrays[grabArrays.length] = rga;
- grabArrays = newArrays;
- grabArraysByClient.put(client, rga);
+			rga = new RandomGrabArrayWithClient(client, persistent, container, this);
+ addElement(client, rga);
+ if(persistent) {
+ container.store(rga);
+ container.store(this);
+ }
} else {
-			rga = (RandomGrabArrayWithClient) grabArraysByClient.get(client);
+			rga = (RandomGrabArrayWithClient) grabArrays[clientIndex];
+ if(persistent)
+ container.activate(rga, 1);
}
if(logMINOR)
Logger.minor(this, "Adding "+item+" to RGA "+rga+" for
"+client);
- rga.add(item);
+ // rga is auto-activated to depth 1...
+ rga.add(item, container);
+ if(persistent)
+ // now deactivate to save memory
+ container.deactivate(rga, 1);
if(logMINOR)
Logger.minor(this, "Size now "+grabArrays.length+" on
"+this);
}
+ private void addElement(Object client, RemoveRandomWithObject rga) {
+ int len = grabArrays.length;
+ RemoveRandomWithObject[] newArrays = new
RemoveRandomWithObject[len+1];
+ System.arraycopy(grabArrays, 0, newArrays, 0, len);
+ newArrays[len] = rga;
+ grabArrays = newArrays;
+
+ Object[] newClients = new Object[len+1];
+ System.arraycopy(grabClients, 0, newClients, 0, len);
+ newClients[len] = client;
+ grabClients = newClients;
+ }
+
+ private synchronized int haveClient(Object client) {
+ for(int i=0;i<grabClients.length;i++) {
+ if(grabClients[i] == client) return i;
+ }
+ return -1;
+ }
+
/**
* Get a grabber. This lets us use things other than
RandomGrabArrayWithClient's, so don't mix calls
* to add() with calls to getGrabber/addGrabber!
*/
public synchronized RemoveRandomWithObject getGrabber(Object client) {
- return grabArraysByClient.get(client);
+ int idx = haveClient(client);
+ if(idx == -1) return null;
+ else return grabArrays[idx];
}
+
+ public synchronized Object getClient(int x) {
+ return grabClients[x];
+ }
/**
* Put a grabber. This lets us use things other than
RandomGrabArrayWithClient's, so don't mix calls
* to add() with calls to getGrabber/addGrabber!
*/
- public synchronized void addGrabber(Object client,
RemoveRandomWithObject requestGrabber) {
- grabArraysByClient.put(client, requestGrabber);
- RemoveRandomWithObject[] newArrays = new
RemoveRandomWithObject[grabArrays.length+1];
- System.arraycopy(grabArrays, 0, newArrays, 0,
grabArrays.length);
- newArrays[grabArrays.length] = requestGrabber;
- grabArrays = newArrays;
+ public synchronized void addGrabber(Object client,
RemoveRandomWithObject requestGrabber, ObjectContainer container) {
+ if(requestGrabber.getObject() != client)
+ throw new IllegalArgumentException("Client not equal to
RemoveRandomWithObject's client: client="+client+" rr="+requestGrabber+" his
object="+requestGrabber.getObject());
+ addElement(client, requestGrabber);
+ if(persistent) {
+ container.store(this);
+ }
}
- public synchronized RandomGrabArrayItem
removeRandom(RandomGrabArrayItemExclusionList excluding) {
+ public synchronized RandomGrabArrayItem
removeRandom(RandomGrabArrayItemExclusionList excluding, ObjectContainer
container, ClientContext context) {
boolean logMINOR = Logger.shouldLog(Logger.MINOR, this);
/** Count of arrays that have items but didn't return anything
because of exclusions */
int excluded = 0;
@@ -74,87 +130,196 @@
if(grabArrays.length == 1) {
// Optimise the common case
RemoveRandomWithObject rga = grabArrays[0];
- RandomGrabArrayItem item =
rga.removeRandom(excluding);
+ if(persistent)
+ container.activate(rga, 1);
+ RandomGrabArrayItem item =
rga.removeRandom(excluding, container, context);
if(rga.isEmpty()) {
if(logMINOR)
- Logger.minor(this, "Removing
only grab array (0) : "+rga+" for "+rga.getObject()+" (is empty)");
- Object client = rga.getObject();
- grabArraysByClient.remove(client);
+ Logger.minor(this, "Removing
only grab array (0) : "+rga);
grabArrays = new
RemoveRandomWithObject[0];
+ grabClients = new Object[0];
+ if(persistent) {
+ container.store(this);
+ rga.removeFrom(container);
+ }
}
if(logMINOR)
- Logger.minor(this, "Returning (one item
only) "+item+" for "+rga+" for "+rga.getObject());
+ Logger.minor(this, "Returning (one item
only) "+item+" for "+rga);
return item;
}
if(grabArrays.length == 2) {
// Another simple common case
- int x = rand.nextBoolean() ? 1 : 0;
+ int x = context.fastWeakRandom.nextBoolean() ? 1 : 0;
RemoveRandomWithObject rga = grabArrays[x];
+ // Null-check before activating, so a corrupted (null) slot cannot NPE below.
+ if(rga == null) {
+ Logger.error(this, "rga = null on "+this);
+ if(container != null && !container.ext().isActive(this))
+ Logger.error(this, "NOT ACTIVE!!");
+ if(grabArrays[1-x] == null) {
+ Logger.error(this, "other rga is also null on "+this);
+ } else {
+ Logger.error(this, "grabArrays["+(1-x)+"] is valid but ["+x+"] is null, correcting...");
+ grabArrays = new RemoveRandomWithObject[] { grabArrays[1-x] };
+ grabClients = new Object[] { grabClients[1-x] };
+ continue;
+ }
+ }
+ if(persistent)
+ container.activate(rga, 1);
RemoveRandomWithObject firstRGA = rga;
- RandomGrabArrayItem item = rga.removeRandom(excluding);
+ RandomGrabArrayItem item = rga.removeRandom(excluding, container, context);
if(item == null) {
x = 1-x;
rga = grabArrays[x];
- item = rga.removeRandom(excluding);
+ if(persistent)
+ container.activate(rga, 1);
+ item = rga.removeRandom(excluding,
container, context);
if(firstRGA.isEmpty() && rga.isEmpty())
{
-
grabArraysByClient.remove(rga.getObject());
-
grabArraysByClient.remove(firstRGA.getObject());
grabArrays = new
RemoveRandomWithObject[0];
+ grabClients = new Object[0];
+ if(persistent) {
+ container.store(this);
+
firstRGA.removeFrom(container);
+
rga.removeFrom(container);
+ }
} else if(firstRGA.isEmpty()) {
-
grabArraysByClient.remove(firstRGA.getObject());
+ if(persistent) {
+
container.activate(firstRGA, 1);
+ }
grabArrays = new
RemoveRandomWithObject[] { rga };
+ grabClients = new Object[] {
grabClients[x] };
+ if(persistent) {
+ container.store(this);
+
firstRGA.removeFrom(container);
+ }
}
+ if(persistent) {
+ container.deactivate(rga, 1);
+ container.deactivate(firstRGA,
1);
+ }
if(logMINOR)
- Logger.minor(this, "Returning
(two items only) "+item+" for "+rga+" for "+rga.getObject());
+ Logger.minor(this, "Returning
(two items only) "+item+" for "+rga);
return item;
} else {
+ if(persistent)
+ container.deactivate(rga, 1);
if(logMINOR)
- Logger.minor(this, "Returning
(two items only) "+item+" for "+rga+" for "+rga.getObject());
+ Logger.minor(this, "Returning
(two items only) "+item+" for "+rga);
return item;
}
}
- int x = rand.nextInt(grabArrays.length);
+ int x =
context.fastWeakRandom.nextInt(grabArrays.length);
RemoveRandomWithObject rga = grabArrays[x];
+ if(persistent)
+ container.activate(rga, 1);
if(logMINOR)
- Logger.minor(this, "Picked "+x+" of
"+grabArrays.length+" : "+rga+" : "+rga.getObject()+" on "+this);
- RandomGrabArrayItem item = rga.removeRandom(excluding);
+ Logger.minor(this, "Picked "+x+" of
"+grabArrays.length+" : "+rga+" on "+this);
+ RandomGrabArrayItem item = rga.removeRandom(excluding,
container, context);
if(logMINOR)
Logger.minor(this, "RGA has picked
"+x+"/"+grabArrays.length+": "+item+
- (item==null ? "" : ("
cancelled="+item.isEmpty()+")"))+" rga.isEmpty="+rga.isEmpty());
+ (item==null ? "" : ("
cancelled="+item.isEmpty(container)+")"))+" rga.isEmpty="+rga.isEmpty());
// Just because the item is cancelled does not
necessarily mean the whole client is.
// E.g. a segment may return cancelled because it is
decoding, that doesn't mean
// other segments are cancelled. So just go around the
loop in that case.
- final int grabArraysLength = grabArrays.length;
if(rga.isEmpty()) {
if(logMINOR)
- Logger.minor(this, "Removing grab array
"+x+" : "+rga+" for "+rga.getObject()+" (is empty)");
- Object client = rga.getObject();
- grabArraysByClient.remove(client);
- RemoveRandomWithObject[] newArray = new
RemoveRandomWithObject[grabArraysLength > 1 ? grabArraysLength-1 : 0];
- if(x > 0)
- System.arraycopy(grabArrays, 0,
newArray, 0, x);
- if(x < grabArraysLength-1)
- System.arraycopy(grabArrays, x+1,
newArray, x, grabArraysLength - (x+1));
- grabArrays = newArray;
+ Logger.minor(this, "Removing grab array
"+x+" : "+rga+" (is empty)");
+ removeElement(x);
+ if(persistent) {
+ container.store(this);
+ rga.removeFrom(container);
+ }
}
if(item == null) {
if(!rga.isEmpty()) {
// Hmmm...
excluded++;
if(excluded > MAX_EXCLUDED) {
- Logger.normal(this, "Too many
sub-arrays are entirely excluded on "+this+" length = "+grabArraysLength, new
Exception("error"));
+ Logger.normal(this, "Too many
sub-arrays are entirely excluded on "+this+" length = "+grabArrays.length, new
Exception("error"));
+ if(persistent)
+
container.deactivate(rga, 1);
return null;
}
}
+ if(persistent)
+ container.deactivate(rga, 1);
continue;
}
- if(item.isEmpty()) continue;
+ if(persistent)
+ container.deactivate(rga, 1);
+ if(item.isEmpty(container)) continue;
return item;
}
}
+ private void removeElement(int x) {
+ final int grabArraysLength = grabArrays.length;
+ int newLen = grabArraysLength > 1 ? grabArraysLength-1 : 0;
+ RemoveRandomWithObject[] newArray = new
RemoveRandomWithObject[newLen];
+ if(x > 0)
+ System.arraycopy(grabArrays, 0, newArray, 0, x);
+ if(x < grabArraysLength-1)
+ System.arraycopy(grabArrays, x+1, newArray, x,
grabArraysLength - (x+1));
+ grabArrays = newArray;
+
+ Object[] newClients = new Object[newLen];
+ if(x > 0)
+ System.arraycopy(grabClients, 0, newClients, 0, x);
+ if(x < grabArraysLength-1)
+ System.arraycopy(grabClients, x+1, newClients, x,
grabArraysLength - (x+1));
+ grabClients = newClients;
+ }
+
public synchronized boolean isEmpty() {
return grabArrays.length == 0;
}
+ public boolean persistent() {
+ return persistent;
+ }
+
+ public int size() {
+ return grabArrays.length;
+ }
+
+ public void removeFrom(ObjectContainer container) {
+ if(grabArrays != null && grabArrays.length != 0) {
+ for(RemoveRandomWithObject rr : grabArrays) {
+ if(rr != null) {
+ Logger.error(this, "NOT EMPTY REMOVING
"+this+" : "+rr);
+ return;
+ }
+ }
+ }
+ container.delete(this);
+ }
+
+ public void maybeRemove(RemoveRandom r, ObjectContainer container) {
+ int count = 0;
+ while(true) {
+ int found = -1;
+ synchronized(this) {
+ for(int i=0;i<grabArrays.length;i++) {
+ if(grabArrays[i] == r) {
+ found = i;
+ break;
+ }
+ }
+ }
+ if(found != -1) {
+ count++;
+ if(count > 1) Logger.error(this, "Found "+r+" many
times in "+this, new Exception("error"));
+ removeElement(found);
+ } else {
+ if(count == 0) Logger.error(this, "Not in parent: "+r+"
for "+this, new Exception("error"));
+ else if(persistent) {
+ container.store(this);
+ r.removeFrom(container);
+ }
+ return;
+ }
+ }
+ }
+
}
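The comment at the top of this class rules out Db4oMap and HashMap because both end up calling equals()/hashCode() on clients that db4o may have deactivated. If the O(n) scan in haveClient() ever does become a problem, the activation-aware table the comment contemplates might look like the following sketch. This is a hypothetical class, not part of this commit: it keys on System.identityHashCode() and matches on == only, so a lookup never touches a deactivated object's fields.

    import java.util.ArrayList;

    // Sketch only: an identity-keyed index that never calls equals()/hashCode()
    // on its keys, so deactivated db4o objects are safe to look up.
    final class IdentityIndex<V> {

        private static final int BUCKETS = 64; // power of two, so masking works

        private final ArrayList<Entry<V>>[] table;

        @SuppressWarnings("unchecked")
        IdentityIndex() {
            table = new ArrayList[BUCKETS];
            for(int i=0;i<BUCKETS;i++)
                table[i] = new ArrayList<Entry<V>>();
        }

        private static final class Entry<V> {
            final Object key; // compared by reference only
            final V value;
            Entry(Object key, V value) {
                this.key = key;
                this.value = value;
            }
        }

        synchronized void put(Object key, V value) {
            table[System.identityHashCode(key) & (BUCKETS-1)].add(new Entry<V>(key, value));
        }

        synchronized V get(Object key) {
            for(Entry<V> e : table[System.identityHashCode(key) & (BUCKETS-1)])
                if(e.key == key) return e.value; // == only, never equals()
            return null;
        }
    }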
Modified: trunk/freenet/src/freenet/support/SectoredRandomGrabArrayWithInt.java
===================================================================
--- trunk/freenet/src/freenet/support/SectoredRandomGrabArrayWithInt.java
2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/support/SectoredRandomGrabArrayWithInt.java
2009-04-01 20:34:09 UTC (rev 26322)
@@ -1,13 +1,13 @@
package freenet.support;
-import freenet.crypt.RandomSource;
+import com.db4o.ObjectContainer;
public class SectoredRandomGrabArrayWithInt extends SectoredRandomGrabArray
implements IntNumberedItem {
private final int number;
- public SectoredRandomGrabArrayWithInt(RandomSource rand, int number) {
- super(rand);
+ public SectoredRandomGrabArrayWithInt(int number, boolean persistent,
ObjectContainer container, RemoveRandomParent parent) {
+ super(persistent, container, parent);
this.number = number;
}
Modified:
trunk/freenet/src/freenet/support/SectoredRandomGrabArrayWithObject.java
===================================================================
--- trunk/freenet/src/freenet/support/SectoredRandomGrabArrayWithObject.java
2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/support/SectoredRandomGrabArrayWithObject.java
2009-04-01 20:34:09 UTC (rev 26322)
@@ -1,13 +1,13 @@
package freenet.support;
-import freenet.crypt.RandomSource;
+import com.db4o.ObjectContainer;
public class SectoredRandomGrabArrayWithObject extends SectoredRandomGrabArray
implements RemoveRandomWithObject {
private final Object object;
- public SectoredRandomGrabArrayWithObject(Object object, RandomSource
rand) {
- super(rand);
+ public SectoredRandomGrabArrayWithObject(Object object, boolean
persistent, ObjectContainer container, RemoveRandomParent parent) {
+ super(persistent, container, parent);
this.object = object;
}
@@ -19,5 +19,5 @@
public String toString() {
return super.toString()+":"+object;
}
-
+
}
Modified: trunk/freenet/src/freenet/support/SimpleFieldSet.java
===================================================================
--- trunk/freenet/src/freenet/support/SimpleFieldSet.java 2009-04-01
20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/support/SimpleFieldSet.java 2009-04-01
20:34:09 UTC (rev 26322)
@@ -21,6 +21,8 @@
import java.util.Iterator;
import java.util.Map;
+import com.db4o.ObjectContainer;
+
import freenet.node.FSParseException;
import freenet.support.io.Closer;
import freenet.support.io.LineReader;
@@ -892,4 +894,12 @@
return s;
}
+ public void removeFrom(ObjectContainer container) {
+ container.delete(values);
+ for(SimpleFieldSet fs : subsets.values())
+ fs.removeFrom(container);
+ container.delete(subsets);
+ container.delete(this);
+ }
+
}
Modified: trunk/freenet/src/freenet/support/SimpleReadOnlyArrayBucket.java
===================================================================
--- trunk/freenet/src/freenet/support/SimpleReadOnlyArrayBucket.java
2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/support/SimpleReadOnlyArrayBucket.java
2009-04-01 20:34:09 UTC (rev 26322)
@@ -5,6 +5,8 @@
import java.io.InputStream;
import java.io.OutputStream;
+import com.db4o.ObjectContainer;
+
import freenet.support.api.Bucket;
/**
@@ -57,4 +59,21 @@
// Do nothing
}
+ public void storeTo(ObjectContainer container) {
+ container.store(this);
+ }
+
+ public void removeFrom(ObjectContainer container) {
+ container.delete(this);
+ }
+
+ public Bucket createShadow() throws IOException {
+ if(buf.length < 256*1024) {
+ byte[] newBuf = new byte[length];
+ System.arraycopy(buf, offset, newBuf, 0, length);
+ return new SimpleReadOnlyArrayBucket(newBuf);
+ }
+ return null;
+ }
+
}
Modified: trunk/freenet/src/freenet/support/SortedVectorByNumber.java
===================================================================
--- trunk/freenet/src/freenet/support/SortedVectorByNumber.java 2009-04-01
20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/support/SortedVectorByNumber.java 2009-04-01
20:34:09 UTC (rev 26322)
@@ -3,6 +3,8 @@
import java.util.Arrays;
import java.util.Comparator;
+import com.db4o.ObjectContainer;
+
/**
* Map of an integer to an element, based on a sorted Vector.
* Note that we have to shuffle data around, so this is slowish if it gets big.
@@ -13,10 +15,12 @@
private int length;
private static final Comparator<Object> comparator = new
SimpleIntNumberedItemComparator(true);
private static final int MIN_SIZE = 4;
+ private final boolean persistent;
- public SortedVectorByNumber() {
+ public SortedVectorByNumber(boolean persistent) {
this.data = new IntNumberedItem[MIN_SIZE];
length = 0;
+ this.persistent = persistent;
}
public synchronized IntNumberedItem getFirst() {
@@ -28,14 +32,24 @@
return length == 0;
}
- public synchronized IntNumberedItem get(int item) {
- int x = Arrays.binarySearch(data, item, comparator);
+ public synchronized IntNumberedItem get(int retryCount, ObjectContainer
container) {
+ if(persistent) {
+ container.activate(this, 1);
+ for(int i=0;i<length;i++)
+ container.activate(data[i], 1);
+ }
+ int x = Arrays.binarySearch(data, retryCount, comparator);
if(x >= 0)
return data[x];
return null;
}
- public synchronized void remove(int item) {
+ public synchronized void remove(int item, ObjectContainer container) {
+ if(persistent) {
+ container.activate(this, 1);
+ for(int i=0;i<length;i++)
+ container.activate(data[i], 1);
+ }
int x = Arrays.binarySearch(data, item, comparator);
if(x >= 0) {
if(x < length-1)
@@ -47,16 +61,19 @@
System.arraycopy(data, 0, newData, 0, length);
data = newData;
}
- verify();
+ if(persistent) container.store(this);
+ verify(container);
}
- private synchronized void verify() {
+ private synchronized void verify(ObjectContainer container) {
IntNumberedItem lastItem = null;
for(int i=0;i<length;i++) {
IntNumberedItem item = data[i];
+ if(persistent)
+ container.activate(data[i], 1);
if(i>0) {
if(item.getNumber() <= lastItem.getNumber())
- throw new IllegalStateException("Verify
failed!");
+ throw new IllegalStateException("Verify
failed! at "+i+" this="+item.getNumber()+" but last="+lastItem.getNumber());
}
lastItem = item;
}
@@ -69,16 +86,26 @@
* Add the item, if it (or an item of the same number) is not already
present.
* @return True if we added the item.
*/
- public synchronized boolean push(IntNumberedItem grabber) {
+ public synchronized boolean push(IntNumberedItem grabber,
ObjectContainer container) {
+ if(persistent) {
+ container.activate(this, 1);
+ for(int i=0;i<length;i++)
+ container.activate(data[i], 1);
+ }
int x = Arrays.binarySearch(data, grabber.getNumber(),
comparator);
if(x >= 0) return false;
// insertion point
x = -x-1;
- push(grabber, x);
+ push(grabber, x, container);
return true;
}
- public synchronized void add(IntNumberedItem grabber) {
+ public synchronized void add(IntNumberedItem grabber, ObjectContainer
container) {
+ if(persistent) {
+ container.activate(this, 1);
+ for(int i=0;i<length;i++)
+ container.activate(data[i], 1);
+ }
int x = Arrays.binarySearch(data, grabber.getNumber(),
comparator);
if(x >= 0) {
if(grabber != data[x])
@@ -87,10 +114,15 @@
}
// insertion point
x = -x-1;
- push(grabber, x);
+ push(grabber, x, container);
}
- private synchronized void push(IntNumberedItem grabber, int x) {
+ private synchronized void push(IntNumberedItem grabber, int x,
ObjectContainer container) {
+ if(persistent) {
+ container.activate(this, 1);
+ for(int i=0;i<length;i++)
+ container.activate(data[i], 1);
+ }
boolean logMINOR = Logger.shouldLog(Logger.MINOR, this);
if(logMINOR) Logger.minor(this, "Insertion point: "+x);
// Move the data
@@ -104,7 +136,9 @@
System.arraycopy(data, x, data, x+1, length-x);
data[x] = grabber;
length++;
- verify();
+ if(persistent)
+ container.store(this);
+ verify(container);
}
public synchronized int count() {
@@ -116,4 +150,13 @@
return data[index];
}
+ public int getNumberByIndex(int idx) {
+ if(idx >= length) return Integer.MAX_VALUE;
+ return data[idx].getNumber();
+ }
+
+ public boolean persistent() {
+ return persistent;
+ }
+
}
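For readers tracing push() and add() above: the `x = -x-1` step decodes Arrays.binarySearch()'s miss encoding, which returns -(insertionPoint)-1 when the key is absent. A self-contained illustration (values are arbitrary):

    import java.util.Arrays;

    public class BinarySearchInsertionPoint {
        public static void main(String[] args) {
            int[] sorted = { 2, 5, 9 };
            System.out.println(Arrays.binarySearch(sorted, 5)); // 1: found at index 1
            int x = Arrays.binarySearch(sorted, 7);             // not found
            System.out.println(x);                              // -3 == -(insertionPoint)-1
            System.out.println(-x-1);                           // 2: where 7 would be inserted
        }
    }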
Modified: trunk/freenet/src/freenet/support/TransferThread.java
===================================================================
--- trunk/freenet/src/freenet/support/TransferThread.java 2009-04-01
20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/support/TransferThread.java 2009-04-01
20:34:09 UTC (rev 26322)
@@ -6,6 +6,8 @@
import java.util.Collection;
import java.util.Iterator;
+import com.db4o.ObjectContainer;
+
import freenet.client.FetchException;
import freenet.client.FetchResult;
import freenet.client.HighLevelSimpleClient;
@@ -109,7 +111,7 @@
synchronized(mFetches) {
Iterator<ClientGetter> r = mFetches.iterator();
int rcounter = 0;
- while (r.hasNext()) { r.next().cancel();
r.remove(); ++rcounter; }
+ while (r.hasNext()) { r.next().cancel(null,
mNode.clientCore.clientContext); r.remove(); ++rcounter; }
Logger.debug(this, "Stopped " + rcounter + "
current requests");
}
@@ -117,7 +119,7 @@
synchronized(mInserts) {
Iterator<BaseClientPutter> i =
mInserts.iterator();
int icounter = 0;
- while (i.hasNext()) { i.next().cancel();
i.remove(); ++icounter; }
+ while (i.hasNext()) { i.next().cancel(null,
mNode.clientCore.clientContext); i.remove(); ++icounter; }
Logger.debug(this, "Stopped " + icounter + "
current inserts");
}
}
@@ -212,4 +214,9 @@
* disk, if it is a persistent request. */
public abstract void onMajorProgress();
+ public boolean objectCanNew(ObjectContainer container) {
+ Logger.error(this, "Not storing TransferThread in database",
new Exception("error"));
+ return false;
+ }
+
}
\ No newline at end of file
Modified: trunk/freenet/src/freenet/support/api/Bucket.java
===================================================================
--- trunk/freenet/src/freenet/support/api/Bucket.java 2009-04-01 20:12:14 UTC
(rev 26321)
+++ trunk/freenet/src/freenet/support/api/Bucket.java 2009-04-01 20:34:09 UTC
(rev 26322)
@@ -3,6 +3,8 @@
* http://www.gnu.org/ for further details of the GPL. */
package freenet.support.api;
import java.io.*;
+
+import com.db4o.ObjectContainer;
/**
* A bucket is any arbitrary object that can temporarily store data.
*
@@ -49,5 +51,29 @@
* Free the bucket, if supported.
*/
public void free();
+
+ /**
+ * Write the bucket and all its dependencies to the database.
+ * Update the stored copy and its dependencies if necessary.
+ */
+ public void storeTo(ObjectContainer container);
+ /**
+ * Remove the bucket and everything under it from the database.
+ * You don't need to call this if it hasn't been storeTo()'ed: buckets
that use the database internally
+ * will run a blocking job to delete internal structure in free().
+ * @param container The database.
+ */
+ public void removeFrom(ObjectContainer container);
+
+ /**
+ * Create a shallow read-only copy of this bucket, using different
+ * objects but using the same external storage. If this is not
possible,
+ * return null. Note that if the underlying bucket is deleted, the copy
+ * will become invalid and probably throw an IOException on read, or
+ * possibly return too-short data etc. In some use cases e.g. on
fproxy,
+ * this is acceptable.
+ */
+ public Bucket createShadow() throws IOException;
+
}
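One plausible caller of the createShadow() contract just added, as a hedged sketch (ShadowReadExample and readViaShadow are illustrative names): null means shadowing is unsupported, and reads must tolerate the original being freed underneath.

    import java.io.IOException;
    import java.io.InputStream;

    import freenet.support.api.Bucket;
    import freenet.support.io.Closer;

    class ShadowReadExample {
        // Hypothetical helper illustrating the createShadow() contract above.
        static void readViaShadow(Bucket bucket) throws IOException {
            Bucket shadow = bucket.createShadow();
            if(shadow == null)
                shadow = bucket; // shadowing unsupported: read the original
            InputStream in = null;
            try {
                in = shadow.getInputStream();
                // ... serve the data, e.g. to fproxy ...
                // Reads may fail if the underlying bucket is freed meanwhile;
                // per the javadoc that is acceptable in some use cases.
            } finally {
                Closer.close(in);
            }
        }
    }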
Modified: trunk/freenet/src/freenet/support/api/BucketFactory.java
===================================================================
--- trunk/freenet/src/freenet/support/api/BucketFactory.java 2009-04-01
20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/support/api/BucketFactory.java 2009-04-01
20:34:09 UTC (rev 26322)
@@ -5,8 +5,17 @@
import java.io.IOException;
+import freenet.support.io.SegmentedBucketChainBucketKillJob;
+
public interface BucketFactory {
+ /**
+ * Create a bucket.
+ * @param size The maximum size of the data, or -1 if we don't know.
+ * Some buckets will throw IOException if you go over this length.
+ * @return A newly created bucket.
+ * @throws IOException If creating the bucket fails, e.g. due to a disk I/O error.
+ */
public Bucket makeBucket(long size) throws IOException;
}
Modified: trunk/freenet/src/freenet/support/compress/CompressJob.java
===================================================================
--- trunk/freenet/src/freenet/support/compress/CompressJob.java 2009-04-01
20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/support/compress/CompressJob.java 2009-04-01
20:34:09 UTC (rev 26322)
@@ -1,9 +1,10 @@
package freenet.support.compress;
import freenet.client.InsertException;
+import freenet.client.async.ClientContext;
import freenet.client.async.ClientPutState;
public interface CompressJob {
- public abstract void tryCompress() throws InsertException;
- public abstract void onFailure(InsertException e, ClientPutState c);
+ public abstract void tryCompress(ClientContext context) throws
InsertException;
+ public abstract void onFailure(InsertException e, ClientPutState c,
ClientContext context);
}
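A skeletal implementor of the revised interface, to show how the new ClientContext parameter is threaded through; NoOpCompressJob is a made-up name and does nothing useful.

    import freenet.client.InsertException;
    import freenet.client.async.ClientContext;
    import freenet.client.async.ClientPutState;
    import freenet.support.compress.CompressJob;

    class NoOpCompressJob implements CompressJob {
        public void tryCompress(ClientContext context) throws InsertException {
            // A real job would read its input bucket, compress it, and hand
            // the result on via the context.
        }
        public void onFailure(InsertException e, ClientPutState c, ClientContext context) {
            // Report the failure upstream; c may be null when called from the
            // compressor thread, as RealCompressor does below.
        }
    }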
Modified: trunk/freenet/src/freenet/support/compress/Compressor.java
===================================================================
--- trunk/freenet/src/freenet/support/compress/Compressor.java 2009-04-01
20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/support/compress/Compressor.java 2009-04-01
20:34:09 UTC (rev 26322)
@@ -53,32 +53,6 @@
return compressor.decompress(dbuf, i, j, output);
}
-
- public static final Semaphore compressorSemaphore = new
Semaphore(getMaxRunningCompressionThreads());
-
- private static int getMaxRunningCompressionThreads() {
- int maxRunningThreads = 1;
-
- String osName = System.getProperty("os.name");
- if(osName.indexOf("Windows") == -1 &&
(osName.toLowerCase().indexOf("mac os x") > 0) ||
(!NativeThread.usingNativeCode()))
- // OS/X niceness is really weak, so we don't
want any more background CPU load than necessary
- // Also, on non-Windows, we need the native
threads library to be working.
- maxRunningThreads = 1;
- else {
- // Most other OSs will have reasonable
niceness, so go by RAM.
- Runtime r = Runtime.getRuntime();
- int max = r.availableProcessors(); // FIXME
this may change in a VM, poll it
- long maxMemory = r.maxMemory();
- if(maxMemory < 128 * 1024 * 1024)
- max = 1;
- else
- // one compressor thread per (128MB of
ram + available core)
- max = Math.min(max, (int)
(Math.min(Integer.MAX_VALUE, maxMemory / (128 * 1024 * 1024))));
- maxRunningThreads = max;
- }
- Logger.minor(COMPRESSOR_TYPE.class, "Maximum Compressor
threads: " + maxRunningThreads);
- return maxRunningThreads;
- }
}
/**
Modified: trunk/freenet/src/freenet/support/compress/GzipCompressor.java
===================================================================
--- trunk/freenet/src/freenet/support/compress/GzipCompressor.java
2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/support/compress/GzipCompressor.java
2009-04-01 20:34:09 UTC (rev 26322)
@@ -12,6 +12,7 @@
import freenet.support.Logger;
import freenet.support.api.Bucket;
import freenet.support.api.BucketFactory;
+import freenet.support.io.Closer;
import freenet.support.io.CountedOutputStream;
public class GzipCompressor implements Compressor {
@@ -57,12 +58,19 @@
output = preferred;
else
output = bf.makeBucket(maxLength);
- InputStream is = data.getInputStream();
- OutputStream os = output.getOutputStream();
+ InputStream is = null;
+ OutputStream os = null;
+ try {
+ is = data.getInputStream();
+ os = output.getOutputStream();
decompress(is, os, maxLength, maxCheckSizeLength);
- os.close();
- is.close();
+ os.close(); os = null;
+ is.close(); is = null;
return output;
+ } finally {
+ if(is != null) Closer.close(is);
+ if(os != null) Closer.close(os);
+ }
}
private long decompress(InputStream is, OutputStream os, long
maxLength, long maxCheckSizeBytes) throws IOException,
CompressionOutputSizeException {
Modified: trunk/freenet/src/freenet/support/compress/RealCompressor.java
===================================================================
--- trunk/freenet/src/freenet/support/compress/RealCompressor.java
2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/support/compress/RealCompressor.java
2009-04-01 20:34:09 UTC (rev 26322)
@@ -4,21 +4,32 @@
package freenet.support.compress;
import freenet.client.InsertException;
+import freenet.client.async.ClientContext;
import freenet.node.PrioRunnable;
import freenet.support.Executor;
import freenet.support.Logger;
import freenet.support.OOMHandler;
+import freenet.support.compress.Compressor.COMPRESSOR_TYPE;
import freenet.support.io.NativeThread;
import java.util.LinkedList;
+import java.util.concurrent.Semaphore;
+import com.db4o.ObjectContainer;
+
public class RealCompressor implements PrioRunnable {
private final Executor exec;
+ private ClientContext context;
private static final LinkedList<CompressJob> _awaitingJobs = new
LinkedList<CompressJob>();
+ public static final Semaphore compressorSemaphore = new
Semaphore(getMaxRunningCompressionThreads());
public RealCompressor(Executor e) {
this.exec = e;
}
+
+ public void setClientContext(ClientContext context) {
+ this.context = context;
+ }
public int getPriority() {
return NativeThread.HIGH_PRIORITY;
@@ -43,7 +54,7 @@
continue;
}
}
-
Compressor.COMPRESSOR_TYPE.compressorSemaphore.acquire();
+ compressorSemaphore.acquire();
} catch(InterruptedException e) {
Logger.error(this, "caught: "+e.getMessage(),
e);
continue;
@@ -55,26 +66,26 @@
freenet.support.Logger.OSThread.logPID(this);
try {
try {
-
finalJob.tryCompress();
+
finalJob.tryCompress(context);
} catch(InsertException
e) {
-
finalJob.onFailure(e, null);
+
finalJob.onFailure(e, null, context);
}
catch(OutOfMemoryError e) {
OOMHandler.handleOOM(e);
System.err.println("OffThreadCompressor thread above failed.");
// Might not be
heap, so try anyway
-
finalJob.onFailure(new InsertException(InsertException.INTERNAL_ERROR, e,
null), null);
+
finalJob.onFailure(new InsertException(InsertException.INTERNAL_ERROR, e,
null), null, context);
} catch(Throwable t) {
Logger.error(this, "Caught in OffThreadCompressor: " + t, t);
System.err.println("Caught in OffThreadCompressor: " + t);
t.printStackTrace();
// Try to fail
gracefully
-
finalJob.onFailure(new InsertException(InsertException.INTERNAL_ERROR, t,
null), null);
+
finalJob.onFailure(new InsertException(InsertException.INTERNAL_ERROR, t,
null), null, context);
}
} catch(Throwable t) {
Logger.error(this, "Caught " +
t + " in " + this, t);
} finally {
-
Compressor.COMPRESSOR_TYPE.compressorSemaphore.release();
+ compressorSemaphore.release();
}
}
@@ -84,4 +95,33 @@
}, "Compressor thread for " + currentJob);
}
}
+
+ public boolean objectCanNew(ObjectContainer container) {
+ Logger.error(this, "Not storing NodeClientCore in database",
new Exception("error"));
+ return false;
+ }
+
+ private static int getMaxRunningCompressionThreads() {
+ int maxRunningThreads = 1;
+
+ String osName = System.getProperty("os.name");
+ if(osName.indexOf("Windows") == -1 &&
(osName.toLowerCase().indexOf("mac os x") > 0) ||
(!NativeThread.usingNativeCode()))
+ // OS/X niceness is really weak, so we don't want any
more background CPU load than necessary
+ // Also, on non-Windows, we need the native threads
library to be working.
+ maxRunningThreads = 1;
+ else {
+ // Most other OSs will have reasonable niceness, so go
by RAM.
+ Runtime r = Runtime.getRuntime();
+ int max = r.availableProcessors(); // FIXME this may
change in a VM, poll it
+ long maxMemory = r.maxMemory();
+ if(maxMemory < 128 * 1024 * 1024)
+ max = 1;
+ else
+ // one compressor thread per (128MB of ram +
available core)
+ max = Math.min(max, (int)
(Math.min(Integer.MAX_VALUE, maxMemory / (128 * 1024 * 1024))));
+ maxRunningThreads = max;
+ }
+ Logger.minor(RealCompressor.class, "Maximum Compressor threads:
" + maxRunningThreads);
+ return maxRunningThreads;
+ }
}
\ No newline at end of file
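The heuristic moved in from Compressor works out to min(cores, maxMemory / 128MB), floored at 1 below 128MB of heap. A worked example with assumed inputs:

    // Worked example of the thread-count arithmetic above; the inputs are
    // assumptions, not measurements.
    public class CompressorThreadMath {
        static int maxThreads(int cores, long maxMemoryBytes) {
            if(maxMemoryBytes < 128L * 1024 * 1024) return 1;
            return Math.min(cores, (int) Math.min(Integer.MAX_VALUE,
                    maxMemoryBytes / (128L * 1024 * 1024)));
        }
        public static void main(String[] args) {
            // 4 cores, 512MB heap -> min(4, 4) = 4 compressor threads
            System.out.println(maxThreads(4, 512L * 1024 * 1024));
            // 8 cores, 256MB heap -> min(8, 2) = 2
            System.out.println(maxThreads(8, 256L * 1024 * 1024));
            // any cores, 96MB heap -> 1
            System.out.println(maxThreads(4, 96L * 1024 * 1024));
        }
    }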
Modified: trunk/freenet/src/freenet/support/io/ArrayBucket.java
===================================================================
--- trunk/freenet/src/freenet/support/io/ArrayBucket.java 2009-04-01
20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/support/io/ArrayBucket.java 2009-04-01
20:34:09 UTC (rev 26322)
@@ -6,6 +6,8 @@
import java.io.InputStream;
import java.io.OutputStream;
+import com.db4o.ObjectContainer;
+
import freenet.support.api.Bucket;
/**
@@ -93,4 +95,18 @@
System.arraycopy(data, 0, buf, 0, size);
return buf;
}
+
+ public void storeTo(ObjectContainer container) {
+ container.store(data);
+ container.store(this);
+ }
+
+ public void removeFrom(ObjectContainer container) {
+ container.delete(data);
+ container.delete(this);
+ }
+
+ public Bucket createShadow() throws IOException {
+ return null;
+ }
}
Modified: trunk/freenet/src/freenet/support/io/BaseFileBucket.java
===================================================================
--- trunk/freenet/src/freenet/support/io/BaseFileBucket.java 2009-04-01
20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/support/io/BaseFileBucket.java 2009-04-01
20:34:09 UTC (rev 26322)
@@ -42,22 +42,25 @@
/** Vector of streams (FileBucketInputStream or FileBucketOutputStream)
which
* are open to this file. So we can be sure they are all closed when we
free it.
* Can be null. */
- private Vector<Object> streams;
+ private transient Vector<Object> streams;
protected static String tempDir = null;
- public BaseFileBucket(File file) {
+ public BaseFileBucket(File file, boolean deleteOnExit) {
if(file == null) throw new NullPointerException();
this.length = file.length();
- if(deleteOnExit()) {
- try {
- file.deleteOnExit();
- } catch (NullPointerException e) {
-
if(WrapperManager.hasShutdownHookBeenTriggered()) {
- Logger.normal(this,
"NullPointerException setting deleteOnExit while shutting down - buggy JVM
code: "+e, e);
- } else {
- Logger.error(this, "Caught "+e+" doing
deleteOnExit() for "+file+" - JVM bug ????");
- }
+ if(deleteOnExit)
+ setDeleteOnExit(file);
+ }
+
+ protected void setDeleteOnExit(File file) {
+ try {
+ file.deleteOnExit();
+ } catch (NullPointerException e) {
+ if(WrapperManager.hasShutdownHookBeenTriggered()) {
+ Logger.normal(this, "NullPointerException
setting deleteOnExit while shutting down - buggy JVM code: "+e, e);
+ } else {
+ Logger.error(this, "Caught "+e+" doing
deleteOnExit() for "+file+" - JVM bug ????");
}
}
}
@@ -66,7 +69,7 @@
synchronized (this) {
File file = getFile();
if(freed)
- throw new IOException("File already freed");
+ throw new IOException("File already freed:
"+this);
if(isReadOnly())
throw new IOException("Bucket is read-only:
"+this);
@@ -74,7 +77,7 @@
throw new FileExistsException(file);
if(streams != null && !streams.isEmpty())
- Logger.error(this, "Streams open on "+this+"
while opening an output stream!: "+streams);
+ Logger.error(this, "Streams open on "+this+"
while opening an output stream!: "+streams, new Exception("debug"));
File tempfile = createFileOnly() ? getTempfile() : file;
long streamNumber = ++fileRestartCounter;
@@ -146,7 +149,7 @@
throws FileNotFoundException {
super(tempfile, false);
if(logMINOR)
- Logger.minor(this, "Writing to "+tempfile+" for
"+getFile());
+ Logger.minor(this, "Writing to "+tempfile+" for
"+getFile()+" : "+this);
this.tempfile = tempfile;
resetLength();
this.restartCount = restartCount;
@@ -261,7 +264,7 @@
public synchronized InputStream getInputStream() throws IOException {
if(freed)
- throw new IOException("File already freed");
+ throw new IOException("File already freed: "+this);
File file = getFile();
if(!file.exists()) {
Logger.normal(this, "File does not exist: "+file+" for
"+this);
@@ -405,6 +408,8 @@
public void free(boolean forceFree) {
Object[] toClose;
+ if(Logger.shouldLog(Logger.MINOR, this))
+ Logger.minor(this, "Freeing "+this, new
Exception("debug"));
synchronized(this) {
if(freed) return;
freed = true;
@@ -442,7 +447,13 @@
@Override
public synchronized String toString() {
- return super.toString()+ ':'
+getFile().getPath()+":streams="+(streams == null ? 0 : streams.size());
+ StringBuffer sb = new StringBuffer();
+ sb.append(super.toString());
+ sb.append(':');
+ sb.append(getFile().getPath());
+ sb.append(":streams=");
+ sb.append(streams == null ? 0 : streams.size());
+ return sb.toString();
}
/**
Copied: trunk/freenet/src/freenet/support/io/BucketArrayWrapper.java (from rev
26320, branches/db4o/freenet/src/freenet/support/io/BucketArrayWrapper.java)
===================================================================
--- trunk/freenet/src/freenet/support/io/BucketArrayWrapper.java
(rev 0)
+++ trunk/freenet/src/freenet/support/io/BucketArrayWrapper.java
2009-04-01 20:34:09 UTC (rev 26322)
@@ -0,0 +1,9 @@
+package freenet.support.io;
+
+import freenet.support.api.Bucket;
+
+public class BucketArrayWrapper {
+
+ public Bucket[] buckets;
+
+}
Modified: trunk/freenet/src/freenet/support/io/BucketChainBucket.java
===================================================================
--- trunk/freenet/src/freenet/support/io/BucketChainBucket.java 2009-04-01
20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/support/io/BucketChainBucket.java 2009-04-01
20:34:09 UTC (rev 26322)
@@ -9,6 +9,12 @@
import java.io.OutputStream;
import java.util.Vector;
+import com.db4o.ObjectContainer;
+
+import freenet.client.async.ClientContext;
+import freenet.client.async.DBJob;
+import freenet.client.async.DBJobRunner;
+import freenet.support.Logger;
import freenet.support.api.Bucket;
import freenet.support.api.BucketFactory;
@@ -20,7 +26,14 @@
private boolean freed;
private boolean readOnly;
private final BucketFactory bf;
-
+ boolean stored;
+
+ /**
+ * @param bucketSize
+ * @param bf
+ * @param dbJobRunner If not null, use this to store buckets to disk
progressively
+ * to avoid a big transaction at the end. Caller then MUST call
storeTo() at some point.
+ */
public BucketChainBucket(long bucketSize, BucketFactory bf) {
this.bucketSize = bucketSize;
this.buckets = new Vector<Bucket>();
@@ -30,6 +43,14 @@
readOnly = false;
}
+ private BucketChainBucket(Vector<Bucket> newBuckets, long bucketSize2, long size2, boolean readOnly, BucketFactory bf2) {
+ this.buckets = newBuckets;
+ this.bucketSize = bucketSize2;
+ this.size = size2;
+ this.readOnly = readOnly;
+ this.bf = bf2;
+ }
+
public void free() {
Bucket[] list;
synchronized(this) {
@@ -43,7 +64,7 @@
}
/** Equivalent to free(), but don't free the underlying buckets. */
- public void clear() {
+ void clear() {
synchronized(this) {
size = 0;
buckets.clear();
@@ -57,7 +78,7 @@
public InputStream getInputStream() throws IOException {
synchronized(this) {
if(freed) throw new IOException("Freed");
- }
+ if(buckets.size() == 0) return new NullInputStream();
return new InputStream() {
private int bucketNo = 0;
@@ -89,6 +110,10 @@
} catch (EOFException e) {
// Handle the same
}
+ synchronized(BucketChainBucket.this) {
+ // No more data to read at the
moment.
+ if(readBytes >= size) return -1;
+ }
bucketNo++;
curBucketStream.close();
curBucketStream =
getBucketInputStream(bucketNo++);
@@ -133,6 +158,10 @@
} catch (EOFException e) {
// Handle the same
}
+ synchronized(BucketChainBucket.this) {
+ // No more data to read at the
moment.
+ if(readBytes >= size) return -1;
+ }
bucketNo++;
curBucketStream.close();
curBucketStream =
getBucketInputStream(bucketNo++);
@@ -158,6 +187,7 @@
}
};
+ }
}
protected synchronized InputStream getBucketInputStream(int i) throws
IOException {
@@ -262,15 +292,20 @@
}
protected OutputStream makeBucketOutputStream(int i) throws IOException
{
- Bucket bucket = bf.makeBucket(bucketSize);
- buckets.add(bucket);
- if (buckets.size() != i + 1)
- throw new IllegalStateException("Added bucket, size
should be " + (i + 1) + " but is " + buckets.size());
- if (buckets.get(i) != bucket)
- throw new IllegalStateException("Bucket got replaced.
Race condition?");
+ Bucket bucket;
+ synchronized(this) {
+ bucket = bf.makeBucket(bucketSize);
+ buckets.add(bucket);
+ if (buckets.size() != i + 1)
+ throw new IllegalStateException("Added bucket,
size should be " + (i + 1) + " but is " + buckets.size());
+ if (buckets.get(i) != bucket)
+ throw new IllegalStateException("Bucket got
replaced. Race condition?");
+ }
return bucket.getOutputStream();
}
+ private int storedTo = 0;
+
public boolean isReadOnly() {
return readOnly;
}
@@ -283,4 +318,36 @@
return size;
}
+ public void storeTo(ObjectContainer container) {
+ throw new UnsupportedOperationException();
+ }
+
+ public void removeFrom(ObjectContainer container) {
+ throw new UnsupportedOperationException();
+ }
+
+ public Bucket createShadow() throws IOException {
+ Vector<Bucket> newBuckets = new Vector<Bucket>();
+ for(int i=0;i<buckets.size();i++) {
+ Bucket data = (Bucket) buckets.get(i);
+ Bucket shadow = data.createShadow();
+ if(shadow == null) {
+ // Shadow buckets don't need to be freed.
+ return null;
+ }
+ newBuckets.add(shadow);
+ }
+ return new BucketChainBucket(newBuckets, bucketSize, size,
true, bf);
+ }
+
+ // For debugging
+
+ public boolean objectCanUpdate(ObjectContainer container) {
+ return true;
+ }
+
+ public boolean objectCanNew(ObjectContainer container) {
+ return true;
+ }
+
}
Deleted: trunk/freenet/src/freenet/support/io/BucketChainBucketFactory.java
===================================================================
--- trunk/freenet/src/freenet/support/io/BucketChainBucketFactory.java
2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/support/io/BucketChainBucketFactory.java
2009-04-01 20:34:09 UTC (rev 26322)
@@ -1,22 +0,0 @@
-package freenet.support.io;
-
-import java.io.IOException;
-
-import freenet.support.api.Bucket;
-import freenet.support.api.BucketFactory;
-
-public class BucketChainBucketFactory implements BucketFactory {
-
- final BucketFactory factory;
- final int blockSize;
-
- public BucketChainBucketFactory(BucketFactory bucketFactory, int
block_size) {
- this.factory = bucketFactory;
- this.blockSize = block_size;
- }
-
- public Bucket makeBucket(long size) throws IOException {
- return new BucketChainBucket(blockSize, factory);
- }
-
-}
Copied: trunk/freenet/src/freenet/support/io/BucketChainBucketFactory.java
(from rev 26320,
branches/db4o/freenet/src/freenet/support/io/BucketChainBucketFactory.java)
===================================================================
--- trunk/freenet/src/freenet/support/io/BucketChainBucketFactory.java
(rev 0)
+++ trunk/freenet/src/freenet/support/io/BucketChainBucketFactory.java
2009-04-01 20:34:09 UTC (rev 26322)
@@ -0,0 +1,38 @@
+package freenet.support.io;
+
+import java.io.IOException;
+
+import freenet.client.async.DBJobRunner;
+import freenet.support.api.Bucket;
+import freenet.support.api.BucketFactory;
+
+public class BucketChainBucketFactory implements BucketFactory {
+
+ final BucketFactory factory;
+ final int blockSize;
+ final DBJobRunner runner;
+ final int segmentSize;
+
+ /**
+ * If you want persistent buckets, which will be saved every 1000 buckets
+ * and deleted on restart if not stored by then, pass in the DBJobRunner.
+ * Otherwise pass in null.
+ * @param bucketFactory
+ * @param block_size
+ * @param runner
+ */
+ public BucketChainBucketFactory(BucketFactory bucketFactory, int
block_size, DBJobRunner runner, int segmentSize) {
+ this.factory = bucketFactory;
+ this.blockSize = block_size;
+ this.runner = runner;
+ this.segmentSize = segmentSize;
+ }
+
+ public Bucket makeBucket(long size) throws IOException {
+ if(runner == null)
+ return new BucketChainBucket(blockSize, factory);
+ else
+ return new SegmentedBucketChainBucket(blockSize,
factory, runner, segmentSize);
+ }
+
+}
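A hedged usage sketch of the two modes the javadoc above describes; the 32768 block size and 128 segment size are illustrative assumptions, not values taken from this commit.

    import java.io.IOException;

    import freenet.client.async.DBJobRunner;
    import freenet.support.api.Bucket;
    import freenet.support.api.BucketFactory;
    import freenet.support.io.BucketChainBucketFactory;

    class ChainFactoryExample {
        static Bucket makeTransient(BucketFactory underlying) throws IOException {
            // runner == null: plain BucketChainBuckets, nothing touches the database.
            return new BucketChainBucketFactory(underlying, 32768, null, 128).makeBucket(-1);
        }
        static Bucket makePersistent(BucketFactory underlying, DBJobRunner runner) throws IOException {
            // runner != null: SegmentedBucketChainBuckets, stored progressively;
            // the caller must eventually call storeTo(), or they die on restart.
            return new BucketChainBucketFactory(underlying, 32768, runner, 128).makeBucket(-1);
        }
    }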
Modified: trunk/freenet/src/freenet/support/io/BucketTools.java
===================================================================
--- trunk/freenet/src/freenet/support/io/BucketTools.java 2009-04-01
20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/support/io/BucketTools.java 2009-04-01
20:34:09 UTC (rev 26322)
@@ -15,6 +15,8 @@
import org.spaceroots.mantissa.random.MersenneTwister;
+import com.db4o.ObjectContainer;
+
import freenet.crypt.SHA256;
import freenet.support.Logger;
import freenet.support.api.Bucket;
@@ -257,7 +259,7 @@
if ((bytesRead < bucketLength) && (bucketLength
> 0))
throw new EOFException();
if ((bytesRead != bucketLength) &&
(bucketLength > 0))
- throw new IOException("Read " +
bytesRead + " but bucket length " + bucketLength + '!');
+ throw new IOException("Read " +
bytesRead + " but bucket length " + bucketLength + " on " + data + '!');
byte[] retval = md.digest();
return retval;
} finally {
@@ -340,17 +342,25 @@
*
* Note that this method will allocate a buffer of size splitSize.
* @param freeData
+ * @param persistent If true, the data is persistent. This method is
responsible for ensuring that the returned
+ * buckets HAVE ALREADY BEEN STORED TO THE DATABASE, using the provided
handle. The point? SegmentedBCB's buckets
+ * have already been stored!!
+ * @param container Database handle, only needed if persistent = true.
* @throws IOException If there is an error creating buckets, reading
from
* the provided bucket, or writing to created buckets.
*/
- public static Bucket[] split(Bucket origData, int splitSize,
BucketFactory bf, boolean freeData) throws IOException {
+ public static Bucket[] split(Bucket origData, int splitSize,
BucketFactory bf, boolean freeData, boolean persistent, ObjectContainer
container) throws IOException {
if(origData instanceof FileBucket) {
if(freeData) {
Logger.error(BucketTools.class, "Asked to free
data when splitting a FileBucket ?!?!? Not freeing as this would clobber the
split result...");
}
- return ((FileBucket)origData).split(splitSize);
+ Bucket[] buckets =
((FileBucket)origData).split(splitSize);
+ for(Bucket bucket : buckets)
+ bucket.storeTo(container);
+ return buckets;
}
if(origData instanceof BucketChainBucket) {
+ if(persistent) throw new
IllegalArgumentException("Splitting a BucketChainBucket but persistent =
true!");
BucketChainBucket data = (BucketChainBucket)origData;
if(data.bucketSize == splitSize) {
Bucket[] buckets = data.getBuckets();
@@ -361,6 +371,20 @@
Logger.error(BucketTools.class, "Incompatible
split size splitting a BucketChainBucket: his split size is "+data.bucketSize+"
but mine is "+splitSize+" - we will copy the data, but this suggests a bug",
new Exception("debug"));
}
}
+ if(origData instanceof SegmentedBucketChainBucket) {
+ SegmentedBucketChainBucket data =
(SegmentedBucketChainBucket)origData;
+ if(data.bucketSize == splitSize) {
+ Bucket[] buckets = data.getBuckets();
+ if(freeData)
+ data.clear();
+ if(persistent && freeData)
+ data.removeFrom(container);
+ // Buckets have already been stored, no need to
storeTo().
+ return buckets;
+ } else {
+ Logger.error(BucketTools.class, "Incompatible split size splitting a SegmentedBucketChainBucket: his split size is "+data.bucketSize+" but mine is "+splitSize+" - we will copy the data, but this suggests a bug", new Exception("debug"));
+ }
+ }
long length = origData.size();
if(length > ((long)Integer.MAX_VALUE) * splitSize)
throw new IllegalArgumentException("Way too big!:
"+length+" for "+splitSize);
@@ -396,6 +420,12 @@
}
if(freeData)
origData.free();
+ if(persistent && freeData)
+ origData.removeFrom(container);
+ if(persistent) {
+ for(Bucket bucket : buckets)
+ bucket.storeTo(container);
+ }
return buckets;
}
@@ -431,4 +461,5 @@
return b;
} finally { Closer.close(os); }
}
+
}
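A sketch of the new split() contract from the caller's side (splitPersistent and the 32768 split size are illustrative): with persistent=true the returned buckets have already been stored, and freeData=true additionally frees and removes the original.

    import java.io.IOException;

    import com.db4o.ObjectContainer;

    import freenet.support.api.Bucket;
    import freenet.support.api.BucketFactory;
    import freenet.support.io.BucketTools;

    class SplitExample {
        static Bucket[] splitPersistent(Bucket data, BucketFactory bf,
                ObjectContainer container) throws IOException {
            // freeData=true: the original is freed and removeFrom()'d;
            // persistent=true: every returned bucket has been storeTo()'d already.
            return BucketTools.split(data, 32768, bf, true, true, container);
        }
    }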
Modified: trunk/freenet/src/freenet/support/io/DelayedFreeBucket.java
===================================================================
--- trunk/freenet/src/freenet/support/io/DelayedFreeBucket.java 2009-04-01
20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/support/io/DelayedFreeBucket.java 2009-04-01
20:34:09 UTC (rev 26322)
@@ -9,6 +9,8 @@
import java.io.InputStream;
import java.io.OutputStream;
+import com.db4o.ObjectContainer;
+
import freenet.crypt.RandomSource;
import freenet.support.Logger;
import freenet.support.SimpleFieldSet;
@@ -19,10 +21,21 @@
private final PersistentFileTracker factory;
Bucket bucket;
boolean freed;
+ boolean removed;
+ boolean reallyRemoved;
+ public boolean toFree() {
+ return freed;
+ }
+
+ public boolean toRemove() {
+ return removed;
+ }
+
public DelayedFreeBucket(PersistentTempBucketFactory factory, Bucket
bucket) {
this.factory = factory;
this.bucket = bucket;
+ if(bucket == null) throw new NullPointerException();
}
public DelayedFreeBucket(SimpleFieldSet fs, RandomSource random,
PersistentFileTracker f) throws CannotCreateFromFieldSetException {
@@ -67,7 +80,7 @@
if(freed) return;
if(Logger.shouldLog(Logger.MINOR, this))
Logger.minor(this, "Freeing "+this+"
underlying="+bucket, new Exception("debug"));
- this.factory.delayedFreeBucket(bucket);
+ this.factory.delayedFreeBucket(this);
freed = true;
}
}
@@ -88,4 +101,81 @@
return fs;
}
+ public void storeTo(ObjectContainer container) {
+ bucket.storeTo(container);
+ container.store(this);
+ }
+
+ public void removeFrom(ObjectContainer container) {
+ if(Logger.shouldLog(Logger.MINOR, this))
+ Logger.minor(this, "Removing from database: "+this);
+ synchronized(this) {
+ boolean wasQueued = freed || removed;
+ if(!freed)
+ Logger.error(this, "Asking to remove from
database but not freed: "+this, new Exception("error"));
+ removed = true;
+ if(!wasQueued)
+ this.factory.delayedFreeBucket(this);
+ }
+ }
+
+ public String toString() {
+ return super.toString()+":"+bucket;
+ }
+
+ private transient int _activationCount = 0;
+
+ public void objectOnActivate(ObjectContainer container) {
+ StackTraceElement[] elements =
Thread.currentThread().getStackTrace();
+ if(elements != null && elements.length > 100) {
+ System.err.println("Infinite recursion in progress...");
+ }
+ if(Logger.shouldLog(Logger.MINOR, this))
+ Logger.minor(this, "Activating "+super.toString()+" :
"+bucket.getClass());
+ if(bucket == this) {
+ Logger.error(this, "objectOnActivate on
DelayedFreeBucket: wrapping self!!!");
+ return;
+ }
+ // Cascading activation of dependencies
+ container.activate(bucket, 1);
+ }
+
+ public Bucket createShadow() throws IOException {
+ return bucket.createShadow();
+ }
+
+ public void realFree() {
+ bucket.free();
+ }
+
+ public void realRemoveFrom(ObjectContainer container) {
+ synchronized(this) {
+ if(reallyRemoved)
+ Logger.error(this, "Calling realRemoveFrom()
twice on "+this);
+ reallyRemoved = true;
+ }
+ bucket.removeFrom(container);
+ container.delete(this);
+ }
+
+// public void objectOnDeactivate(ObjectContainer container) {
+// if(Logger.shouldLog(Logger.MINOR, this)) Logger.minor(this,
"Deactivating "+super.toString()+" : "+bucket, new Exception("debug"));
+// }
+
+ public boolean objectCanNew(ObjectContainer container) {
+ if(reallyRemoved) {
+ Logger.error(this, "objectCanNew() on "+this+" but
really removed = "+reallyRemoved+" already freed="+freed+" removed="+removed,
new Exception("debug"));
+ return false;
+ }
+ return true;
+ }
+
+ public boolean objectCanUpdate(ObjectContainer container) {
+ if(reallyRemoved) {
+ Logger.error(this, "objectCanUpdate() on "+this+" but
really removed = "+reallyRemoved+" already freed="+freed+" removed="+removed,
new Exception("debug"));
+ return false;
+ }
+ return true;
+ }
+
}
\ No newline at end of file
Modified: trunk/freenet/src/freenet/support/io/FileBucket.java
===================================================================
--- trunk/freenet/src/freenet/support/io/FileBucket.java 2009-04-01
20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/support/io/FileBucket.java 2009-04-01
20:34:09 UTC (rev 26322)
@@ -4,7 +4,11 @@
package freenet.support.io;
import java.io.File;
+import java.io.IOException;
+import com.db4o.ObjectContainer;
+
+import freenet.support.Logger;
import freenet.support.api.Bucket;
/**
@@ -39,9 +43,13 @@
* @param deleteOnExit If true, delete the file on a clean exit of the
JVM. Irreversible - use with care!
*/
public FileBucket(File file, boolean readOnly, boolean createFileOnly,
boolean deleteOnFinalize, boolean deleteOnExit, boolean deleteOnFree) {
- super(file);
+ super(file, deleteOnExit);
if(file == null) throw new NullPointerException();
+ File origFile = file;
file = file.getAbsoluteFile();
+ // Copy it so we can safely delete it.
+ if(origFile == file)
+ file = new File(file.getPath());
this.readOnly = readOnly;
this.createFileOnly = createFileOnly;
this.file = file;
@@ -99,4 +107,37 @@
protected boolean deleteOnFree() {
return deleteOnFree;
}
+
+ public void storeTo(ObjectContainer container) {
+ container.store(this);
+ }
+
+ public void removeFrom(ObjectContainer container) {
+ Logger.minor(this, "Removing "+this);
+ container.activate(file, 5);
+ container.delete(file);
+ container.delete(this);
+ }
+
+ public void objectOnActivate(ObjectContainer container) {
+ container.activate(file, 5);
+ }
+
+ public void objectOnNew(ObjectContainer container) {
+ Logger.minor(this, "Storing "+this, new Exception("debug"));
+ }
+
+ public void objectOnUpdate(ObjectContainer container) {
+ Logger.minor(this, "Updating "+this, new Exception("debug"));
+ }
+
+ public void objectOnDelete(ObjectContainer container) {
+ Logger.minor(this, "Deleting "+this, new Exception("debug"));
+ }
+
+ public Bucket createShadow() throws IOException {
+ String fnam = new String(file.getPath());
+ File newFile = new File(fnam);
+ return new FileBucket(newFile, true, false, false, false,
false);
+ }
}
Modified: trunk/freenet/src/freenet/support/io/FileUtil.java
===================================================================
--- trunk/freenet/src/freenet/support/io/FileUtil.java 2009-04-01 20:12:14 UTC
(rev 26321)
+++ trunk/freenet/src/freenet/support/io/FileUtil.java 2009-04-01 20:34:09 UTC
(rev 26322)
@@ -71,9 +71,16 @@
}
public static File getCanonicalFile(File file) {
+ // Having some problems storing File's in db4o ...
+ // It would start up, and canonicalise a file with path
"/var/lib/freenet-experimental/persistent-temp-24374"
+ // to
/var/lib/freenet-experimental/var/lib/freenet-experimental/persistent-temp-24374
+ // (where /var/lib/freenet-experimental is the current working
dir)
+ // Regenerating from path worked. So do that here.
+ // And yes, it's voodoo.
+ file = new File(file.getPath());
File result;
try {
- result = file.getCanonicalFile();
+ result = file.getAbsoluteFile().getCanonicalFile();
} catch (IOException e) {
result = file.getAbsoluteFile();
}
@@ -180,6 +187,52 @@
return true;
}
+ /**
+ * Like renameTo(), but can move across filesystems, by copying the data.
+ * @param orig The file to move from.
+ * @param dest The file to move to.
+ * @param overwrite If true, delete dest first if it already exists.
+ */
+ public static boolean moveTo(File orig, File dest, boolean overwrite) {
+ if(orig.equals(dest))
+ throw new IllegalArgumentException("Huh? The two files are the same!");
+ if(!orig.exists()) {
+ throw new IllegalArgumentException("Original doesn't exist!");
+ }
+ if(dest.exists() && overwrite)
+ dest.delete();
+ else {
+ System.err.println("Not overwriting "+dest+" - already
exists moving "+orig);
+ return false;
+ }
+ if(!orig.renameTo(dest)) {
+ // Copy the data
+ InputStream is = null;
+ OutputStream os = null;
+ try {
+ is = new FileInputStream(orig);
+ os = new FileOutputStream(dest);
+ copy(is, os, orig.length());
+ is.close();
+ is = null;
+ os.close();
+ os = null;
+ orig.delete();
+ return true;
+ } catch (IOException e) {
+ dest.delete();
+ Logger.error(FileUtil.class, "Move failed from
"+orig+" to "+dest+" : "+e, e);
+ System.err.println("Move failed from "+orig+"
to "+dest+" : "+e);
+ e.printStackTrace();
+ return false;
+ } finally {
+ Closer.close(is);
+ Closer.close(os);
+ }
+ } else return true;
+ }
+
+
+
public static String sanitize(String s) {
StringBuilder sb = new StringBuilder(s.length());
for(int i=0;i<s.length();i++) {
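A hedged caller for the new moveTo() (the paths are made up): it tries renameTo() first and falls back to copy-and-delete, e.g. across filesystems; overwrite=true deletes an existing destination first.

    import java.io.File;

    import freenet.support.io.FileUtil;

    class MoveExample {
        public static void main(String[] args) {
            File orig = new File("persistent-temp-old/blob-1234");
            File dest = new File("persistent-temp-new/blob-1234");
            if(!FileUtil.moveTo(orig, dest, true))
                System.err.println("Move failed, original left in place");
        }
    }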
Modified: trunk/freenet/src/freenet/support/io/FilenameGenerator.java
===================================================================
--- trunk/freenet/src/freenet/support/io/FilenameGenerator.java 2009-04-01
20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/support/io/FilenameGenerator.java 2009-04-01
20:34:09 UTC (rev 26322)
@@ -12,9 +12,9 @@
public class FilenameGenerator {
- private final Random random;
- private final String prefix;
- private final File tmpDir;
+ private transient Random random;
+ private String prefix;
+ private File tmpDir;
/**
* @param random
@@ -81,6 +81,13 @@
public File getFilename(long id) {
return new File(tmpDir, prefix + Long.toHexString(id));
}
+
+ public File makeRandomFile() throws IOException {
+ while(true) {
+ File file = getFilename(makeRandomFilename());
+ if(file.createNewFile()) return file;
+ }
+ }
public boolean matches(File file) {
return getID(file) != -1;
@@ -104,4 +111,97 @@
}
}
+ public File getDir() {
+ return tmpDir;
+ }
+
+ /**
+ * Set up the dir and prefix. Note that while we can change the dir and
prefix, we *cannot do so online*,
+ * at least not on Windows.
+ * @param dir
+ * @param prefix
+ */
+ public void init(File dir, String prefix, Random random) throws
IOException {
+ this.random = random;
+ // There is a problem with putting File's into db4o IIRC ... I think we work around this somewhere else?
+ // Symptoms are it trying to move even though the two dirs are
blatantly identical.
+ File oldDir = FileUtil.getCanonicalFile(new
File(tmpDir.getPath()));
+ File newDir = FileUtil.getCanonicalFile(dir);
+ System.err.println("Old: "+oldDir+" prefix "+this.prefix+" from
"+tmpDir+" old path "+tmpDir.getPath()+" old parent "+tmpDir.getParent());
+ System.err.println("New: "+newDir+" prefix "+prefix+" from
"+dir);
+ if(oldDir.equals(newDir) && this.prefix.equals(prefix)) {
+ Logger.normal(this, "Initialised FilenameGenerator
successfully - no change in dir and prefix: dir="+dir+" prefix="+prefix);
+ } else if((!oldDir.equals(newDir)) &&
this.prefix.equals(prefix)) {
+ if((!dir.exists()) && oldDir.renameTo(dir)) {
+ tmpDir = dir;
+ // This will interest the user, since they
changed it.
+ String msg = "Successfully renamed persistent
temporary directory from "+tmpDir+" to "+dir;
+ Logger.error(this, msg);
+ System.err.println(msg);
+ } else {
+ if(!dir.exists()) {
+ if(!dir.mkdir()) {
+ // FIXME localise these errors
somehow??
+ System.err.println("Unable to
create new temporary directory: "+dir);
+ throw new IOException("Unable
to create new temporary directory: "+dir);
+ }
+ }
+ if(!(dir.canRead() && dir.canWrite())) {
+ // FIXME localise these errors somehow??
+ System.err.println("Unable to read and
write new temporary directory: "+dir);
+ throw new IOException("Unable to read
and write new temporary directory: "+dir);
+ }
+ int moved = 0;
+ int failed = 0;
+ // Move each file
+ File[] list = tmpDir.listFiles();
+ for(int i=0;i<list.length;i++) {
+ File f = list[i];
+ String name = f.getName();
+ if(!name.startsWith(prefix)) continue;
+ if(FileUtil.moveTo(f, new File(dir,
name), true))
+ moved++;
+ else
+ failed++;
+ }
+ if(failed > 0) {
+ // FIXME maybe a useralert
+ System.err.println("WARNING: Not all
files successfully moved changing temp dir: "+failed+" failed.");
+ System.err.println("WARNING: Some
persistent downloads etc may fail.");
+ }
+ }
+ } else {
+ if(!dir.exists()) {
+ if(!dir.mkdir()) {
+ // FIXME localise these errors somehow??
+ System.err.println("Unable to create
new temporary directory: "+dir);
+ throw new IOException("Unable to create
new temporary directory: "+dir);
+ }
+ }
+ if(!(dir.canRead() && dir.canWrite())) {
+ // FIXME localise these errors somehow??
+ System.err.println("Unable to read and write
new temporary directory: "+dir);
+ throw new IOException("Unable to read and write
new temporary directory: "+dir);
+ }
+ int moved = 0;
+ int failed = 0;
+ // Move each file
+ File[] list = tmpDir.listFiles();
+ for(int i=0;i<list.length;i++) {
+ File f = list[i];
+ String name = f.getName();
+ if(!name.startsWith(this.prefix)) continue;
+ String newName = prefix +
name.substring(this.prefix.length());
+ if(FileUtil.moveTo(f, new File(dir, newName),
true))
+ moved++;
+ else
+ failed++;
+ }
+ if(failed > 0) {
+ // FIXME maybe a useralert
+ System.err.println("WARNING: Not all files
successfully moved changing temp dir: "+failed+" failed.");
+ System.err.println("WARNING: Some persistent
downloads etc may fail.");
+ }
+ }
+ }
+
}
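
The makeRandomFile() method added above relies on File.createNewFile() being atomic: only the one caller that actually creates the file sees true, so retrying on collision yields a uniquely owned temp file. A minimal standalone sketch of the same claim pattern, with illustrative (non-Freenet) names:

    import java.io.File;
    import java.io.IOException;
    import java.util.Random;

    public class AtomicTempFileSketch {
        // Keep trying random names until createNewFile() atomically
        // succeeds; at most one caller can ever win a given name.
        static File claim(File dir, String prefix, Random rng) throws IOException {
            while (true) {
                File f = new File(dir, prefix + Long.toHexString(rng.nextLong()));
                if (f.createNewFile()) return f; // we own it now
            }
        }
    }
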
Modified: trunk/freenet/src/freenet/support/io/MultiReaderBucket.java
===================================================================
--- trunk/freenet/src/freenet/support/io/MultiReaderBucket.java 2009-04-01
20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/support/io/MultiReaderBucket.java 2009-04-01
20:34:09 UTC (rev 26322)
@@ -8,6 +8,8 @@
import java.io.OutputStream;
import java.util.ArrayList;
+import com.db4o.ObjectContainer;
+
import freenet.support.Logger;
import freenet.support.api.Bucket;
@@ -134,6 +136,24 @@
protected void finalize() {
free();
}
+
+ public void storeTo(ObjectContainer container) {
+ container.store(this);
+ }
+
+ public void removeFrom(ObjectContainer container) {
+ container.delete(this);
+ synchronized(MultiReaderBucket.this) {
+ if(!closed) return;
+ }
+ bucket.removeFrom(container);
+ container.delete(readers);
+ container.delete(MultiReaderBucket.this);
+ }
+
+ public Bucket createShadow() throws IOException {
+ return null;
+ }
}
Copied: trunk/freenet/src/freenet/support/io/NotPersistentBucket.java (from rev
26320, branches/db4o/freenet/src/freenet/support/io/NotPersistentBucket.java)
===================================================================
--- trunk/freenet/src/freenet/support/io/NotPersistentBucket.java
(rev 0)
+++ trunk/freenet/src/freenet/support/io/NotPersistentBucket.java
2009-04-01 20:34:09 UTC (rev 26322)
@@ -0,0 +1,10 @@
+package freenet.support.io;
+
+import freenet.support.api.Bucket;
+
+// A Bucket which does not support being stored to the database. E.g.
SegmentedBCB.
+public interface NotPersistentBucket extends Bucket {
+
+ // No methods
+
+}
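
NotPersistentBucket is a pure marker interface: code about to push a Bucket into db4o can test for it and fall back to a file-backed bucket instead. A hedged sketch of such a guard; the helper itself is hypothetical, while storeTo() is the per-bucket persistence hook added elsewhere in this commit:

    import com.db4o.ObjectContainer;
    import freenet.support.api.Bucket;
    import freenet.support.io.NotPersistentBucket;

    public class PersistGuardSketch {
        // Hypothetical helper: refuse to persist buckets that opt out.
        static void storeBucket(Bucket bucket, ObjectContainer container) {
            if (bucket instanceof NotPersistentBucket)
                throw new IllegalArgumentException("Not persistable: " + bucket);
            bucket.storeTo(container);
        }
    }
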
Modified: trunk/freenet/src/freenet/support/io/NullBucket.java
===================================================================
--- trunk/freenet/src/freenet/support/io/NullBucket.java 2009-04-01
20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/support/io/NullBucket.java 2009-04-01
20:34:09 UTC (rev 26322)
@@ -2,9 +2,12 @@
* Public License, version 2 (or at your option any later version). See
* http://www.gnu.org/ for further details of the GPL. */
package freenet.support.io;
+import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
+import com.db4o.ObjectContainer;
+
import freenet.support.SimpleFieldSet;
import freenet.support.api.Bucket;
@@ -63,5 +66,17 @@
fs.putSingle("Type", "NullBucket");
return fs;
}
+
+ public void storeTo(ObjectContainer container) {
+ container.store(this);
+ }
+
+ public void removeFrom(ObjectContainer container) {
+ container.delete(this);
+ }
+
+ public Bucket createShadow() throws IOException {
+ return new NullBucket();
+ }
}
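
NullBucket's storeTo()/removeFrom() pair above is the persistence contract in its minimal form: store yourself (plus any owned sub-buckets) and delete yourself (plus any owned sub-buckets). A rough standalone sketch of the lifecycle against db4o, assuming the classic Db4o.openFile() entry point and NullBucket's no-arg constructor:

    import com.db4o.Db4o;
    import com.db4o.ObjectContainer;
    import freenet.support.io.NullBucket;

    public class BucketLifecycleSketch {
        public static void main(String[] args) {
            ObjectContainer container = Db4o.openFile("sketch.db4o");
            try {
                NullBucket bucket = new NullBucket();
                bucket.storeTo(container);    // container.store(this)
                container.commit();
                bucket.removeFrom(container); // container.delete(this)
                container.commit();
            } finally {
                container.close();
            }
        }
    }
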
Modified: trunk/freenet/src/freenet/support/io/NullPersistentFileTracker.java
===================================================================
--- trunk/freenet/src/freenet/support/io/NullPersistentFileTracker.java
2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/support/io/NullPersistentFileTracker.java
2009-04-01 20:34:09 UTC (rev 26322)
@@ -23,7 +23,7 @@
// Do nothing
}
- public void delayedFreeBucket(Bucket bucket) {
+ public void delayedFreeBucket(DelayedFreeBucket bucket) {
// Free immediately
bucket.free();
}
Modified:
trunk/freenet/src/freenet/support/io/PaddedEphemerallyEncryptedBucket.java
===================================================================
--- trunk/freenet/src/freenet/support/io/PaddedEphemerallyEncryptedBucket.java
2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/support/io/PaddedEphemerallyEncryptedBucket.java
2009-04-01 20:34:09 UTC (rev 26322)
@@ -6,9 +6,12 @@
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
-import java.lang.ref.SoftReference;
import java.util.Random;
+import org.spaceroots.mantissa.random.MersenneTwister;
+
+import com.db4o.ObjectContainer;
+
import freenet.crypt.PCFBMode;
import freenet.crypt.RandomSource;
import freenet.crypt.UnsupportedCipherException;
@@ -27,10 +30,9 @@
private final Bucket bucket;
private final int minPaddedSize;
- private final Random randomSource;
- private SoftReference<Rijndael> aesRef;
/** The decryption key. */
private final byte[] key;
+ private final byte[] randomSeed;
private long dataLength;
private boolean readOnly;
private int lastOutputStream;
@@ -42,13 +44,16 @@
* @param minSize The minimum padded size of the file (after it has
been closed).
* @param strongPRNG a strong prng we will key from.
* @param weakPRNG a weak prng we will pad from.
+ * Serialization: Note that it is not our responsibility to free the
random number generators,
+ * but we WILL free the underlying bucket.
* @throws UnsupportedCipherException
*/
public PaddedEphemerallyEncryptedBucket(Bucket bucket, int minSize,
RandomSource strongPRNG, Random weakPRNG) {
- this.randomSource = weakPRNG;
this.bucket = bucket;
if(bucket.size() != 0) throw new
IllegalArgumentException("Bucket must be empty");
byte[] tempKey = new byte[32];
+ randomSeed = new byte[32];
+ weakPRNG.nextBytes(randomSeed);
strongPRNG.nextBytes(tempKey);
this.key = tempKey;
this.minPaddedSize = minSize;
@@ -72,9 +77,10 @@
if(bucket.size() < knownSize)
throw new IOException("Bucket "+bucket+" is too small
on disk - knownSize="+knownSize+" but bucket.size="+bucket.size()+" for
"+bucket);
this.dataLength = knownSize;
- this.randomSource = origRandom;
this.bucket = bucket;
if(key.length != 32) throw new IllegalArgumentException("Key
wrong length: "+key.length);
+ randomSeed = new byte[32];
+ origRandom.nextBytes(randomSeed);
this.key = key;
this.minPaddedSize = minSize;
readOnly = false;
@@ -82,7 +88,6 @@
}
public PaddedEphemerallyEncryptedBucket(SimpleFieldSet fs, RandomSource
origRandom, PersistentFileTracker f) throws CannotCreateFromFieldSetException {
- this.randomSource = origRandom;
String tmp = fs.get("DataLength");
if(tmp == null)
throw new CannotCreateFromFieldSetException("No
DataLength");
@@ -112,8 +117,20 @@
}
if(dataLength > bucket.size())
throw new CannotCreateFromFieldSetException("Underlying
bucket "+bucket+" is too small: should be "+dataLength+" actually
"+bucket.size());
+ randomSeed = new byte[32];
+ origRandom.nextBytes(randomSeed);
}
+ public
PaddedEphemerallyEncryptedBucket(PaddedEphemerallyEncryptedBucket orig, Bucket
newBucket) {
+ this.dataLength = orig.dataLength;
+ this.key = new byte[orig.key.length];
+ System.arraycopy(orig.key, 0, key, 0, orig.key.length);
+ this.randomSeed = null; // Will be read-only
+ setReadOnly();
+ this.bucket = newBucket;
+ this.minPaddedSize = orig.minPaddedSize;
+ }
+
public OutputStream getOutputStream() throws IOException {
if(readOnly) throw new IOException("Read only");
OutputStream os = bucket.getOutputStream();
@@ -186,6 +203,7 @@
Logger.normal(this, "Not padding out to
length because have been superseded: "+getName());
return;
}
+ Random random = new MersenneTwister(randomSeed);
synchronized(PaddedEphemerallyEncryptedBucket.this) {
long finalLength = paddedLength();
long padding = finalLength - dataLength;
@@ -193,7 +211,7 @@
long writtenPadding = 0;
while(writtenPadding < padding) {
int left = (int)
Math.min((long) (padding - writtenPadding), (long) buf.length);
- randomSource.nextBytes(buf);
+ random.nextBytes(buf);
out.write(buf, 0, left);
writtenPadding += left;
}
@@ -302,17 +320,12 @@
private synchronized Rijndael getRijndael() {
Rijndael aes;
- if(aesRef != null) {
- aes = aesRef.get();
- if(aes != null) return aes;
- }
try {
aes = new Rijndael(256, 256);
} catch (UnsupportedCipherException e) {
throw new Error(e);
}
aes.initialize(key);
- aesRef = new SoftReference<Rijndael>(aes);
return aes;
}
@@ -377,4 +390,29 @@
return fs;
}
+ public void storeTo(ObjectContainer container) {
+ bucket.storeTo(container);
+ container.store(this);
+ }
+
+ public void removeFrom(ObjectContainer container) {
+ if(Logger.shouldLog(Logger.MINOR, this))
+ Logger.minor(this, "Removing from database: "+this);
+ bucket.removeFrom(container);
+ // The random is passed in and not our responsibility.
+ container.delete(this);
+ }
+
+ public void objectOnActivate(ObjectContainer container) {
+ Logger.minor(this, "Activating "+super.toString()+" bucket ==
null = "+(bucket == null));
+ // Cascading activation of dependencies
+ container.activate(bucket, 1);
+ }
+
+ public Bucket createShadow() throws IOException {
+ Bucket newUnderlying = bucket.createShadow();
+ if(newUnderlying == null) return null;
+ return new PaddedEphemerallyEncryptedBucket(this,
newUnderlying);
+ }
+
}
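
The randomSeed change above is what makes this bucket database-friendly: instead of holding a live (transient, unserializable) PRNG, it stores 32 seed bytes and rebuilds a MersenneTwister from them each time it pads, so the pad stream is reproducible after a restart. A minimal sketch of the idea, using java.util.Random seeded from the stored bytes as a stand-in for Mantissa's MersenneTwister:

    import java.util.Random;

    public class SeededPaddingSketch {
        private final byte[] randomSeed; // persisted alongside the object

        public SeededPaddingSketch(byte[] seed) {
            this.randomSeed = seed.clone();
        }

        // Rebuild the generator on demand: the same stored seed always
        // yields the same padding bytes, run after run.
        public byte[] pad(int length) {
            long seed = 0;
            for (int i = 0; i < 8 && i < randomSeed.length; i++)
                seed = (seed << 8) | (randomSeed[i] & 0xFF);
            Random random = new Random(seed); // stand-in PRNG
            byte[] padding = new byte[length];
            random.nextBytes(padding);
            return padding;
        }
    }
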
Copied: trunk/freenet/src/freenet/support/io/PersistentBlobTempBucket.java
(from rev 26320,
branches/db4o/freenet/src/freenet/support/io/PersistentBlobTempBucket.java)
===================================================================
--- trunk/freenet/src/freenet/support/io/PersistentBlobTempBucket.java
(rev 0)
+++ trunk/freenet/src/freenet/support/io/PersistentBlobTempBucket.java
2009-04-01 20:34:09 UTC (rev 26322)
@@ -0,0 +1,250 @@
+package freenet.support.io;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.nio.ByteBuffer;
+import java.nio.channels.FileChannel;
+
+import com.db4o.ObjectContainer;
+
+import freenet.support.Logger;
+import freenet.support.api.Bucket;
+
+/**
+ * A persistent temp bucket stored as a blob in a
PersistentBlobTempBucketFactory.
+ * @author Matthew Toseland <toad at amphibian.dyndns.org> (0xE43DA450)
+ */
+public class PersistentBlobTempBucket implements Bucket {
+
+ public final long blockSize;
+ long size;
+ public final PersistentBlobTempBucketFactory factory;
+ /** The index into the blob file of this specific bucket */
+ final long index;
+ private boolean freed;
+ private boolean readOnly;
+ /** Has this bucket been persisted? If not, it will be only on the
temporary
+ * map in the factory. */
+ private boolean persisted;
+ private final int hashCode;
+ final PersistentBlobTempBucketTag tag;
+ private boolean shadow;
+
+ public int hashCode() {
+ return hashCode;
+ }
+
+ public PersistentBlobTempBucket(PersistentBlobTempBucketFactory
factory2, long blockSize2, long slot, PersistentBlobTempBucketTag tag, boolean
shadow) {
+ factory = factory2;
+ blockSize = blockSize2;
+ index = slot;
+ hashCode = super.hashCode();
+ if(tag == null && !shadow) throw new NullPointerException();
+ this.tag = tag;
+ this.shadow = shadow;
+ this.readOnly = shadow;
+ }
+
+ public Bucket createShadow() throws IOException {
+ return factory.createShadow(this);
+ }
+
+ public void free() {
+ if(shadow)
+ factory.freeShadow(index, this);
+ else
+ factory.freeBucket(index, this); // will call onFree():
always take the outer lock first.
+ }
+
+ public boolean freed() {
+ return freed;
+ }
+
+ synchronized void onFree() {
+ freed = true;
+ }
+
+ boolean persisted() {
+ return persisted;
+ }
+
+ private int inputStreams;
+
+ public InputStream getInputStream() throws IOException {
+ if(freed) throw new IOException("Already freed");
+ final FileChannel channel = factory.channel;
+ return new InputStream() {
+
+ private int offset;
+ private boolean closed;
+
+ {
+ synchronized(PersistentBlobTempBucket.this) {
+ inputStreams++;
+ }
+ }
+
+ @Override
+ public int read() throws IOException {
+ byte[] buf = new byte[1];
+ int res = read(buf);
+ if(res == -1) return -1;
+ return buf[0] & 0xFF; // mask per InputStream contract
+ }
+
+ @Override
+ public int read(byte[] buffer, int bufOffset, int
length) throws IOException {
+ long max;
+ synchronized(PersistentBlobTempBucket.this) {
+ if(freed) throw new IOException("Bucket
freed during read");
+ max = Math.min(blockSize, size);
+ }
+ if(length == 0) return 0;
+ if(bufOffset < 0) return -1; // throw new
EOFException() ???
+ if(offset + length >= max)
+ length = (int) Math.min(max - offset,
Integer.MAX_VALUE);
+ if(length == 0) return -1;
+ if(length < 0) throw new
IllegalStateException("offset="+bufOffset+" length="+length+" buf len =
"+buffer.length+" my offset is "+offset+" my size is "+max+" for "+this+" for
"+PersistentBlobTempBucket.this);
+ ByteBuffer buf = ByteBuffer.wrap(buffer,
bufOffset, length);
+ int read = channel.read(buf, blockSize * index
+ offset);
+ if(read > 0) offset += read;
+ return read;
+ }
+
+ @Override
+ public int read(byte[] buffer) throws IOException {
+ return read(buffer, 0, buffer.length);
+ }
+
+ public int available() {
+ return (int) Math.min(blockSize - offset,
Integer.MAX_VALUE);
+ }
+
+ public void close() {
+ synchronized(PersistentBlobTempBucket.this) {
+ inputStreams--;
+ }
+ // Do nothing.
+ }
+
+ };
+ }
+
+ public String getName() {
+ return factory.getName()+":"+index;
+ }
+
+ public OutputStream getOutputStream() throws IOException {
+ if(freed) throw new IOException("Already freed");
+ if(shadow) throw new IOException("Shadow");
+ if(readOnly) throw new IOException("Read-only");
+ final FileChannel channel = factory.channel;
+
+ return new OutputStream() {
+
+ private int offset;
+
+ @Override
+ public void write(int arg) throws IOException {
+ byte[] buf = new byte[] { (byte) arg };
+ write(buf, 0, 1);
+ }
+
+ @Override
+ public void write(byte[] buffer, int bufOffset, int
length) throws IOException {
+ synchronized(PersistentBlobTempBucket.this) {
+ if(freed) throw new IOException("Bucket
freed during write");
+ if(readOnly) throw new
IOException("Bucket made read only during write");
+ }
+ long remaining = blockSize - offset;
+ if(remaining <= 0) throw new IOException("Too
big");
+ if(length > remaining) throw new
IOException("Writing too many bytes: written "+offset+" of "+blockSize+" and
now want to write "+length);
+ ByteBuffer buf = ByteBuffer.wrap(buffer,
bufOffset, length);
+ int written = 0;
+ while(written < length) {
+ int w = channel.write(buf, blockSize *
index + offset);
+ offset += w;
+
synchronized(PersistentBlobTempBucket.this) {
+ size += w;
+ }
+ written += w;
+ }
+ }
+
+ @Override
+ public void write(byte[] buffer) throws IOException {
+ write(buffer, 0, buffer.length);
+ }
+
+ };
+ }
+
+ public synchronized boolean isReadOnly() {
+ return readOnly;
+ }
+
+ public synchronized void setReadOnly() {
+ readOnly = true;
+ }
+
+ public synchronized long size() {
+ return size;
+ }
+
+ // When created, we take up a slot in the temporary (in-RAM) map on the
factory.
+ // When storeTo() is called the first time, we are committed to a
persistent
+ // structure. When removeFrom() is called afterwards, we are moved back
to the
+ // temporary map, unless we have been freed.
+
+ public void storeTo(ObjectContainer container) {
+ if(shadow) {
+ throw new UnsupportedOperationException("Can't store a
shadow");
+ }
+ boolean p;
+ // Race conditions with storeTo and removeFrom running on
different threads
+ // in parallel are possible... that sort of behaviour *should*
be very rare,
+ // you should always store it before making it publicly
available...
+ synchronized(this) {
+ if(tag == null) throw new NullPointerException();
+ p = persisted;
+ persisted = true;
+ }
+ if(!p)
+ factory.store(this, container); // Calls onStore().
Take the outer lock first.
+ }
+
+ public boolean objectCanNew(ObjectContainer container) {
+ if(shadow) throw new UnsupportedOperationException("Can't store
a shadow");
+ synchronized(this) {
+ if(persisted) return true;
+ }
+ Logger.error(this, "objectOnNew() called but we haven't been
stored yet! for "+this+" for "+factory+" index "+index, new Exception("error"));
+ return true;
+
+ }
+
+ public boolean objectCanDeactivate(ObjectContainer container) {
+ if(inputStreams > 0) {
+ Logger.error(this, "Deactivating when have active input
streams!", new Exception("error"));
+ return false;
+ }
+ return true;
+ }
+
+ public void removeFrom(ObjectContainer container) {
+ if(shadow) throw new UnsupportedOperationException("Can't store
a shadow");
+ boolean p;
+ synchronized(this) {
+ p = persisted;
+ }
+ if(p)
+ factory.remove(this, container); // Calls onRemove().
+ container.delete(this);
+ }
+
+ synchronized void onRemove() {
+ persisted = false;
+ }
+
+}
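
Both anonymous streams above address the shared blob file by absolute position (blockSize * index + offset), i.e. the NIO equivalent of pread/pwrite, so buckets in different slots can do I/O concurrently without fighting over a shared file pointer. A cut-down sketch of the access pattern; the file name and block size are illustrative:

    import java.io.IOException;
    import java.io.RandomAccessFile;
    import java.nio.ByteBuffer;
    import java.nio.channels.FileChannel;

    public class BlobSlotIOSketch {
        static final long BLOCK_SIZE = 32768; // illustrative

        // Positional read: no shared file-pointer state is touched, so
        // concurrent readers of different slots do not interfere.
        static int readSlot(FileChannel channel, long slot, long offset,
                byte[] buf) throws IOException {
            return channel.read(ByteBuffer.wrap(buf), BLOCK_SIZE * slot + offset);
        }

        public static void main(String[] args) throws IOException {
            RandomAccessFile raf = new RandomAccessFile("persistent-blob.tmp", "rw");
            try {
                byte[] buf = new byte[4096];
                int n = readSlot(raf.getChannel(), 3, 0, buf);
                System.out.println("read " + n + " bytes from slot 3");
            } finally {
                raf.close();
            }
        }
    }
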
Copied:
trunk/freenet/src/freenet/support/io/PersistentBlobTempBucketFactory.java (from
rev 26320,
branches/db4o/freenet/src/freenet/support/io/PersistentBlobTempBucketFactory.java)
===================================================================
--- trunk/freenet/src/freenet/support/io/PersistentBlobTempBucketFactory.java
(rev 0)
+++ trunk/freenet/src/freenet/support/io/PersistentBlobTempBucketFactory.java
2009-04-01 20:34:09 UTC (rev 26322)
@@ -0,0 +1,641 @@
+package freenet.support.io;
+
+import java.io.File;
+import java.io.IOException;
+import java.io.RandomAccessFile;
+import java.nio.ByteBuffer;
+import java.nio.channels.FileChannel;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.LinkedList;
+import java.util.Map;
+import java.util.Random;
+import java.util.Set;
+import java.util.TreeMap;
+
+import com.db4o.ObjectContainer;
+import com.db4o.ObjectSet;
+import com.db4o.query.Query;
+
+import freenet.client.async.ClientContext;
+import freenet.client.async.DBJob;
+import freenet.client.async.DBJobRunner;
+import freenet.node.Ticker;
+import freenet.support.Logger;
+import freenet.support.api.Bucket;
+
+/**
+ * Simple temporary storage mechanism using a single file (or a small number
of
+ * files), and storing buckets of precisely the block size only. Buckets may
not
+ * exceed the block size; the rest of the node should only call us if the
bucket
+ * is of the correct maximum size, and should fall back to one-file-per-bucket
+ * otherwise.
+ *
+ * Currently we only use one blob file. This means that on FAT and some other
+ * filesystems, the node will have to fall back once we reach 2GB of temp
files.
+ * @author Matthew Toseland <toad at amphibian.dyndns.org> (0xE43DA450)
+ */
+public class PersistentBlobTempBucketFactory {
+
+ public final long blockSize;
+ private File storageFile;
+ private transient RandomAccessFile raf;
+ private transient HashSet<DBJob> freeJobs;
+ /** We use NIO for the equivalent of pwrite/pread. This is parallelized
on unix
+ * but sadly not on Windows. */
+ transient FileChannel channel;
+
+ /** Blobs in memory only: in the database there will still be a "free"
tag */
+ private transient TreeMap<Long,PersistentBlobTempBucket>
notCommittedBlobs;
+
+ /** Non-exhaustive list of free slots. If we run out we need to query
for
+ * more. */
+ private transient TreeMap<Long,PersistentBlobTempBucketTag> freeSlots;
+
+ /** Recently freed slots, cannot be reused until after commit.
+ * Similar to notCommittedBlobs. */
+ private transient TreeMap<Long,PersistentBlobTempBucketTag>
almostFreeSlots;
+
+ private transient TreeMap<Long,PersistentBlobTempBucket> shadows;
+
+ private transient DBJobRunner jobRunner;
+
+ private transient Random weakRandomSource;
+
+ private transient Ticker ticker;
+
+ private final long nodeDBHandle;
+
+ public PersistentBlobTempBucketFactory(long blockSize2, long
nodeDBHandle2, File storageFile2) {
+ blockSize = blockSize2;
+ nodeDBHandle = nodeDBHandle2;
+ storageFile = storageFile2;
+ }
+
+ void onInit(ObjectContainer container, DBJobRunner jobRunner2, Random
fastWeakRandom, File storageFile2, long blockSize2, Ticker ticker) throws
IOException {
+ container.activate(storageFile, 100);
+ File oldFile = FileUtil.getCanonicalFile(new
File(storageFile.getPath())); // db4o argh
+ File newFile = FileUtil.getCanonicalFile(new
File(storageFile2.getPath()));
+ if(blockSize != blockSize2)
+ throw new IllegalStateException("My block size is
"+blockSize2+
+ " but stored block size is "+blockSize+
+ " for same file "+storageFile);
+ if(!(oldFile.equals(newFile) ||
+ (File.separatorChar == '\\' ?
oldFile.getPath().toLowerCase().equals(newFile.getPath().toLowerCase()) :
oldFile.getPath().equals(newFile.getPath())))) {
+ if(!FileUtil.moveTo(storageFile, storageFile2, false))
+ throw new IOException("Unable to move temp blob
file from "+storageFile+" to "+storageFile2);
+ }
+ raf = new RandomAccessFile(storageFile, "rw");
+ channel = raf.getChannel();
+ notCommittedBlobs = new
TreeMap<Long,PersistentBlobTempBucket>();
+ freeSlots = new TreeMap<Long,PersistentBlobTempBucketTag>();
+ almostFreeSlots = new
TreeMap<Long,PersistentBlobTempBucketTag>();
+ shadows = new TreeMap<Long,PersistentBlobTempBucket>();
+ jobRunner = jobRunner2;
+ weakRandomSource = fastWeakRandom;
+ freeJobs = new HashSet<DBJob>();
+ this.ticker = ticker;
+
+ maybeShrink(container);
+
+ // Diagnostics
+
+ long size;
+ try {
+ size = channel.size();
+ } catch (IOException e1) {
+ Logger.error(this, "Unable to find size of temp blob
storage file: "+e1, e1);
+ return;
+ }
+ size -= size % blockSize;
+ long blocks = size / blockSize;
+ long ptr = blocks - 1;
+
+ long used = 0;
+ long rangeStart = Long.MIN_VALUE;
+ PersistentBlobTempBucketTag firstInRange = null;
+ for(long l = 0; l < ptr; l++) {
+ synchronized(this) {
+ if(freeSlots.containsKey(l)) continue;
+ if(notCommittedBlobs.containsKey(l)) continue;
+ if(almostFreeSlots.containsKey(l)) continue;
+ }
+ Query query = container.query();
+ query.constrain(PersistentBlobTempBucketTag.class);
+ query.descend("index").constrain(l);
+ ObjectSet<PersistentBlobTempBucketTag> tags =
query.execute();
+ if(tags.hasNext()) {
+ PersistentBlobTempBucketTag tag = tags.next();
+ if(!tag.isFree)
+ used++;
+ if(tag.bucket == null && !tag.isFree)
+ Logger.error(this, "No bucket but
flagged as not free: index "+l+" "+tag.bucket);
+ if(tag.bucket != null && tag.isFree)
+ Logger.error(this, "Has bucket but
flagged as free: index "+l+" "+tag.bucket);
+ if(!tag.isFree) {
+ if(rangeStart == Long.MIN_VALUE) {
+ rangeStart = l;
+ firstInRange = tag;
+ }
+ } else {
+ if(rangeStart != Long.MIN_VALUE) {
+ System.out.println("Range:
"+rangeStart+" to "+(l-1)+" first is "+firstInRange);
+ rangeStart = Long.MIN_VALUE;
+ firstInRange = null;
+ }
+ }
+ continue;
+ }
+ Logger.error(this, "FOUND EMPTY SLOT: "+l+" when
scanning the blob file because tags in database < length of file");
+ PersistentBlobTempBucketTag tag = new
PersistentBlobTempBucketTag(PersistentBlobTempBucketFactory.this, l);
+ container.store(tag);
+ synchronized(this) {
+ freeSlots.put(l, tag);
+ }
+ }
+ if(rangeStart != Long.MIN_VALUE) {
+ System.out.println("Range: "+rangeStart+" to "+(ptr-1));
+ }
+ System.err.println("Persistent blobs: Blocks: "+blocks+" used
"+used);
+ }
+
+ public String getName() {
+ return storageFile.getPath();
+ }
+
+ static final int MAX_FREE = 2048;
+
+ private final DBJob slotFinder = new DBJob() {
+
+ public void run(ObjectContainer container, ClientContext
context) {
+ int added = 0;
+
+ while(true) {
+ boolean logMINOR = Logger.shouldLog(Logger.MINOR, this);
+ synchronized(PersistentBlobTempBucketFactory.this) {
+ if(freeSlots.size() > MAX_FREE) return;
+ }
+ long size;
+ try {
+ size = channel.size();
+ } catch (IOException e1) {
+ Logger.error(this, "Unable to find size of temp
blob storage file: "+e1, e1);
+ return;
+ }
+ size -= size % blockSize;
+ long blocks = size / blockSize;
+ long ptr = blocks - 1;
+
+ for(long l = 0; l < blockSize + 16383; l += 16384) {
+ Query query = container.query();
+ query.constrain(PersistentBlobTempBucketTag.class);
+
query.descend("isFree").constrain(true).and(query.descend("index").constrain(l).smaller());
+ ObjectSet<PersistentBlobTempBucketTag> tags =
query.execute();
+ Long[] notCommitted;
+ synchronized(PersistentBlobTempBucketFactory.this) {
+ while(tags.hasNext()) {
+ PersistentBlobTempBucketTag tag =
tags.next();
+ if(!tag.isFree) {
+ Logger.error(this, "Tag not
free! "+tag.index);
+ if(tag.bucket == null) {
+ Logger.error(this, "Tag
flagged non-free yet has no bucket for index "+tag.index);
+ tag.isFree = true;
+ } else continue;
+ }
+ if(tag.bucket != null) {
+ Logger.error(this, "Query
returned tag with valid bucket!");
+ continue;
+ }
+ if(tag.factory !=
PersistentBlobTempBucketFactory.this) continue;
+
if(notCommittedBlobs.containsKey(tag.index)) continue;
+
if(almostFreeSlots.containsKey(tag.index)) continue;
+ if(freeSlots.containsKey(tag.index))
continue;
+ if(tag.bucket != null) {
+ Logger.error(this, "Bucket is
occupied but not in notCommittedBlobs?!: "+tag+" : "+tag.bucket);
+ continue;
+ }
+ if(logMINOR) Logger.minor(this, "Adding
slot "+tag.index+" to freeSlots (has a free tag and no taken tag)");
+ freeSlots.put(tag.index, tag);
+ added++;
+ if(added > MAX_FREE) return;
+ }
+ }
+ }
+
+ // Checking for slots marked occupied with bucket !=
null is nontrivial,
+ // because constraining to null doesn't work - causes
an OOM with a large database,
+ // because it DOES NOT USE THE INDEX and therefore
instantiates every object and OOMs.
+ // See http://tracker.db4o.com/browse/COR-1446
+
+ // Check that the number of tags is equal to the size
of the file.
+
+ if(logMINOR) Logger.minor(this, "Checking number of
tags against file size...");
+ Query query = container.query();
+ query.constrain(PersistentBlobTempBucketTag.class);
+ ObjectSet<PersistentBlobTempBucketTag> tags =
query.execute();
+ long inDB = tags.size();
+ if(logMINOR) Logger.minor(this, "Checked size.");
+ tags = null;
+ if(inDB < ptr) {
+ Logger.error(this, "Tags in database: "+inDB+"
but size of file allows: "+ptr);
+ // Recover: exhaustive index search. This can
cause very long pauses, but should only happen if there is a bug.
+ for(long l = 0; l < ptr; l++) {
+ synchronized(PersistentBlobTempBucketFactory.this) {
+ if(freeSlots.containsKey(l))
continue;
+
if(notCommittedBlobs.containsKey(l)) continue;
+
if(almostFreeSlots.containsKey(l)) continue;
+ }
+ query = container.query();
+
query.constrain(PersistentBlobTempBucketTag.class);
+ query.descend("index").constrain(l);
+ tags = query.execute();
+ if(tags.hasNext()) continue;
+ Logger.error(this, "FOUND EMPTY SLOT:
"+l+" when scanning the blob file because tags in database < length of file");
+ PersistentBlobTempBucketTag tag = new
PersistentBlobTempBucketTag(PersistentBlobTempBucketFactory.this, l);
+ container.store(tag);
+ synchronized(PersistentBlobTempBucketFactory.this) {
+ freeSlots.put(l, tag);
+ }
+ added++;
+ if(added > MAX_FREE) return;
+ }
+ }
+
+ DBJob freeJob = null;
+ synchronized(this) {
+ if(!freeJobs.isEmpty()) {
+ freeJob = freeJobs.iterator().next();
+ freeJobs.remove(freeJob);
+ }
+ }
+ if(freeJob != null) {
+ container.activate(freeJob, 1);
+ System.err.println("Freeing some space by
running "+freeJob);
+ Logger.minor(this, "Freeing some space by
running "+freeJob);
+ freeJob.run(container, context);
+ continue;
+ }
+
+ // Let's extend the file.
+ // FIXME if physical security is LOW, just set the
length, possibly
+ // padding with nonrandom nulls on unix.
+ long addBlocks = Math.min(8192, (blocks / 10) + 32);
+ long extendBy = addBlocks * blockSize;
+ long written = 0;
+ byte[] buf = new byte[4096];
+ ByteBuffer buffer = ByteBuffer.wrap(buf);
+ while(written < extendBy) {
+ weakRandomSource.nextBytes(buf);
+ int bytesLeft = (int) Math.min(extendBy -
written, Integer.MAX_VALUE);
+ if(bytesLeft < buf.length)
+ buffer.limit(bytesLeft);
+ try {
+ written += channel.write(buffer, size +
written);
+ buffer.clear();
+ } catch (IOException e) {
+ break;
+ }
+ }
+ query = container.query();
+ query.constrain(PersistentBlobTempBucketTag.class);
+
query.descend("index").constrain(blocks-1).greater().and(query.descend("factory").constrain(PersistentBlobTempBucketFactory.this));
+ HashSet<Long> taken = null;
+ ObjectSet<PersistentBlobTempBucketTag> results =
query.execute();
+ while(results.hasNext()) {
+ PersistentBlobTempBucketTag tag =
results.next();
+ if(!tag.isFree) {
+ Logger.error(this, "Block already
exists beyond the end of the file, yet is occupied: block "+tag.index);
+ }
+ if(taken == null) taken = new HashSet<Long>();
+ taken.add(tag.index);
+ }
+
+ for(int i=0;i<addBlocks;i++) {
+ ptr = blocks + i;
+ if(taken != null && taken.contains(ptr))
continue;
+ PersistentBlobTempBucketTag tag = new
PersistentBlobTempBucketTag(PersistentBlobTempBucketFactory.this, ptr);
+ container.store(tag);
+
synchronized(PersistentBlobTempBucketFactory.this) {
+ if(logMINOR)
+ Logger.minor(this, "Adding slot
"+ptr+" to freeSlots while extending storage file");
+ freeSlots.put(ptr, tag);
+ }
+ }
+ return;
+ }
+ }
+
+ };
+
+ /**
+ * @return A bucket, or null in various failure cases.
+ */
+ public PersistentBlobTempBucket makeBucket() {
+ // Find a free slot.
+ synchronized(this) {
+ if(!freeSlots.isEmpty()) {
+ Long slot = freeSlots.firstKey();
+ PersistentBlobTempBucketTag tag =
freeSlots.remove(slot);
+ if(notCommittedBlobs.get(slot) != null ||
almostFreeSlots.get(slot) != null) {
+ Logger.error(this, "Slot "+slot+"
already occupied by a not committed blob despite being in freeSlots!!");
+ return null;
+ }
+ PersistentBlobTempBucket bucket = new
PersistentBlobTempBucket(this, blockSize, slot, tag, false);
+ notCommittedBlobs.put(slot, bucket);
+ if(Logger.shouldLog(Logger.MINOR, this))
Logger.minor(this, "Using slot "+slot+" for "+bucket);
+ return bucket;
+ }
+ }
+ jobRunner.runBlocking(slotFinder, NativeThread.HIGH_PRIORITY);
+ synchronized(this) {
+ if(!freeSlots.isEmpty()) {
+ Long slot = freeSlots.firstKey();
+ PersistentBlobTempBucketTag tag =
freeSlots.remove(slot);
+ if(notCommittedBlobs.get(slot) != null ||
almostFreeSlots.get(slot) != null) {
+ Logger.error(this, "Slot "+slot+"
already occupied by a not committed blob despite being in freeSlots!!");
+ return null;
+ }
+ PersistentBlobTempBucket bucket = new
PersistentBlobTempBucket(this, blockSize, slot, tag, false);
+ notCommittedBlobs.put(slot, bucket);
+ if(Logger.shouldLog(Logger.MINOR, this))
Logger.minor(this, "Using slot "+slot+" for "+bucket+" (after waiting)");
+ return bucket;
+ }
+ }
+ Logger.error(this, "Returning null, unable to create a bucket
for some reason, node will fallback to file-based buckets");
+ return null;
+ }
+
+ public synchronized void freeBucket(long index,
PersistentBlobTempBucket bucket) {
+ if(Logger.shouldLog(Logger.MINOR, this)) Logger.minor(this,
"Freeing index "+index+" for "+bucket, new Exception("debug"));
+ notCommittedBlobs.remove(index);
+ bucket.onFree();
+ if(!bucket.persisted()) {
+ // If it hasn't been written to the database, it
doesn't need to be removed, so removeFrom() won't be called.
+ freeSlots.put(index, bucket.tag);
+ }
+ PersistentBlobTempBucket shadow = shadows.get(index);
+ if(shadow != null) {
+ shadow.onFree(); // mark the shadow freed as well
+ }
+ }
+
+ private long lastCheckedEnd = -1;
+
+ public synchronized void remove(PersistentBlobTempBucket bucket,
ObjectContainer container) {
+ if(Logger.shouldLog(Logger.MINOR, this))
+ Logger.minor(this, "Removing bucket "+bucket+" for slot
"+bucket.index+" from database", new Exception("debug"));
+ long index = bucket.index;
+ PersistentBlobTempBucketTag tag = bucket.tag;
+ if(tag == null) {
+ if(!container.ext().isActive(bucket)) {
+ Logger.error(this, "BUCKET NOT ACTIVE IN
REMOVE: "+bucket, new Exception("error"));
+ container.activate(bucket, 1);
+ tag = bucket.tag;
+ index = bucket.index;
+ } else {
+ // THIS IS IMPOSSIBLE, yet saces has seen it in
practice ... let's get some detail...
+ Logger.error(this, "NO TAG ON BUCKET REMOVING:
"+bucket+" index "+index, new Exception("error"));
+ Query query = container.query();
+
query.constrain(PersistentBlobTempBucketTag.class);
+ query.descend("index").constrain(index);
+ ObjectSet<PersistentBlobTempBucketTag> results
= query.execute();
+ if(!results.hasNext()) {
+ Logger.error(this, "TAG DOES NOT EXIST
FOR INDEX "+index);
+ } else {
+ tag = results.next();
+ if(tag.index != index)
+ // Crazy things are happening,
may as well check the impossible!
+ Logger.error(this, "INVALID
INDEX: should be "+index+" but is "+tag.index);
+ if(tag.isFree)
+ Logger.error(this, "FOUND TAG
BUT IS FREE: "+tag);
+ if(tag.bucket == null) {
+ Logger.error(this, "FOUND TAG
BUT NO BUCKET: "+tag);
+ } else if(tag.bucket == bucket) {
+ Logger.error(this, "TAG LINKS
TO BUCKET BUT BUCKET DOESN'T LINK TO TAG");
+ } else { // tag.bucket != bucket
+ Logger.error(this, "SERIOUS
ERROR: TAG BELONGS TO A DIFFERENT BUCKET!!!");
+ }
+ }
+ }
+ }
+ container.activate(tag, 1);
+ if(!bucket.persisted()) {
+ maybeShrink(container);
+ return;
+ }
+ if(!bucket.freed()) {
+ Logger.error(this, "Removing bucket "+bucket+" for slot
"+bucket.index+" but not freed!", new Exception("debug"));
+ notCommittedBlobs.put(index, bucket);
+ } else {
+ almostFreeSlots.put(index, tag);
+ }
+ tag.bucket = null;
+ tag.isFree = true;
+ container.store(tag);
+ container.delete(bucket);
+ bucket.onRemove();
+
+ maybeShrink(container);
+ }
+
+ void maybeShrink(ObjectContainer container) {
+
+ boolean logMINOR = Logger.shouldLog(Logger.MINOR, this);
+ if(logMINOR) Logger.minor(this, "maybeShrink()");
+ long now = System.currentTimeMillis();
+
+ long newBlocks;
+
+ synchronized(this) {
+
+ if(now - lastCheckedEnd > 60*1000) {
+ if(logMINOR) Logger.minor(this, "maybeShrink() inner");
+ // Check whether there is a big white space at the end
of the file.
+ long size;
+ try {
+ size = channel.size();
+ } catch (IOException e1) {
+ Logger.error(this, "Unable to find size of temp
blob storage file: "+e1, e1);
+ return;
+ }
+ size -= size % blockSize;
+ long blocks = size / blockSize;
+ if(blocks <= 32) {
+ if(logMINOR) Logger.minor(this, "Not shrinking,
blob file not larger than a megabyte");
+ lastCheckedEnd = now;
+ queueMaybeShrink();
+ return;
+ }
+ long lastNotCommitted = notCommittedBlobs.isEmpty() ? 0
: notCommittedBlobs.lastKey();
+ long lastAlmostFreed = almostFreeSlots.isEmpty() ? 0 :
almostFreeSlots.lastKey();
+ if(lastNotCommitted < lastAlmostFreed) {
+ if(logMINOR) Logger.minor(this, "Last almost
freed: "+lastAlmostFreed+" replacing last not committed: "+lastNotCommitted);
+ lastNotCommitted = lastAlmostFreed;
+ }
+ double full = (double)lastNotCommitted / (double)blocks;
+ if(full > 0.8) {
+ if(logMINOR) Logger.minor(this, "Not shrinking,
last not committed block is at "+full*100+"% ("+lastNotCommitted+" of
"+blocks+")");
+ lastCheckedEnd = now;
+ queueMaybeShrink();
+ return;
+ }
+ /*
+ * Query for the non-free tag with the highest value.
+ * This query can return a vast number of objects! And
it's all kept in RAM in IMMEDIATE mode.
+ * FIXME LAZY query mode may help, but would likely
require changes to other code.
+ * In the meantime, let's try from the end, going
backwards by a manageable number of slots at a time...
+ */
+ long lastCommitted = -1;
+ for(long threshold = blocks - 4096; threshold >= -4095;
threshold -= 4096) {
+ Query query = container.query();
+
query.constrain(PersistentBlobTempBucketTag.class);
+ query.descend("isFree").constrain(false);
+ query.descend("index").orderDescending();
+
query.descend("index").constrain(threshold).greater();
+ ObjectSet<PersistentBlobTempBucketTag> tags =
query.execute();
+ if(tags.isEmpty()) {
+ // No used slots after threshold.
+ continue;
+ } else {
+ lastCommitted = tags.next().index;
+ Logger.normal(this, "Last committed
block is "+lastCommitted);
+ break;
+ }
+ }
+ if(lastCommitted == -1) {
+ // No used slots at all?!
+ // There may be some not committed though
+ Logger.normal(this, "No used slots in
persistent temp file (but last not committed = "+lastNotCommitted+")");
+ lastCommitted = 0;
+ }
+ full = (double) lastCommitted / (double) blocks;
+ if(full > 0.8) {
+ if(logMINOR) Logger.minor(this, "Not shrinking,
last committed block is at "+full*100+"%");
+ lastCheckedEnd = now;
+ queueMaybeShrink();
+ return;
+ }
+ long lastBlock = Math.max(lastCommitted,
lastNotCommitted);
+ // Must be 10% free at end
+ newBlocks = (long) ((lastBlock + 32) * 1.1);
+ newBlocks = Math.max(newBlocks, 32);
+ if(newBlocks >= blocks) {
+ if(logMINOR) Logger.minor(this, "Not shrinking,
would shrink from "+blocks+" to "+newBlocks);
+ lastCheckedEnd = now;
+ queueMaybeShrink();
+ return;
+ }
+ System.err.println("Shrinking blob file from "+blocks+"
to "+newBlocks);
+ for(long l = newBlocks; l <= blocks; l++) {
+ freeSlots.remove(l);
+ }
+ for(Long l : freeSlots.keySet()) {
+ if(l > newBlocks) {
+ Logger.error(this, "Removing free slot
"+l+" over the current block limit");
+ }
+ }
+ lastCheckedEnd = now;
+ queueMaybeShrink();
+ } else return;
+ }
+ try {
+ channel.truncate(newBlocks * blockSize);
+ } catch (IOException e) {
+ System.err.println("Shrinking blob file failed!");
+ System.err.println(e);
+ e.printStackTrace();
+ Logger.error(this, "Shrinking blob file failed!: "+e,
e);
+ }
+ Query query = container.query();
+ query.constrain(PersistentBlobTempBucketTag.class);
+ query.descend("index").constrain(newBlocks).greater();
+ ObjectSet<PersistentBlobTempBucketTag> tags = query.execute();
+ while(tags.hasNext()) container.delete(tags.next());
+
+ }
+
+ private void queueMaybeShrink() {
+ ticker.queueTimedJob(new Runnable() {
+
+ public void run() {
+ jobRunner.queue(new DBJob() {
+
+ public void run(ObjectContainer
container, ClientContext context) {
+ maybeShrink(container);
+ }
+
+ }, NativeThread.NORM_PRIORITY-1, true);
+ }
+
+ }, 61*1000);
+ }
+
+ public void store(PersistentBlobTempBucket bucket, ObjectContainer
container) {
+ if(Logger.shouldLog(Logger.MINOR, this))
+ Logger.minor(this, "Storing bucket "+bucket+" for slot
"+bucket.index+" to database");
+ long index = bucket.index;
+ PersistentBlobTempBucketTag tag = bucket.tag;
+ container.activate(tag, 1);
+ if(tag.bucket != null && tag.bucket != bucket) {
+ Logger.error(this, "Slot "+index+" already occupied!:
"+tag.bucket+" for "+tag.index);
+ throw new IllegalStateException("Slot "+index+" already
occupied!");
+ }
+ tag.bucket = bucket;
+ tag.isFree = false;
+ container.store(tag);
+ container.store(bucket);
+ synchronized(this) {
+ notCommittedBlobs.remove(index);
+ }
+ }
+
+ public synchronized void postCommit() {
+ int freeNow = freeSlots.size();
+ int sz = freeNow + almostFreeSlots.size();
+ if(sz > MAX_FREE) {
+ Iterator<Map.Entry<Long,PersistentBlobTempBucketTag>>
it = almostFreeSlots.entrySet().iterator();
+ for(int i=freeNow;i<MAX_FREE && it.hasNext();i++) {
+ Map.Entry<Long,PersistentBlobTempBucketTag>
entry = it.next();
+ freeSlots.put(entry.getKey(), entry.getValue());
+ }
+ } else {
+ freeSlots.putAll(almostFreeSlots);
+ }
+ almostFreeSlots.clear();
+ }
+
+ public Bucket createShadow(PersistentBlobTempBucket bucket) {
+ long index = bucket.index;
+ Long i = index;
+ synchronized(this) {
+ if(shadows.containsKey(i)) return null;
+ PersistentBlobTempBucket shadow = new
PersistentBlobTempBucket(this, blockSize, index, null, true);
+ shadow.size = bucket.size;
+ shadows.put(i, shadow);
+ return shadow;
+ }
+ }
+
+ public synchronized void freeShadow(long index,
PersistentBlobTempBucket bucket) {
+ PersistentBlobTempBucket temp = shadows.remove(index);
+ if(temp != bucket) {
+ Logger.error(this, "Freed wrong shadow: "+temp+" should
be "+bucket);
+ shadows.put(index, temp);
+ }
+ }
+
+ public void addBlobFreeCallback(DBJob job) {
+ synchronized(this) {
+ freeJobs.add(job);
+ }
+ }
+
+ public void removeBlobFreeCallback(DBJob job) {
+ synchronized(this) {
+ freeJobs.remove(job);
+ }
+ }
+
+}
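
The freeSlots / almostFreeSlots split above is a two-phase free: a slot released during a transaction is parked in almostFreeSlots and only becomes allocatable once postCommit() runs, so a crash before the commit can never hand the same slot to two buckets. A stripped-down sketch of that state machine, with illustrative names:

    import java.util.TreeMap;

    public class TwoPhaseSlotPoolSketch {
        private final TreeMap<Long, Object> freeSlots = new TreeMap<Long, Object>();
        private final TreeMap<Long, Object> almostFreeSlots = new TreeMap<Long, Object>();

        // Called while a transaction is open: not yet safe to reuse.
        public synchronized void free(long slot, Object tag) {
            almostFreeSlots.put(slot, tag);
        }

        // Called after the database commit: reuse is now safe.
        public synchronized void postCommit() {
            freeSlots.putAll(almostFreeSlots);
            almostFreeSlots.clear();
        }

        public synchronized Long allocate() {
            if (freeSlots.isEmpty()) return null;
            Long slot = freeSlots.firstKey();
            freeSlots.remove(slot);
            return slot;
        }
    }
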
Copied: trunk/freenet/src/freenet/support/io/PersistentBlobTempBucketTag.java
(from rev 26320,
branches/db4o/freenet/src/freenet/support/io/PersistentBlobTempBucketTag.java)
===================================================================
--- trunk/freenet/src/freenet/support/io/PersistentBlobTempBucketTag.java
(rev 0)
+++ trunk/freenet/src/freenet/support/io/PersistentBlobTempBucketTag.java
2009-04-01 20:34:09 UTC (rev 26322)
@@ -0,0 +1,19 @@
+package freenet.support.io;
+
+public class PersistentBlobTempBucketTag {
+
+ final PersistentBlobTempBucketFactory factory;
+ final long index;
+ PersistentBlobTempBucket bucket;
+ /** db4o bug: http://tracker.db4o.com/browse/COR-1446
+ * We cannot just query for bucket == null, because it will instantiate
every
+ * object with bucket != null during the query even though we have an
index. */
+ boolean isFree;
+
+ PersistentBlobTempBucketTag(PersistentBlobTempBucketFactory f, long
idx) {
+ factory = f;
+ index = idx;
+ isFree = true;
+ }
+
+}
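
The isFree flag exists purely to dodge the db4o bug cited above: constraining on bucket == null bypasses the index and instantiates every stored tag, while constraining on an indexed boolean stays on the index. A sketch of the query this enables, mirroring the SODA calls used in the factory (and assuming the tag class is visible to the caller):

    import com.db4o.ObjectContainer;
    import com.db4o.ObjectSet;
    import com.db4o.query.Query;

    public class FreeTagQuerySketch {
        // Query free tags via the indexed boolean rather than a
        // null-constraint on bucket (see db4o COR-1446).
        static ObjectSet<PersistentBlobTempBucketTag> freeTags(ObjectContainer container) {
            Query query = container.query();
            query.constrain(PersistentBlobTempBucketTag.class);
            query.descend("isFree").constrain(true);
            return query.execute();
        }
    }
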
Modified: trunk/freenet/src/freenet/support/io/PersistentFileTracker.java
===================================================================
--- trunk/freenet/src/freenet/support/io/PersistentFileTracker.java
2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/support/io/PersistentFileTracker.java
2009-04-01 20:34:09 UTC (rev 26322)
@@ -15,7 +15,7 @@
* next serialization to disk.
* @param bucket The bucket to free. Should be a DelayedFreeBucket.
*/
- public void delayedFreeBucket(Bucket bucket);
+ public void delayedFreeBucket(DelayedFreeBucket bucket);
/**
* Get the persistent temp files directory.
Modified: trunk/freenet/src/freenet/support/io/PersistentTempBucketFactory.java
===================================================================
--- trunk/freenet/src/freenet/support/io/PersistentTempBucketFactory.java
2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/support/io/PersistentTempBucketFactory.java
2009-04-01 20:34:09 UTC (rev 26322)
@@ -6,12 +6,20 @@
import java.io.File;
import java.io.FileFilter;
import java.io.IOException;
+import java.util.ArrayList;
import java.util.HashSet;
-import java.util.Iterator;
import java.util.LinkedList;
import java.util.Random;
+import com.db4o.ObjectContainer;
+import com.db4o.ObjectSet;
+import com.db4o.query.Predicate;
+
+import freenet.client.async.DBJob;
+import freenet.client.async.DBJobRunner;
import freenet.crypt.RandomSource;
+import freenet.keys.CHKBlock;
+import freenet.node.Ticker;
import freenet.support.Logger;
import freenet.support.api.Bucket;
import freenet.support.api.BucketFactory;
@@ -27,28 +35,35 @@
*/
public class PersistentTempBucketFactory implements BucketFactory,
PersistentFileTracker {
- /** Directory containing persistent temporary files */
- private final File dir;
-
/** Original contents of directory */
private HashSet<File> originalFiles;
/** Filename generator */
- private final FilenameGenerator fg;
+ public final FilenameGenerator fg;
/** Random number generator */
- private final RandomSource strongPRNG;
- private final Random weakPRNG;
+ private transient RandomSource strongPRNG;
+ private transient Random weakPRNG;
/** Buckets to free */
- private LinkedList<Bucket> bucketsToFree;
+ private final ArrayList<DelayedFreeBucket> bucketsToFree;
+ private final long nodeDBHandle;
+
private volatile boolean encrypt;
+
+ private final PersistentBlobTempBucketFactory blobFactory;
+
+ static final int BLOB_SIZE = CHKBlock.DATA_LENGTH;
+
+ /** Don't store the bucketsToFree unless it's been modified since we
last stored it. */
+ private transient boolean modifiedBucketsToFree;
- public PersistentTempBucketFactory(File dir, final String prefix,
RandomSource strongPRNG, Random weakPRNG, boolean encrypt) throws IOException {
+ public PersistentTempBucketFactory(File dir, final String prefix,
RandomSource strongPRNG, Random weakPRNG, boolean encrypt, long nodeDBHandle)
throws IOException {
boolean logMINOR = Logger.shouldLog(Logger.MINOR, this);
- this.dir = dir;
+ blobFactory = new PersistentBlobTempBucketFactory(BLOB_SIZE,
nodeDBHandle, new File(dir, "persistent-blob.tmp"));
this.strongPRNG = strongPRNG;
+ this.nodeDBHandle = nodeDBHandle;
this.weakPRNG = weakPRNG;
this.encrypt = encrypt;
this.fg = new FilenameGenerator(weakPRNG, false, dir, prefix);
@@ -79,9 +94,15 @@
originalFiles.add(f);
}
- bucketsToFree = new LinkedList<Bucket>();
+ bucketsToFree = new ArrayList<DelayedFreeBucket>();
}
+ public void init(File dir, String prefix, RandomSource strongPRNG,
Random weakPRNG) throws IOException {
+ this.strongPRNG = strongPRNG;
+ this.weakPRNG = weakPRNG;
+ fg.init(dir, prefix, weakPRNG);
+ }
+
public void register(File file) {
synchronized(this) {
if(originalFiles == null)
@@ -97,41 +118,59 @@
* Deletes any old temp files still unclaimed.
*/
public synchronized void completedInit() {
- Iterator<File> i = originalFiles.iterator();
- while(i.hasNext()) {
- File f = (i.next());
- if(Logger.shouldLog(Logger.MINOR, this))
- Logger.minor(this, "Deleting old tempfile "+f);
- f.delete();
- }
+ // Persisting requests in the database means we don't
register() files...
+ // So keep all the temp files for now.
+ // FIXME: tidy up unwanted temp files.
+
+// Iterator<File> i = originalFiles.iterator();
+// while(i.hasNext()) {
+// File f = (File) (i.next());
+// if(Logger.shouldLog(Logger.MINOR, this))
+// Logger.minor(this, "Deleting old tempfile "+f);
+// f.delete();
+// }
originalFiles = null;
}
public Bucket makeBucket(long size) throws IOException {
- PersistentTempFileBucket rawBucket = new
PersistentTempFileBucket(fg.makeRandomFilename(), fg);
- Bucket maybeEncryptedBucket = (encrypt ? new
PaddedEphemerallyEncryptedBucket(rawBucket, 1024, strongPRNG, weakPRNG) :
rawBucket);
- return new DelayedFreeBucket(this, maybeEncryptedBucket);
+ Bucket rawBucket = null;
+ boolean mustWrap = true;
+ if(size == BLOB_SIZE) {
+ // No need for a DelayedFreeBucket, we handle this
internally (and more efficiently) for blobs.
+ mustWrap = false;
+ rawBucket = blobFactory.makeBucket();
+ }
+ if(rawBucket == null)
+ rawBucket = new
PersistentTempFileBucket(fg.makeRandomFilename(), fg);
+ if(encrypt)
+ rawBucket = new
PaddedEphemerallyEncryptedBucket(rawBucket, 1024, strongPRNG, weakPRNG);
+ if(mustWrap)
+ rawBucket = new DelayedFreeBucket(this, rawBucket);
+ return rawBucket;
}
/**
* Free an allocated bucket, but only after the change has been written
to disk.
*/
- public void delayedFreeBucket(Bucket b) {
+ public void delayedFreeBucket(DelayedFreeBucket b) {
synchronized(this) {
bucketsToFree.add(b);
+ modifiedBucketsToFree = true;
}
}
- public LinkedList<Bucket> grabBucketsToFree() {
+ private DelayedFreeBucket[] grabBucketsToFree() {
synchronized(this) {
- LinkedList<Bucket> toFree = bucketsToFree;
- bucketsToFree = new LinkedList<Bucket>();
- return toFree;
+ if(bucketsToFree.isEmpty()) return null;
+ DelayedFreeBucket[] buckets = bucketsToFree.toArray(new
DelayedFreeBucket[bucketsToFree.size()]);
+ bucketsToFree.clear();
+ modifiedBucketsToFree = true;
+ return buckets;
}
}
public File getDir() {
- return dir;
+ return fg.getDir();
}
public FilenameGenerator getGenerator() {
@@ -150,7 +189,73 @@
return encrypt;
}
+ @SuppressWarnings("serial")
+ public static PersistentTempBucketFactory load(File dir, String prefix,
RandomSource random, Random fastWeakRandom, ObjectContainer container, final
long nodeDBHandle, boolean encrypt, DBJobRunner jobRunner, Ticker ticker)
throws IOException {
+ ObjectSet<PersistentTempBucketFactory> results =
container.query(new Predicate<PersistentTempBucketFactory>() {
+ public boolean match(PersistentTempBucketFactory
factory) {
+ if(factory.nodeDBHandle == nodeDBHandle) return
true;
+ return false;
+ }
+ });
+ if(results.hasNext()) {
+ PersistentTempBucketFactory factory = results.next();
+ container.activate(factory, 5);
+ factory.init(dir, prefix, random, fastWeakRandom);
+ factory.setEncryption(encrypt);
+ factory.blobFactory.onInit(container, jobRunner,
fastWeakRandom, new File(dir, "persistent-blob.tmp"), BLOB_SIZE, ticker);
+ return factory;
+ } else {
+ PersistentTempBucketFactory factory =
+ new PersistentTempBucketFactory(dir, prefix,
random, fastWeakRandom, encrypt, nodeDBHandle);
+ factory.blobFactory.onInit(container, jobRunner,
fastWeakRandom, new File(dir, "persistent-blob.tmp"), BLOB_SIZE, ticker);
+ return factory;
+ }
+ }
+
public void setEncryption(boolean encrypt) {
this.encrypt = encrypt;
}
+
+ public void preCommit(ObjectContainer db) {
+ synchronized(this) {
+ if(!modifiedBucketsToFree) return;
+ modifiedBucketsToFree = false;
+ for(DelayedFreeBucket bucket : bucketsToFree) {
+ db.activate(bucket, 1);
+ bucket.storeTo(db);
+ }
+ db.store(bucketsToFree);
+ }
+ }
+
+ public void postCommit(ObjectContainer db) {
+ blobFactory.postCommit();
+ DelayedFreeBucket[] toFree = grabBucketsToFree();
+ if(toFree == null || toFree.length == 0) return;
+ int x = 0;
+ for(DelayedFreeBucket bucket : toFree) {
+ try {
+ if(bucket.toFree())
+ bucket.realFree();
+ if(bucket.toRemove())
+ bucket.realRemoveFrom(db);
+ } catch (Throwable t) {
+ Logger.error(this, "Caught "+t+" freeing bucket
"+bucket+" after transaction commit", t);
+ }
+ x++;
+ }
+ if(x > 1024) {
+ db.store(bucketsToFree);
+ // Lots of buckets freed, commit now to reduce memory
footprint.
+ db.commit();
+ }
+ }
+
+ public void addBlobFreeCallback(DBJob job) {
+ blobFactory.addBlobFreeCallback(job);
+ }
+
+ public void removeBlobFreeCallback(DBJob job) {
+ blobFactory.removeBlobFreeCallback(job);
+ }
}
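
The preCommit()/postCommit() pair above is what makes delayed freeing crash-safe: the still-pending DelayedFreeBuckets are persisted inside the transaction, and the underlying files are only deleted once the commit has succeeded. A condensed sketch of the required ordering; commitTransaction() is a hypothetical stand-in for the node's real commit hook:

    import com.db4o.ObjectContainer;
    import freenet.support.io.PersistentTempBucketFactory;

    public class CommitOrderingSketch {
        // Hypothetical commit hook, illustrating the ordering only.
        static void commitTransaction(ObjectContainer db, PersistentTempBucketFactory factory) {
            factory.preCommit(db);  // persist the to-free list inside the transaction
            db.commit();            // the transaction is now durable
            factory.postCommit(db); // only now actually delete the files
        }
    }
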
Modified: trunk/freenet/src/freenet/support/io/PersistentTempFileBucket.java
===================================================================
--- trunk/freenet/src/freenet/support/io/PersistentTempFileBucket.java
2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/support/io/PersistentTempFileBucket.java
2009-04-01 20:34:09 UTC (rev 26322)
@@ -1,15 +1,21 @@
package freenet.support.io;
import java.io.File;
+import java.io.IOException;
+import freenet.support.Logger;
import freenet.support.SimpleFieldSet;
import freenet.support.api.Bucket;
public class PersistentTempFileBucket extends TempFileBucket {
- protected PersistentTempFileBucket(long id, FilenameGenerator
generator) {
- super(id, generator);
+ public PersistentTempFileBucket(long id, FilenameGenerator generator) {
+ this(id, generator, true);
}
+
+ protected PersistentTempFileBucket(long id, FilenameGenerator
generator, boolean deleteOnFree) {
+ super(id, generator, false, deleteOnFree);
+ }
@Override
protected boolean deleteOnFinalize() {
@@ -40,7 +46,7 @@
} catch (NumberFormatException e) {
throw new CannotCreateFromFieldSetException("Corrupt
length "+tmp, e);
}
- Bucket bucket = new PersistentTempFileBucket(id,
f.getGenerator());
+ Bucket bucket = new PersistentTempFileBucket(id,
f.getGenerator(), true);
if(file.exists()) // no point otherwise!
f.register(file);
return bucket;
@@ -55,4 +61,14 @@
return fs;
}
+ /** Must override createShadow() so it creates a persistent bucket,
which will have
+ * deleteOnExit() = deleteOnFinalize() = false.
+ */
+ public Bucket createShadow() throws IOException {
+ PersistentTempFileBucket ret = new
PersistentTempFileBucket(filenameID, generator, false);
+ ret.setReadOnly();
+ if(!getFile().exists()) Logger.error(this, "File does not exist
when creating shadow: "+getFile());
+ return ret;
+ }
+
}
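
createShadow() above hands out a read-only view of the same on-disk file that deliberately does not delete on free or finalize, so one object can keep reading data another object still owns. A hedged usage sketch; createShadow() is the Bucket method added in this commit, the rest is illustrative:

    import java.io.IOException;
    import java.io.InputStream;
    import freenet.support.api.Bucket;

    public class ShadowReadSketch {
        // Read via a shadow so freeing the view never deletes the
        // original file backing the persistent bucket.
        static void readViaShadow(Bucket bucket) throws IOException {
            Bucket shadow = bucket.createShadow();
            InputStream in = shadow.getInputStream();
            try {
                byte[] buf = new byte[4096];
                while (in.read(buf) != -1) { /* consume */ }
            } finally {
                in.close();
                shadow.free(); // frees the view only; the file survives
            }
        }
    }
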
Modified: trunk/freenet/src/freenet/support/io/ReadOnlyFileSliceBucket.java
===================================================================
--- trunk/freenet/src/freenet/support/io/ReadOnlyFileSliceBucket.java
2009-04-01 20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/support/io/ReadOnlyFileSliceBucket.java
2009-04-01 20:34:09 UTC (rev 26322)
@@ -10,6 +10,8 @@
import java.io.OutputStream;
import java.io.RandomAccessFile;
+import com.db4o.ObjectContainer;
+
import freenet.support.SimpleFieldSet;
import freenet.support.api.Bucket;
@@ -23,7 +25,7 @@
private final long length;
public ReadOnlyFileSliceBucket(File f, long startAt, long length) {
- this.file = f;
+ this.file = new File(f.getPath()); // copy so we can delete it
this.startAt = startAt;
this.length = length;
}
@@ -148,4 +150,25 @@
fs.put("Length", length);
return fs;
}
+
+ public void storeTo(ObjectContainer container) {
+ container.store(this);
+ }
+
+ public void removeFrom(ObjectContainer container) {
+ container.delete(file);
+ container.delete(this);
+ }
+
+ public void objectOnActivate(ObjectContainer container) {
+ // Cascading activation of dependencies
+ container.activate(file, 5);
+ }
+
+ public Bucket createShadow() throws IOException {
+ String fnam = new String(file.getPath());
+ File newFile = new File(fnam);
+ return new ReadOnlyFileSliceBucket(newFile, startAt, length);
+ }
+
}
Copied: trunk/freenet/src/freenet/support/io/SegmentedBucketChainBucket.java
(from rev 26320,
branches/db4o/freenet/src/freenet/support/io/SegmentedBucketChainBucket.java)
===================================================================
--- trunk/freenet/src/freenet/support/io/SegmentedBucketChainBucket.java
(rev 0)
+++ trunk/freenet/src/freenet/support/io/SegmentedBucketChainBucket.java
2009-04-01 20:34:09 UTC (rev 26322)
@@ -0,0 +1,533 @@
+package freenet.support.io;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.util.ArrayList;
+
+import com.db4o.ObjectContainer;
+
+import freenet.client.async.ClientContext;
+import freenet.client.async.DBJob;
+import freenet.client.async.DBJobRunner;
+import freenet.support.Logger;
+import freenet.support.api.Bucket;
+import freenet.support.api.BucketFactory;
+
+/**
+ * Splits a large persistent file into a series of buckets, which are
collected
+ * into groups called segments to avoid huge transactions/memory usage.
+ *
+ * NON-PERSISTENT: This class uses persistent buckets and stores them in the
+ * database to save memory, but the bucket itself does not support persistence.
+ * Making it support persistence cleanly requires major refactoring. The
obvious
+ * avenues are:
+ *
+ * 1. Pass DBJobRunner in to getOutputStream(), getInputStream(), free(),
+ * storeTo(). This will touch hundreds of files, mostly it is trivial though.
It
+ * will however increase the overhead for all buckets slightly.
+ * 2. Make DBJobRunner a static variable, hence there will be only one database
+ * thread for the whole VM, even in a simulation with more than one node. One
+ * difficulty with this is that the node.db4o needs to be in the correct
directory,
+ * yet the database would need to be initiated very early on.
+ *
+ * Generally we create it, write to it, call getBuckets() and clear(), anyway
...
+ *
+ * @author Matthew Toseland <toad at amphibian.dyndns.org> (0xE43DA450)
+ */
+public class SegmentedBucketChainBucket implements NotPersistentBucket {
+
+ private final ArrayList<SegmentedChainBucketSegment> segments;
+ private boolean readOnly;
+ public final long bucketSize;
+ public final int segmentSize;
+ private long size;
+ private boolean freed;
+ final BucketFactory bf;
+ private transient DBJobRunner dbJobRunner;
+
+ public SegmentedBucketChainBucket(int blockSize, BucketFactory factory,
+ DBJobRunner runner, int segmentSize2) {
+ bucketSize = blockSize;
+ bf = factory;
+ dbJobRunner = runner;
+ segmentSize = segmentSize2;
+ segments = new ArrayList<SegmentedChainBucketSegment>();
+ }
+
+ public Bucket createShadow() throws IOException {
+ return null;
+ }
+
+ public void free() {
+ synchronized(this) {
+ freed = true;
+ clearing = false;
+ }
+
+ // Due to memory issues, we cannot complete the cleanup before
returning, especially if we are already on the database thread...
+ DBJob freeJob = new DBJob() {
+
+ public void run(ObjectContainer container,
ClientContext context) {
+ SegmentedChainBucketSegment segment = null;
+
if(!container.ext().isStored(SegmentedBucketChainBucket.this)) {
+ Logger.error(this, "Bucket not stored
in freeJob, already deleted???");
+ container.delete(this);
+ return;
+ }
+ synchronized(SegmentedBucketChainBucket.this) {
+ if(!segments.isEmpty())
+ segment = segments.remove(0);
+ }
+ if(segment != null) {
+ container.activate(segment, 1);
+ if(Logger.shouldLog(Logger.MINOR,
SegmentedBucketChainBucket.this))
+
Logger.minor(SegmentedBucketChainBucket.this, "Freeing segment "+segment);
+ segment.activateBuckets(container);
+ segment.free();
+ segment.removeFrom(container);
+ synchronized(SegmentedBucketChainBucket.this) {
+ if(!segments.isEmpty()) {
+ dbJobRunner.queue(this,
NativeThread.HIGH_PRIORITY, true);
+
dbJobRunner.queueRestartJob(this, NativeThread.HIGH_PRIORITY, container, false);
+ container.store(this);
+ return;
+ }
+ }
+ }
+ container.delete(segments);
+
container.delete(SegmentedBucketChainBucket.this);
+ container.delete(this);
+ synchronized(SegmentedBucketChainBucket.this) {
+ if(killMe == null) return;
+ }
+ dbJobRunner.removeRestartJob(killMe,
NativeThread.HIGH_PRIORITY, container);
+ container.delete(killMe);
+ }
+
+ };
+
+ dbJobRunner.runBlocking(freeJob, NativeThread.HIGH_PRIORITY);
+ }
+
+    public InputStream getInputStream() throws IOException {
+        synchronized(this) {
+            if(freed || clearing) throw new IOException("Freed");
+        }
+        return new InputStream() {
+
+            int segmentNo = -1;
+            int bucketNo = segmentSize;
+            SegmentedChainBucketSegment seg = null;
+            Bucket[] buckets = null;
+            InputStream is = null;
+            private long bucketRead = 0;
+            private boolean closed;
+
+            @Override
+            public int read() throws IOException {
+                byte[] b = new byte[1];
+                if(read(b, 0, 1) <= 0) return -1;
+                return b[0] & 0xFF; // mask: read() must return 0-255, not a sign-extended byte
+            }
+
+            @Override
+            public int read(byte[] buf) throws IOException {
+                return read(buf, 0, buf.length);
+            }
+
+            @Override
+            public int read(byte[] buf, int offset, int length) throws IOException {
+                if(closed) throw new IOException("Already closed");
+                if(bucketRead == bucketSize || is == null) {
+                    if(is != null)
+                        is.close();
+                    if(buckets != null)
+                        buckets[bucketNo] = null;
+                    bucketRead = 0;
+                    bucketNo++;
+                    if(bucketNo == segmentSize || buckets == null) {
+                        bucketNo = 0;
+                        segmentNo++;
+                        seg = getSegment(segmentNo);
+                        if(seg == null) return -1;
+                        buckets = getBuckets(seg);
+                    }
+                    if(bucketNo >= buckets.length) {
+                        synchronized(SegmentedBucketChainBucket.this) {
+                            if(segmentNo >= segments.size())
+                                // No more data
+                                return -1;
+                        }
+                        buckets = getBuckets(seg);
+                        if(bucketNo >= buckets.length)
+                            return -1;
+                    }
+                    is = buckets[bucketNo].getInputStream();
+                }
+                int r = is.read(buf, offset, length);
+                if(r > 0)
+                    bucketRead += r;
+                return r;
+            }
+
+            @Override
+            public void close() throws IOException {
+                if(closed) return;
+                if(is != null) is.close();
+                closed = true;
+                is = null;
+                seg = null;
+                buckets = null;
+            }
+
+        };
+    }
+
+    protected synchronized SegmentedChainBucketSegment getSegment(int i) {
+        // Return null past the end so the reader above sees end-of-data rather
+        // than an IndexOutOfBoundsException.
+        if(i >= segments.size()) return null;
+        return segments.get(i);
+    }
+
+ protected Bucket[] getBuckets(final SegmentedChainBucketSegment seg) {
+ final BucketArrayWrapper baw = new BucketArrayWrapper();
+ dbJobRunner.runBlocking(new DBJob() {
+
+            public void run(ObjectContainer container, ClientContext context) {
+ container.activate(seg, 1);
+ synchronized(baw) {
+ baw.buckets = seg.shallowCopyBuckets();
+ }
+ container.deactivate(seg, 1);
+ }
+
+ }, NativeThread.HIGH_PRIORITY);
+ synchronized(baw) {
+ return baw.buckets;
+ }
+ }
+
+ public String getName() {
+ return "SegmentedBucketChainBucket";
+ }
+
+    public OutputStream getOutputStream() throws IOException {
+        final SegmentedChainBucketSegment[] segs;
+        synchronized(this) {
+            if(readOnly) throw new IOException("Read-only");
+            if(freed || clearing) throw new IOException("Freed");
+            size = 0;
+            segs = segments.toArray(new SegmentedChainBucketSegment[segments.size()]);
+            segments.clear();
+        }
+        for(int i=0;i<segs.length;i++)
+            segs[i].free();
+        if(segs.length > 0) {
+            dbJobRunner.runBlocking(new DBJob() {
+
+                public void run(ObjectContainer container, ClientContext context) {
+                    for(int i=0;i<segs.length;i++) {
+                        segs[i].removeFrom(container);
+                    }
+                }
+
+            }, NativeThread.HIGH_PRIORITY);
+        }
+        return new OutputStream() {
+
+            int segmentNo = 0;
+            int bucketNo = 0;
+            SegmentedChainBucketSegment seg = makeSegment(segmentNo, null);
+            OutputStream cur = seg.makeBucketStream(bucketNo);
+            private long bucketLength;
+            private boolean closed;
+
+            @Override
+            public void write(int arg0) throws IOException {
+                write(new byte[] { (byte)arg0 });
+            }
+
+            @Override
+            public void write(byte[] buf) throws IOException {
+                write(buf, 0, buf.length);
+            }
+
+            @Override
+            public void write(byte[] buf, int offset, int length) throws IOException {
+                boolean ro;
+                synchronized(SegmentedBucketChainBucket.this) {
+                    ro = readOnly;
+                }
+                if(ro) {
+                    if(!closed) close();
+                    throw new IOException("Read-only");
+                }
+                if(closed) throw new IOException("Already closed");
+                while(length > 0) {
+                    if(bucketLength == bucketSize) {
+                        bucketNo++;
+                        cur.close();
+                        if(bucketNo == segmentSize) {
+                            bucketNo = 0;
+                            segmentNo++;
+                            seg = makeSegment(segmentNo, seg);
+                        }
+                        cur = seg.makeBucketStream(bucketNo);
+                        bucketLength = 0;
+                    }
+                    int left = (int)Math.min(Integer.MAX_VALUE, bucketSize - bucketLength);
+                    int write = Math.min(left, length);
+                    cur.write(buf, offset, write);
+                    offset += write;
+                    length -= write;
+                    bucketLength += write;
+                    // Lock the instance, not the class: size is an instance field
+                    // and size() synchronizes on the instance.
+                    synchronized(SegmentedBucketChainBucket.this) {
+                        size += write;
+                    }
+                }
+            }
+
+            @Override
+            public void close() throws IOException {
+                if(closed) return;
+                if(Logger.shouldLog(Logger.MINOR, this))
+                    Logger.minor(this, "Closing "+this+" for "+SegmentedBucketChainBucket.this);
+                cur.close();
+                closed = true;
+                cur = null;
+                final SegmentedChainBucketSegment oldSeg = seg;
+                seg = null;
+                dbJobRunner.runBlocking(new DBJob() {
+
+                    public void run(ObjectContainer container, ClientContext context) {
+                        if(container.ext().isStored(oldSeg)) {
+                            if(!container.ext().isActive(oldSeg)) {
+                                Logger.error(this, "OLD SEGMENT STORED BUT NOT ACTIVE: "+oldSeg, new Exception("error"));
+                                container.activate(oldSeg, 1);
+                            }
+                        }
+                        oldSeg.storeTo(container);
+                        container.ext().store(segments, 1);
+                        container.ext().store(SegmentedBucketChainBucket.this, 1);
+                        container.deactivate(oldSeg, 1);
+                        // If there is only one segment, we didn't add a killMe.
+                        // Add one now.
+                        synchronized(SegmentedBucketChainBucket.this) {
+                            if(killMe != null) return;
+                            killMe = new SegmentedBucketChainBucketKillJob(SegmentedBucketChainBucket.this);
+                        }
+                        killMe.scheduleRestart(container, context);
+                    }
+
+                }, NativeThread.HIGH_PRIORITY);
+            }
+        };
+    }
+
+ private transient SegmentedBucketChainBucketKillJob killMe;
+
+ private transient boolean runningSegStore;
+
+    protected SegmentedChainBucketSegment makeSegment(int index, final SegmentedChainBucketSegment oldSeg) {
+        if(Logger.shouldLog(Logger.MINOR, this))
+            Logger.minor(this, "Make a segment for "+this+" index "+index+" old "+oldSeg);
+        if(oldSeg != null) {
+            synchronized(this) {
+                while(runningSegStore) {
+                    Logger.normal(this, "Waiting for last segment-store job to finish on "+this);
+                    try {
+                        wait();
+                    } catch (InterruptedException e) {
+                        // Ignore
+                    }
+                }
+                runningSegStore = true;
+            }
+            try {
+                dbJobRunner.runBlocking(new DBJob() {
+
+                    public void run(ObjectContainer container, ClientContext context) {
+                        try {
+                            oldSeg.storeTo(container);
+                            container.ext().store(segments, 1);
+                            container.ext().store(SegmentedBucketChainBucket.this, 1);
+                            container.deactivate(oldSeg, 1);
+                            synchronized(SegmentedBucketChainBucket.this) {
+                                if(killMe != null) return;
+                                killMe = new SegmentedBucketChainBucketKillJob(SegmentedBucketChainBucket.this);
+                            }
+                            killMe.scheduleRestart(container, context);
+                        } finally {
+                            synchronized(SegmentedBucketChainBucket.this) {
+                                runningSegStore = false;
+                                SegmentedBucketChainBucket.this.notifyAll();
+                            }
+                        }
+                    }
+
+                }, NativeThread.HIGH_PRIORITY-1);
+            } catch (Throwable t) {
+                Logger.error(this, "Caught throwable: "+t, t);
+                runningSegStore = false;
+            }
+        }
+        synchronized(this) {
+            SegmentedChainBucketSegment seg = new SegmentedChainBucketSegment(this);
+            if(segments.size() != index) throw new IllegalArgumentException("Asked to add segment "+index+" but segments length is "+segments.size());
+            segments.add(seg);
+            return seg;
+        }
+    }
+
+ public boolean isReadOnly() {
+ return readOnly;
+ }
+
+ public void removeFrom(ObjectContainer container) {
+ // Valid no-op if we haven't been stored.
+ }
+
+ public void setReadOnly() {
+ readOnly = true;
+ }
+
+ public synchronized long size() {
+ return size;
+ }
+
+    /**
+     * Note that we don't recurse inside the segments, as that would produce a huge
+     * transaction. So you will need to close the OutputStream to commit the
+     * progress of writing to a file. And no, we can't append, so you need to
+     * write everything before storing the bucket.
+     *
+     * FIXME: Enforce the rule that you must close any OutputStreams before
+     * calling storeTo().
+     */
+ public void storeTo(ObjectContainer container) {
+ throw new UnsupportedOperationException();
+ }
+
+ public Bucket[] getBuckets() {
+ final BucketArrayWrapper baw = new BucketArrayWrapper();
+ dbJobRunner.runBlocking(new DBJob() {
+
+            public void run(ObjectContainer container, ClientContext context) {
+ baw.buckets = getBuckets(container);
+ }
+
+ }, NativeThread.HIGH_PRIORITY);
+ return baw.buckets;
+ }
+
+ protected synchronized Bucket[] getBuckets(ObjectContainer container) {
+ int segs = segments.size();
+ if(segs == 0) return new Bucket[0];
+ SegmentedChainBucketSegment seg = segments.get(segs-1);
+ container.activate(seg, 1);
+ seg.activateBuckets(container);
+ int size = (segs - 1) * segmentSize + seg.size();
+ Bucket[] buckets = new Bucket[size];
+ seg.shallowCopyBuckets(buckets, (segs-1)*segmentSize);
+ container.deactivate(seg, 1);
+ int pos = 0;
+ for(int i=0;i<(segs-1);i++) {
+ seg = segments.get(i);
+ container.activate(seg, 1);
+ seg.activateBuckets(container);
+ seg.shallowCopyBuckets(buckets, pos);
+ container.deactivate(seg, 1);
+ pos += segmentSize;
+ }
+ return buckets;
+ }
+
+
+ private boolean clearing;
+
+    // Not synchronized: the blocking job below takes the bucket lock itself.
+    public void clear() {
+        // Due to memory issues, we cannot complete this before we return.
+        synchronized(this) {
+            clearing = true;
+        }
+        DBJob clearJob = new DBJob() {
+
+            public void run(ObjectContainer container, ClientContext context) {
+                if(!container.ext().isStored(SegmentedBucketChainBucket.this)) {
+                    Logger.error(this, "Bucket not stored in clearJob, already deleted???");
+                    container.delete(this);
+                    return;
+                }
+                SegmentedChainBucketSegment segment = null;
+                // Lock the bucket, not this job: segments belongs to the outer class.
+                synchronized(SegmentedBucketChainBucket.this) {
+                    if(!segments.isEmpty())
+                        segment = segments.remove(0);
+                }
+                if(segment != null) {
+                    container.activate(segment, 1);
+                    if(Logger.shouldLog(Logger.MINOR, SegmentedBucketChainBucket.this))
+                        Logger.minor(SegmentedBucketChainBucket.this, "Clearing segment "+segment);
+                    segment.clear(container);
+                    synchronized(SegmentedBucketChainBucket.this) {
+                        if(!segments.isEmpty()) {
+                            dbJobRunner.queue(this, NativeThread.HIGH_PRIORITY-1, true);
+                            dbJobRunner.queueRestartJob(this, NativeThread.HIGH_PRIORITY-1, container, false);
+                            container.store(segments);
+                            container.store(SegmentedBucketChainBucket.this);
+                            return;
+                        }
+                    }
+                }
+                container.delete(segments);
+                container.delete(SegmentedBucketChainBucket.this);
+                container.delete(this);
+                synchronized(SegmentedBucketChainBucket.this) {
+                    if(killMe == null) return;
+                }
+                dbJobRunner.removeRestartJob(killMe, NativeThread.HIGH_PRIORITY, container);
+                container.delete(killMe);
+            }
+
+        };
+        dbJobRunner.runBlocking(clearJob, NativeThread.HIGH_PRIORITY-1);
+    }
+
+    /**
+     * @param container
+     * @return True if there is more work to do. We don't want to do everything
+     * in one transaction because this bucket could be enormous.
+     */
+    synchronized boolean removeContents(ObjectContainer container) {
+        boolean logMINOR = Logger.shouldLog(Logger.MINOR, this);
+        while(segments.size() > 0) {
+            Logger.normal(this, "Freeing unfinished unstored bucket "+this+" segments left "+segments.size());
+            // Remove the first so the space is reused at the beginning, not at the end.
+            // Removing from the end results in not shrinking.
+            SegmentedChainBucketSegment seg = segments.remove(0);
+            if(seg == null) {
+                // Already removed.
+                continue;
+            }
+            container.activate(seg, 1);
+            if(logMINOR) Logger.minor(this, "Removing segment "+seg+" size "+seg.size());
+            if(clearing) {
+                seg.clear(container);
+            } else {
+                seg.activateBuckets(container);
+                seg.free();
+                seg.removeFrom(container);
+            }
+            if(segments.size() > 0) {
+                container.store(segments);
+                container.store(this);
+                return true; // Do some more in the next transaction
+            } else break;
+        }
+        if(logMINOR) Logger.minor(this, "Removed segments for "+this);
+        container.delete(segments);
+        container.delete(this);
+        if(logMINOR) Logger.minor(this, "Removed "+this);
+        freed = true; // Just in case it wasn't already.
+        return false;
+    }
+
+}
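
For orientation, a minimal usage sketch of the class above (illustrative, not part of this commit): the runner and factory variables are assumed to come from the node's ClientContext, and the 32KB/32 sizes are arbitrary example values, not recommendations.

    // Hedged sketch: runner, factory and data are assumed to exist already.
    SegmentedBucketChainBucket bucket =
        new SegmentedBucketChainBucket(32768, factory, runner, 32);
    OutputStream os = bucket.getOutputStream();
    try {
        os.write(data); // transparently split into 32KB buckets, 32 per segment
    } finally {
        os.close();     // close() commits the final segment to the database
    }
    Bucket[] parts = bucket.getBuckets(); // take ownership of the underlying buckets
    bucket.clear();                       // then drop the chain without freeing them
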
Copied:
trunk/freenet/src/freenet/support/io/SegmentedBucketChainBucketKillJob.java
(from rev 26320,
branches/db4o/freenet/src/freenet/support/io/SegmentedBucketChainBucketKillJob.java)
===================================================================
--- trunk/freenet/src/freenet/support/io/SegmentedBucketChainBucketKillJob.java
(rev 0)
+++ trunk/freenet/src/freenet/support/io/SegmentedBucketChainBucketKillJob.java
2009-04-01 20:34:09 UTC (rev 26322)
@@ -0,0 +1,42 @@
+package freenet.support.io;
+
+import com.db4o.ObjectContainer;
+
+import freenet.client.async.ClientContext;
+import freenet.client.async.DBJob;
+import freenet.support.Logger;
+
+public class SegmentedBucketChainBucketKillJob implements DBJob {
+
+    final SegmentedBucketChainBucket bcb;
+
+    private final short RESTART_PRIO = NativeThread.HIGH_PRIORITY;
+
+    public SegmentedBucketChainBucketKillJob(SegmentedBucketChainBucket bucket) {
+        bcb = bucket;
+    }
+
+    public void run(ObjectContainer container, ClientContext context) {
+        container.activate(bcb, 2);
+        Logger.normal(this, "Freeing unfinished unstored bucket "+bcb);
+        // The restart jobs runner will remove us from the queue.
+        // This may take more than one transaction ...
+        if(bcb.removeContents(container)) {
+            // More work needs to be done.
+            // We will have already been removed, so re-add, in case we crash soon.
+            scheduleRestart(container, context);
+            context.persistentBucketFactory.addBlobFreeCallback(this);
+            // But try to sort it out now ...
+            context.jobRunner.queue(this, NativeThread.NORM_PRIORITY, true);
+        } else {
+            context.jobRunner.removeRestartJob(this, RESTART_PRIO, container);
+            container.delete(this);
+            context.persistentBucketFactory.removeBlobFreeCallback(this);
+        }
+    }
+
+    public void scheduleRestart(ObjectContainer container, ClientContext context) {
+        context.jobRunner.queueRestartJob(this, RESTART_PRIO, container, true);
+    }
+
+}
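
The job above is the generic resume-after-crash pattern: do a bounded amount of work per transaction, stay on the restart queue while work remains, and deregister once done. A hedged sketch of the same pattern in isolation follows; IncrementalCleanupJob and doSomeWork() are hypothetical stand-ins for this class and bcb.removeContents().

    import com.db4o.ObjectContainer;
    import freenet.client.async.ClientContext;
    import freenet.client.async.DBJob;
    import freenet.support.io.NativeThread;

    public class IncrementalCleanupJob implements DBJob {
        public void run(ObjectContainer container, ClientContext context) {
            if(doSomeWork(container)) {
                // More left: stay on the restart queue so a crash resumes us,
                // and re-queue immediately to keep making progress now.
                context.jobRunner.queueRestartJob(this, NativeThread.HIGH_PRIORITY, container, true);
                context.jobRunner.queue(this, NativeThread.NORM_PRIORITY, true);
            } else {
                // Done: leave the restart queue and delete our own record.
                context.jobRunner.removeRestartJob(this, NativeThread.HIGH_PRIORITY, container);
                container.delete(this);
            }
        }
        // Hypothetical helper: delete a bounded slice of state per call,
        // returning true while more remains.
        private boolean doSomeWork(ObjectContainer container) { return false; }
    }
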
Copied: trunk/freenet/src/freenet/support/io/SegmentedChainBucketSegment.java
(from rev 26320,
branches/db4o/freenet/src/freenet/support/io/SegmentedChainBucketSegment.java)
===================================================================
--- trunk/freenet/src/freenet/support/io/SegmentedChainBucketSegment.java
(rev 0)
+++ trunk/freenet/src/freenet/support/io/SegmentedChainBucketSegment.java
2009-04-01 20:34:09 UTC (rev 26322)
@@ -0,0 +1,95 @@
+package freenet.support.io;
+
+import java.io.IOException;
+import java.io.OutputStream;
+import java.util.ArrayList;
+
+import com.db4o.ObjectContainer;
+
+import freenet.client.async.ClientContext;
+import freenet.support.Logger;
+import freenet.support.api.Bucket;
+
+public class SegmentedChainBucketSegment {
+
+ private final ArrayList<Bucket> buckets;
+ private final SegmentedBucketChainBucket bcb;
+
+ public SegmentedChainBucketSegment(SegmentedBucketChainBucket bucket) {
+ this.bcb = bucket;
+ this.buckets = new ArrayList<Bucket>();
+ }
+
+ public void free() {
+ for(Bucket bucket : buckets) {
+ if(bucket == null) {
+ Logger.error(this, "Bucket is null on "+this);
+ continue;
+ }
+ bucket.free();
+ }
+ }
+
+ public void storeTo(ObjectContainer container) {
+ if(Logger.shouldLog(Logger.MINOR, this))
+ Logger.minor(this, "Storing segment "+this);
+ for(Bucket bucket : buckets)
+ bucket.storeTo(container);
+ container.ext().store(buckets, 1);
+ container.ext().store(this, 1);
+ }
+
+ public synchronized Bucket[] shallowCopyBuckets() {
+ int sz = buckets.size();
+ Bucket[] out = new Bucket[sz];
+ for(int i=0;i<sz;i++) out[i] = buckets.get(i);
+ return out;
+ }
+
+ public synchronized void shallowCopyBuckets(Bucket[] out, int index) {
+ int sz = buckets.size();
+ for(int i=0;i<sz;i++) out[index++] = buckets.get(i);
+ }
+
+    public OutputStream makeBucketStream(int bucketNo) throws IOException {
+        if(bucketNo >= bcb.segmentSize)
+            throw new IllegalArgumentException("Too many buckets in segment");
+        Bucket b = bcb.bf.makeBucket(bcb.bucketSize);
+        synchronized(this) {
+            if(buckets.size() != bucketNo)
+                throw new IllegalArgumentException("Next bucket should be "+buckets.size()+" but is "+bucketNo);
+            buckets.add(b);
+        }
+        return b.getOutputStream();
+    }
+
+ public int size() {
+ return buckets.size();
+ }
+
+ void activateBuckets(ObjectContainer container) {
+ container.activate(buckets, 1);
+ for(Bucket bucket : buckets)
+ container.activate(bucket, 1); // will cascade
+ }
+
+ public void clear(ObjectContainer container) {
+ buckets.clear();
+ container.delete(buckets);
+ container.delete(this);
+ }
+
+ public void removeFrom(ObjectContainer container) {
+ for(Bucket bucket : buckets) {
+ if(bucket == null) {
+ // Probably not a problem...
+ continue;
+ }
+ container.activate(bucket, 1);
+ bucket.removeFrom(container);
+ }
+ container.delete(buckets);
+ container.delete(this);
+ }
+
+}
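
The chain's addressing is plain arithmetic: a byte offset maps to a bucket index, which maps to a (segment, slot) pair. A self-contained illustration with example sizes (the real values are constructor arguments, not these):

    public class ChainGeometry {
        public static void main(String[] args) {
            long bucketSize = 32768; // bytes per bucket (example value)
            int segmentSize = 32;    // buckets per segment (example value)
            long offset = 5000000L;
            long bucketIndex = offset / bucketSize;         // 152
            long segmentNo = bucketIndex / segmentSize;     // 4
            long slotInSegment = bucketIndex % segmentSize; // 24
            long offsetInBucket = offset % bucketSize;      // 19264
            System.out.println("segment " + segmentNo + ", slot " + slotInSegment
                    + ", byte " + offsetInBucket);
        }
    }
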
Modified: trunk/freenet/src/freenet/support/io/TempBucketFactory.java
===================================================================
--- trunk/freenet/src/freenet/support/io/TempBucketFactory.java 2009-04-01
20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/support/io/TempBucketFactory.java 2009-04-01
20:34:09 UTC (rev 26322)
@@ -14,6 +14,8 @@
import java.util.Vector;
import java.util.concurrent.LinkedBlockingQueue;
+import com.db4o.ObjectContainer;
+
import freenet.crypt.RandomSource;
import freenet.support.Executor;
import freenet.support.Logger;
@@ -363,6 +365,20 @@
}
}
}
+
+ public Bucket createShadow() throws IOException {
+ return currentBucket.createShadow();
+ }
+
+ public void removeFrom(ObjectContainer container) {
+ currentBucket.removeFrom(container);
+ container.delete(this);
+ }
+
+ public void storeTo(ObjectContainer container) {
+ currentBucket.storeTo(container);
+ container.store(this);
+ }
	private WeakReference<TempBucket> weakRef = new WeakReference<TempBucket>(this);
@@ -527,7 +543,7 @@
	private final Queue<WeakReference<TempBucket>> ramBucketQueue = new LinkedBlockingQueue<WeakReference<TempBucket>>();
private Bucket _makeFileBucket() {
-		Bucket fileBucket = new TempFileBucket(filenameGenerator.makeRandomFilename(), filenameGenerator);
+		Bucket fileBucket = new TempFileBucket(filenameGenerator.makeRandomFilename(), filenameGenerator, true, true);
		// Do we want it to be encrypted?
		return (reallyEncrypt ? new PaddedEphemerallyEncryptedBucket(fileBucket, 1024, strongPRNG, weakPRNG) : fileBucket);
}
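
The new TempBucket methods above simply forward to whichever bucket currently backs the data (RAM at first, a file after migration). A minimal sketch of that forwarding shape, with names simplified from the real class:

    import java.io.IOException;
    import com.db4o.ObjectContainer;
    import freenet.support.api.Bucket;

    // Sketch only: the real TempBucket implements Bucket and guards
    // currentBucket with a lock; this just shows the delegation idea.
    class ForwardingBucketSketch {
        private Bucket currentBucket; // RAM-backed first, file-backed after migration

        public Bucket createShadow() throws IOException {
            return currentBucket.createShadow();
        }

        public void storeTo(ObjectContainer container) {
            currentBucket.storeTo(container);
            container.store(this); // re-store the wrapper so it tracks the new backing
        }
    }
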
Modified: trunk/freenet/src/freenet/support/io/TempFileBucket.java
===================================================================
--- trunk/freenet/src/freenet/support/io/TempFileBucket.java 2009-04-01
20:12:14 UTC (rev 26321)
+++ trunk/freenet/src/freenet/support/io/TempFileBucket.java 2009-04-01
20:34:09 UTC (rev 26322)
@@ -1,7 +1,10 @@
package freenet.support.io;
import java.io.File;
+import java.io.IOException;
+import com.db4o.ObjectContainer;
+
import freenet.support.Logger;
import freenet.support.SimpleFieldSet;
import freenet.support.api.Bucket;
@@ -21,17 +24,27 @@
final FilenameGenerator generator;
private static boolean logDebug = true;
private boolean readOnly;
+ private final boolean deleteOnFree;
+
+ public TempFileBucket(long id, FilenameGenerator generator) {
+ this(id, generator, true, true);
+ }
+
/**
* Constructor for the TempFileBucket object
- *
- * @param f File
+ * Subclasses can call this constructor.
+	 * @param deleteOnExit Set if you want the bucket deleted on shutdown. Passed to
+	 * the parent BaseFileBucket. You must also override deleteOnExit() and
+	 * implement your own createShadow()!
+	 * @param deleteOnFree True for a normal temp bucket, false for a shadow.
*/
- public TempFileBucket(
+ protected TempFileBucket(
long id,
- FilenameGenerator generator) {
- super(generator.getFilename(id));
+		FilenameGenerator generator, boolean deleteOnExit, boolean deleteOnFree) {
+ super(generator.getFilename(id), deleteOnExit);
this.filenameID = id;
this.generator = generator;
+ this.deleteOnFree = deleteOnFree;
synchronized(this) {
logDebug = Logger.shouldLog(Logger.DEBUG, this);
}
@@ -42,15 +55,17 @@
if (logDebug)
Logger.debug(
this,
-				"Initializing TempFileBucket(" + getFile());
+				"Initializing TempFileBucket(" + getFile() + " deleteOnExit=" + deleteOnExit);
}
+ if(deleteOnExit)
+ setDeleteOnExit(getFile());
}
@Override
protected boolean deleteOnFinalize() {
		// Make sure finalize whacks temp file
		// if it is not explicitly freed.
- return true;
+ return true; // not if shadow
}
@Override
@@ -68,7 +83,7 @@
@Override
protected boolean deleteOnFree() {
- return true;
+ return deleteOnFree;
}
@Override
@@ -88,4 +103,23 @@
protected boolean deleteOnExit() {
return true;
}
+
+ public void storeTo(ObjectContainer container) {
+ container.store(generator);
+ container.store(this);
+ }
+
+	public void removeFrom(ObjectContainer container) {
+		if(Logger.shouldLog(Logger.MINOR, this))
+			Logger.minor(this, "Removing from database: "+this);
+		// filenameGenerator is a global, we don't need to worry about it.
+		container.delete(this);
+	}
+
+	public Bucket createShadow() throws IOException {
+		TempFileBucket ret = new TempFileBucket(filenameID, generator, true, false);
+		ret.setReadOnly();
+		if(!getFile().exists()) Logger.error(this, "File does not exist when creating shadow: "+getFile());
+		return ret;
+ }
}
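
A shadow is a read-only second Bucket over the same file; because it is built with deleteOnFree=false, freeing it never deletes the data out from under the original. A hedged usage sketch, where 'bucket' is assumed to be an existing TempFileBucket with data already written:

    // Sketch: the shadow shares the original's file, so freeing it is harmless.
    Bucket shadow = bucket.createShadow();
    InputStream in = shadow.getInputStream(); // reads the same bytes as 'bucket'
    try {
        // ... consume the data ...
    } finally {
        in.close();
        shadow.free(); // does NOT delete the underlying temp file
    }
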
Deleted: trunk/freenet/src/net/i2p/util/NativeBigInteger.java
===================================================================
--- branches/db4o/freenet/src/net/i2p/util/NativeBigInteger.java
2009-04-01 19:43:11 UTC (rev 26320)
+++ trunk/freenet/src/net/i2p/util/NativeBigInteger.java 2009-04-01
20:34:09 UTC (rev 26322)
@@ -1,493 +0,0 @@
-package net.i2p.util;
-/*
- * free (adj.): unencumbered; not under the control of others
- * Written by jrandom in 2003 and released into the public domain
- * with no warranty of any kind, either expressed or implied.
- * It probably won't make your computer catch on fire, or eat
- * your children, but it might. Use at your own risk.
- *
- */
-
-import java.math.BigInteger;
-import java.util.Random;
-import java.net.URL;
-import java.io.FileOutputStream;
-import java.io.InputStream;
-import java.io.IOException;
-import java.io.FileNotFoundException;
-import java.io.File;
-
-import freenet.support.HexUtil;
-import freenet.support.Logger;
-import freenet.support.CPUInformation.AMDCPUInfo;
-import freenet.support.CPUInformation.CPUID;
-import freenet.support.CPUInformation.CPUInfo;
-import freenet.support.CPUInformation.IntelCPUInfo;
-import freenet.support.CPUInformation.UnknownCPUException;
-import freenet.support.io.Closer;
-
-/**
- * <p>BigInteger that takes advantage of the jbigi library for the modPow
operation,
- * which accounts for a massive segment of the processing cost of asymmetric
- * crypto. It also takes advantage of the jbigi library for converting a
BigInteger
- * value to a double. Sun's implementation of the 'doubleValue()' method is
_very_ lousy.
- *
- * The jbigi library itself is basically just a JNI wrapper around the
- * GMP library - a collection of insanely efficient routines for dealing with
- * big numbers.</p>
- *
- * There are three environmental properties for configuring this component:
<ul>
- * <li><b>jbigi.enable</b>: whether to use the native library (defaults to
"true")</li>
- * <li><b>jbigi.impl</b>: select which resource to use as the native
implementation</li>
- * <li><b>jbigi.ref</b>: the file specified in this parameter may contain a
resource
- * name to override jbigi.impl (defaults to
"jbigi.cfg")</li>
- * </ul>
- *
- * <p>If jbigi.enable is set to false, this class won't even attempt to use
the
- * native library, but if it is set to true (or is not specified), it will
first
- * check the platform specific library path for the "jbigi" library, as
defined by
- * {@link Runtime#loadLibrary} - e.g. C:\windows\jbigi.dll or
/lib/libjbigi.so.
- * If that fails, it reviews the jbigi.impl environment property - if that is
set,
- * it checks all of the components in the CLASSPATH for the file specified and
- * attempts to load it as the native library. If jbigi.impl is not set, if
there
- * is no matching resource, or if that resource is not a valid OS/architecture
- * specific library, the NativeBigInteger will revert to using the pure java
- * implementation.</p>
- *
- * <p>That means <b>NativeBigInteger will not attempt to guess the correct
- * platform/OS/whatever</b> - applications using this class should define that
- * property prior to <i>referencing</i> the NativeBigInteger (or before loading
- * the JVM, of course). Alternately, people with custom built jbigi
implementations
- * in their OS's standard search path (LD_LIBRARY_PATH, etc) needn't
bother.</p>
- *
- * <p>One way to deploy the native library is to create a jbigi.jar file
containing
- * all of the native implementations with filenames such as "win-athlon",
"linux-p2",
- * "freebsd-sparcv4", where those files are the OS specific libraries (the
contents of
- * the DLL or .so file built for those OSes / architectures). The user would
then
- * simply specify -Djbigi.impl=win-athlon and this component would pick up
that
- * library.</p>
- *
- * <p>Another way is to create a seperate jbigi.jar file for each platform
containing
- * one file - "native", where that file is the OS / architecture specific
library
- * implementation, as above. This way the user would download the correct
jbigi.jar
- * (and not all of the libraries for platforms/OSes they don't need) and would
specify
- * -Djbigi.impl=native.</p>
- *
- * <p>Running this class by itself does a basic unit test and benchmarks the
- * NativeBigInteger.modPow/doubleValue vs. the BigInteger.modPow/doubleValue
by running a 2Kbit op 100
- * times. At the end of each test, if the native implementation is loaded
this will output
- * something like:</p>
- * <pre>
- * native run time: 6090ms (60ms each)
- * java run time: 68067ms (673ms each)
- * native = 8.947066860593239% of pure java time
- * </pre>
- *
- * <p>If the native implementation is not loaded, it will start by saying:</p>
- * <pre>
- * WARN: Native BigInteger library jbigi not loaded - using pure java
- * </pre>
- * <p>Then go on to run the test, finally outputting:</p>
- * <pre>
- * java run time: 64653ms (640ms each)
- * However, we couldn't load the native library, so this doesn't test much
- * </pre>
- *
- */
-public class NativeBigInteger extends BigInteger {
-
- /** did we load the native lib correctly? */
- private static boolean _nativeOk = false;
- /**
- * do we want to dump some basic success/failure info to stderr during
- * initialization? this would otherwise use the Log component, but
this makes
- * it easier for other systems to reuse this class
- */
- private static final boolean _doLog = true;
- private final static String JBIGI_OPTIMIZATION_K6 = "k6";
- private final static String JBIGI_OPTIMIZATION_K6_2 = "k62";
- private final static String JBIGI_OPTIMIZATION_K6_3 = "k63";
- private final static String JBIGI_OPTIMIZATION_ATHLON = "athlon";
- private final static String JBIGI_OPTIMIZATION_X86_64 = "x86_64";
- private final static String JBIGI_OPTIMIZATION_X86_64_32 = "x86_64_32";
- private final static String JBIGI_OPTIMIZATION_PENTIUM = "pentium";
- private final static String JBIGI_OPTIMIZATION_PENTIUMMMX =
"pentiummmx";
- private final static String JBIGI_OPTIMIZATION_PENTIUM2 = "pentium2";
- private final static String JBIGI_OPTIMIZATION_PENTIUM3 = "pentium3";
- private final static String JBIGI_OPTIMIZATION_PENTIUM4 = "pentium4";
- private final static String JBIGI_OPTIMIZATION_PPC = "ppc";
- private final static String sCPUType; //The CPU Type to optimize for
(one of the above strings)
- private static final long serialVersionUID = 0xc5392a97bb283dd2L;
-
- static {
- sCPUType = resolveCPUType();
- loadNative();
- }
-
- /** Tries to resolve the best type of CPU that we have an optimized
jbigi-dll/so for.
- * @return A string containing the CPU-type or null if CPU type is
unknown
- */
- private static String resolveCPUType() {
- try {
-
if(System.getProperty("os.arch").toLowerCase().matches("(i?[x0-9]86_64|amd64)"))
- return JBIGI_OPTIMIZATION_X86_64;
- else
if(System.getProperty("os.arch").toLowerCase().matches("(ppc)")) {
- System.out.println("Detected PowerPC!");
- return JBIGI_OPTIMIZATION_PPC;
- } else {
- CPUInfo c = CPUID.getInfo();
- if(c instanceof AMDCPUInfo) {
- AMDCPUInfo amdcpu = (AMDCPUInfo) c;
- if(amdcpu.IsAthlon64Compatible())
- return
JBIGI_OPTIMIZATION_X86_64_32;
- if(amdcpu.IsAthlonCompatible())
- return
JBIGI_OPTIMIZATION_ATHLON;
- if(amdcpu.IsK6_3_Compatible())
- return JBIGI_OPTIMIZATION_K6_3;
- if(amdcpu.IsK6_2_Compatible())
- return JBIGI_OPTIMIZATION_K6_2;
- if(amdcpu.IsK6Compatible())
- return JBIGI_OPTIMIZATION_K6;
- } else
- if(c instanceof IntelCPUInfo) {
- IntelCPUInfo intelcpu =
(IntelCPUInfo) c;
-
if(intelcpu.IsPentium4Compatible())
- return
JBIGI_OPTIMIZATION_PENTIUM4;
-
if(intelcpu.IsPentium3Compatible())
- return
JBIGI_OPTIMIZATION_PENTIUM3;
-
if(intelcpu.IsPentium2Compatible())
- return
JBIGI_OPTIMIZATION_PENTIUM2;
-
if(intelcpu.IsPentiumMMXCompatible())
- return
JBIGI_OPTIMIZATION_PENTIUMMMX;
-
if(intelcpu.IsPentiumCompatible())
- return
JBIGI_OPTIMIZATION_PENTIUM;
- }
- }
- return null;
- } catch(UnknownCPUException e) {
- return null; //TODO: Log something here maybe..
- }
- }
-
- /**
- * calculate (base ^ exponent) % modulus.
- *
- * @param base
- * big endian twos complement representation of the base
(but it must be positive)
- * @param exponent
- * big endian twos complement representation of the exponent
- * @param modulus
- * big endian twos complement representation of the modulus
- * @return big endian twos complement representation of (base ^
exponent) % modulus
- */
- public native static byte[] nativeModPow(byte base[], byte exponent[],
byte modulus[]);
-
- /**
- * Converts a BigInteger byte-array to a 'double'
- * @param ba Big endian twos complement representation of the
BigInteger to convert to a double
- * @return The plain double-value represented by 'ba'
- */
- public native static double nativeDoubleValue(byte ba[]);
- private byte[] cachedBa = null;
-
- public NativeBigInteger(byte val[]) {
- super(val);
- // Takes up too much RAM
-// int targetLength = bitLength() / 8 + 1;
-// if(val.length == targetLength)
-// cachedBa = val;
- }
-
- public NativeBigInteger(int signum, byte magnitude[]) {
- super(signum, magnitude);
- }
-
- public NativeBigInteger(int bitlen, int certainty, Random rnd) {
- super(bitlen, certainty, rnd);
- }
-
- public NativeBigInteger(int numbits, Random rnd) {
- super(numbits, rnd);
- }
-
- public NativeBigInteger(String val) {
- super(val);
- }
-
- public NativeBigInteger(String val, int radix) {
- super(val, radix);
- }
-
- /**Creates a new NativeBigInteger with the same value
- * as the supplied BigInteger. Warning!, not very efficent
- */
- public NativeBigInteger(BigInteger integer) {
- //Now, why doesn't sun provide a constructor
- //like this one in BigInteger?
- this(integer.toByteArray());
- }
-
- @Override
- public BigInteger modPow(BigInteger exponent, BigInteger m) {
- if(_nativeOk)
- return new NativeBigInteger(nativeModPow(toByteArray(),
exponent.toByteArray(), m.toByteArray()));
- else
- return new NativeBigInteger(super.modPow(exponent, m));
- }
-
- @Override
- public byte[] toByteArray() {
- if(cachedBa == null) //Since we are immutable it is safe to
never update the cached ba after it has initially been generated
- cachedBa = super.toByteArray();
- return cachedBa;
- }
-
- @Override
- public String toString(int radix) {
- if(radix == 16)
- return toHexString();
- return super.toString(radix);
- }
-
- public String toHexString() {
- byte[] buf = toByteArray();
- return HexUtil.bytesToHex(buf);
- }
-
- @Override
- public double doubleValue() {
- if(_nativeOk)
- return nativeDoubleValue(toByteArray());
- else
- return super.doubleValue();
- }
-
- /**
- *
- * @return True iff native methods will be used by this class
- */
- public static boolean isNative() {
- return _nativeOk;
- }
- /**
- * <p>Do whatever we can to load up the native library backing this
BigInteger's native methods.
- * If it can find a custom built jbigi.dll / libjbigi.so, it'll use
that. Otherwise
- * it'll try to look in the classpath for the correct library (see
loadFromResource).
- * If the user specifies -Djbigi.enable=false it'll skip all of
this.</p>
- *
- */
- private static final void loadNative() {
- try {
- String wantedProp = System.getProperty("jbigi.enable",
"true");
- boolean wantNative =
"true".equalsIgnoreCase(wantedProp);
- if(wantNative) {
- boolean loaded = loadFromResource(true);
- if(loaded) {
- _nativeOk = true;
- if(_doLog)
- System.err.println("INFO:
Optimized native BigInteger library '" + getResourceName(true) + "' loaded from
resource");
- } else {
- loaded = loadGeneric(true);
- if(loaded) {
- _nativeOk = true;
- if(_doLog)
-
System.err.println("INFO: Optimized native BigInteger library '" +
getMiddleName(true) + "' loaded from somewhere in the path");
- } else {
- loaded =
loadFromResource(false);
- if(loaded) {
- _nativeOk = true;
- if(_doLog)
-
System.err.println("INFO: Non-optimized native BigInteger library '" +
getResourceName(false) + "' loaded from resource");
- } else {
- loaded =
loadGeneric(false);
- if(loaded) {
- _nativeOk =
true;
- if(_doLog)
-
System.err.println("INFO: Non-optimized native BigInteger library '" +
getMiddleName(false) + "' loaded from somewhere in the path");
- } else
- _nativeOk =
false;
- }
- }
- }
- }
- if(_doLog && !_nativeOk)
- System.err.println("INFO: Native BigInteger
library jbigi not loaded - using pure java");
- } catch(Throwable e) {
- if(_doLog)
- System.err.println("INFO: Native BigInteger
library jbigi not loaded, reason: '" + e.getMessage() + "' - using pure java");
- }
- }
-
- /**
- * <p>Try loading it from an explictly build jbigi.dll / libjbigi.so
first, before
- * looking into a jbigi.jar for any other libraries.</p>
- *
- * @return true if it was loaded successfully, else false
- *
- */
- private static final boolean loadGeneric(boolean optimized) {
- try {
- String name = getMiddleName(optimized);
- if(name == null)
- return false;
- System.loadLibrary(name);
- return true;
- } catch(UnsatisfiedLinkError ule) {
- return false;
- }
- }
-
- /**
- * A helper function to make loading the native library easier.
- * @param f The File to which to write the library
- * @param URL The URL of the resource
- * @return True is the library was loaded, false on error
- * @throws FileNotFoundException If the library could not be read from
the reference
- * @throws UnsatisfiedLinkError If and only if the library is
incompatible with this system
- */
- private static final boolean tryLoadResource(File f, URL resource)
- throws FileNotFoundException, UnsatisfiedLinkError {
- InputStream is;
- try {
- is = resource.openStream();
- } catch(IOException e) {
- f.delete();
- throw new FileNotFoundException();
- }
-
- FileOutputStream fos = null;
- try {
- f.deleteOnExit();
- fos = new FileOutputStream(f);
- byte[] buf = new byte[4096 * 1024];
- int read;
- while((read = is.read(buf)) > 0) {
- fos.write(buf, 0, read);
- }
- fos.close();
- fos = null;
- System.load(f.getAbsolutePath());
- return true;
- } catch(IOException e) {
- } catch(UnsatisfiedLinkError ule) {
- // likely to be "noexec"
- if(ule.toString().toLowerCase().indexOf("not
permitted") == -1)
- throw ule;
- } finally {
- Closer.close(fos);
- f.delete();
- }
-
- return false;
- }
-
- /**
- * <p>Check all of the jars in the classpath for the file specified by
the
- * environmental property "jbigi.impl" and load it as the native
library
- * implementation. For instance, a windows user on a p4 would define
- * -Djbigi.impl=win-686 if there is a jbigi.jar in the classpath
containing the
- * files "win-686", "win-athlon", "freebsd-p4", "linux-p3", where each
- * of those files contain the correct binary file for a native library
(e.g.
- * windows DLL, or a *nix .so). </p>
- *
- * <p>This is a pretty ugly hack, using the general technique
illustrated by the
- * onion FEC libraries. It works by pulling the resource, writing out
the
- * byte stream to a temporary file, loading the native library from
that file,
- * then deleting the file.</p>
- *
- * @return true if it was loaded successfully, else false
- *
- */
- private static final boolean loadFromResource(boolean optimized) {
- String resourceName = getResourceName(optimized);
- if(resourceName == null)
- return false;
- URL resource =
NativeBigInteger.class.getClassLoader().getResource(resourceName);
- if(resource == null) {
- if(_doLog)
- System.err.println("NOTICE: Resource name [" +
getResourceName(true) + "] was not found");
- return false;
- }
- File temp = null;
- try {
- try {
- temp = File.createTempFile("jbigi", "lib.tmp");
- if(tryLoadResource(temp, resource))
- return true;
- } catch(IOException e) {
- } finally {
- if(temp != null) temp.delete();
- }
- Logger.error(NativeBigInteger.class, "Can't load from "
+ System.getProperty("java.io.tmpdir"));
- System.err.println("Can't load from " +
System.getProperty("java.io.tmpdir"));
- temp = new File("jbigi-lib.tmp");
- if(tryLoadResource(temp, resource))
- return true;
- } catch(Exception fnf) {
- Logger.error(NativeBigInteger.class, "Error reading
jbigi resource", fnf);
- System.err.println("Error reading jbigi resource");
- } catch(UnsatisfiedLinkError ule) {
- Logger.error(NativeBigInteger.class, "Library " +
resourceName + " is not appropriate for this system.");
- System.err.println("Library " + resourceName + " is not
appropriate for this system.");
- } finally {
- if(temp != null) temp.delete();
- }
-
- return false;
- }
-
- private static final String getResourceName(boolean optimized) {
- String pname =
NativeBigInteger.class.getPackage().getName().replace('.', '/');
- String pref = getLibraryPrefix();
- String middle = getMiddleName(optimized);
- String suff = getLibrarySuffix();
- if((pref == null) || (middle == null) || (suff == null))
- return null;
- return pname + '/' + pref + middle + '.' + suff;
- }
-
- private static final String getMiddleName(boolean optimized) {
-
- String sAppend;
- if(optimized)
- if(sCPUType == null)
- return null;
- else
- sAppend = '-' + sCPUType;
- else
- sAppend = "-none";
-
- boolean isWindows =
(System.getProperty("os.name").toLowerCase().indexOf("windows") != -1);
- boolean isLinux =
(System.getProperty("os.name").toLowerCase().indexOf("linux") != -1);
- boolean isFreebsd =
(System.getProperty("os.name").toLowerCase().indexOf("freebsd") != -1);
- boolean isMacOS =
(System.getProperty("os.name").toLowerCase().indexOf("mac os x") != -1);
- if(isWindows)
- return "jbigi-windows" + sAppend; // The convention on
Windows
- if(isLinux)
- return "jbigi-linux" + sAppend; // The convention on
linux...
- if(isFreebsd)
- return "jbigi-freebsd" + sAppend; // The convention on
freebsd...
- if(isMacOS)
- return "jbigi-osx" + sAppend; // The convention on Mac
OS X...
- throw new RuntimeException("Dont know jbigi library name for os
type '" + System.getProperty("os.name") + '\'');
- }
-
- private static final String getLibrarySuffix() {
- boolean isWindows =
System.getProperty("os.name").toLowerCase().indexOf("windows") != -1;
- boolean isMacOS =
(System.getProperty("os.name").toLowerCase().indexOf("mac os x") != -1);
- if(isWindows)
- return "dll";
- else if(isMacOS)
- return "jnilib";
- else
- return "so";
- }
-
- private static final String getLibraryPrefix() {
- boolean isWindows =
System.getProperty("os.name").toLowerCase().indexOf("windows") != -1;
- if(isWindows)
- return "";
- else
- return "lib";
- }
-}
Copied: trunk/freenet/src/net/i2p/util/NativeBigInteger.java (from rev 26320,
branches/db4o/freenet/src/net/i2p/util/NativeBigInteger.java)
===================================================================
--- trunk/freenet/src/net/i2p/util/NativeBigInteger.java
(rev 0)
+++ trunk/freenet/src/net/i2p/util/NativeBigInteger.java 2009-04-01
20:34:09 UTC (rev 26322)
@@ -0,0 +1,493 @@
+package net.i2p.util;
+/*
+ * free (adj.): unencumbered; not under the control of others
+ * Written by jrandom in 2003 and released into the public domain
+ * with no warranty of any kind, either expressed or implied.
+ * It probably won't make your computer catch on fire, or eat
+ * your children, but it might. Use at your own risk.
+ *
+ */
+
+import java.math.BigInteger;
+import java.util.Random;
+import java.net.URL;
+import java.io.FileOutputStream;
+import java.io.InputStream;
+import java.io.IOException;
+import java.io.FileNotFoundException;
+import java.io.File;
+
+import freenet.support.HexUtil;
+import freenet.support.Logger;
+import freenet.support.CPUInformation.AMDCPUInfo;
+import freenet.support.CPUInformation.CPUID;
+import freenet.support.CPUInformation.CPUInfo;
+import freenet.support.CPUInformation.IntelCPUInfo;
+import freenet.support.CPUInformation.UnknownCPUException;
+import freenet.support.io.Closer;
+
+/**
+ * <p>BigInteger that takes advantage of the jbigi library for the modPow operation,
+ * which accounts for a massive segment of the processing cost of asymmetric
+ * crypto. It also takes advantage of the jbigi library for converting a BigInteger
+ * value to a double. Sun's implementation of the 'doubleValue()' method is _very_ lousy.
+ *
+ * The jbigi library itself is basically just a JNI wrapper around the
+ * GMP library - a collection of insanely efficient routines for dealing with
+ * big numbers.</p>
+ *
+ * There are three environmental properties for configuring this component: <ul>
+ * <li><b>jbigi.enable</b>: whether to use the native library (defaults to "true")</li>
+ * <li><b>jbigi.impl</b>: select which resource to use as the native implementation</li>
+ * <li><b>jbigi.ref</b>: the file specified in this parameter may contain a resource
+ *                       name to override jbigi.impl (defaults to "jbigi.cfg")</li>
+ * </ul>
+ *
+ * <p>If jbigi.enable is set to false, this class won't even attempt to use the
+ * native library, but if it is set to true (or is not specified), it will first
+ * check the platform specific library path for the "jbigi" library, as defined by
+ * {@link Runtime#loadLibrary} - e.g. C:\windows\jbigi.dll or /lib/libjbigi.so.
+ * If that fails, it reviews the jbigi.impl environment property - if that is set,
+ * it checks all of the components in the CLASSPATH for the file specified and
+ * attempts to load it as the native library. If jbigi.impl is not set, if there
+ * is no matching resource, or if that resource is not a valid OS/architecture
+ * specific library, the NativeBigInteger will revert to using the pure java
+ * implementation.</p>
+ *
+ * <p>That means <b>NativeBigInteger will not attempt to guess the correct
+ * platform/OS/whatever</b> - applications using this class should define that
+ * property prior to <i>referencing</i> the NativeBigInteger (or before loading
+ * the JVM, of course). Alternately, people with custom built jbigi implementations
+ * in their OS's standard search path (LD_LIBRARY_PATH, etc) needn't bother.</p>
+ *
+ * <p>One way to deploy the native library is to create a jbigi.jar file containing
+ * all of the native implementations with filenames such as "win-athlon", "linux-p2",
+ * "freebsd-sparcv4", where those files are the OS specific libraries (the contents of
+ * the DLL or .so file built for those OSes / architectures). The user would then
+ * simply specify -Djbigi.impl=win-athlon and this component would pick up that
+ * library.</p>
+ *
+ * <p>Another way is to create a separate jbigi.jar file for each platform containing
+ * one file - "native", where that file is the OS / architecture specific library
+ * implementation, as above. This way the user would download the correct jbigi.jar
+ * (and not all of the libraries for platforms/OSes they don't need) and would specify
+ * -Djbigi.impl=native.</p>
+ *
+ * <p>Running this class by itself does a basic unit test and benchmarks the
+ * NativeBigInteger.modPow/doubleValue vs. the BigInteger.modPow/doubleValue by
+ * running a 2Kbit op 100 times. At the end of each test, if the native
+ * implementation is loaded this will output something like:</p>
+ * <pre>
+ *  native run time:  6090ms (60ms each)
+ *  java run time:    68067ms (673ms each)
+ *  native = 8.947066860593239% of pure java time
+ * </pre>
+ *
+ * <p>If the native implementation is not loaded, it will start by saying:</p>
+ * <pre>
+ *  WARN: Native BigInteger library jbigi not loaded - using pure java
+ * </pre>
+ * <p>Then go on to run the test, finally outputting:</p>
+ * <pre>
+ *  java run time:    64653ms (640ms each)
+ *  However, we couldn't load the native library, so this doesn't test much
+ * </pre>
+ *
+ */
+public class NativeBigInteger extends BigInteger {
+
+ /** did we load the native lib correctly? */
+ private static boolean _nativeOk = false;
+    /**
+     * do we want to dump some basic success/failure info to stderr during
+     * initialization? this would otherwise use the Log component, but this
+     * makes it easier for other systems to reuse this class
+     */
+ private static final boolean _doLog = true;
+ private final static String JBIGI_OPTIMIZATION_K6 = "k6";
+ private final static String JBIGI_OPTIMIZATION_K6_2 = "k62";
+ private final static String JBIGI_OPTIMIZATION_K6_3 = "k63";
+ private final static String JBIGI_OPTIMIZATION_ATHLON = "athlon";
+ private final static String JBIGI_OPTIMIZATION_X86_64 = "x86_64";
+ private final static String JBIGI_OPTIMIZATION_X86_64_32 = "x86_64_32";
+ private final static String JBIGI_OPTIMIZATION_PENTIUM = "pentium";
+    private final static String JBIGI_OPTIMIZATION_PENTIUMMMX = "pentiummmx";
+ private final static String JBIGI_OPTIMIZATION_PENTIUM2 = "pentium2";
+ private final static String JBIGI_OPTIMIZATION_PENTIUM3 = "pentium3";
+ private final static String JBIGI_OPTIMIZATION_PENTIUM4 = "pentium4";
+ private final static String JBIGI_OPTIMIZATION_PPC = "ppc";
+    private final static String sCPUType; //The CPU Type to optimize for (one of the above strings)
+ private static final long serialVersionUID = 0xc5392a97bb283dd2L;
+
+ static {
+ sCPUType = resolveCPUType();
+ loadNative();
+ }
+
+    /** Tries to resolve the best type of CPU that we have an optimized jbigi-dll/so for.
+     * @return A string containing the CPU-type or null if CPU type is unknown
+     */
+    private static String resolveCPUType() {
+        try {
+            if(System.getProperty("os.arch").toLowerCase().matches("(i?[x0-9]86_64|amd64)"))
+                return JBIGI_OPTIMIZATION_X86_64;
+            else if(System.getProperty("os.arch").toLowerCase().matches("(ppc)")) {
+                System.out.println("Detected PowerPC!");
+                return JBIGI_OPTIMIZATION_PPC;
+            } else {
+                CPUInfo c = CPUID.getInfo();
+                if(c instanceof AMDCPUInfo) {
+                    AMDCPUInfo amdcpu = (AMDCPUInfo) c;
+                    if(amdcpu.IsAthlon64Compatible())
+                        return JBIGI_OPTIMIZATION_X86_64_32;
+                    if(amdcpu.IsAthlonCompatible())
+                        return JBIGI_OPTIMIZATION_ATHLON;
+                    if(amdcpu.IsK6_3_Compatible())
+                        return JBIGI_OPTIMIZATION_K6_3;
+                    if(amdcpu.IsK6_2_Compatible())
+                        return JBIGI_OPTIMIZATION_K6_2;
+                    if(amdcpu.IsK6Compatible())
+                        return JBIGI_OPTIMIZATION_K6;
+                } else if(c instanceof IntelCPUInfo) {
+                    IntelCPUInfo intelcpu = (IntelCPUInfo) c;
+                    if(intelcpu.IsPentium4Compatible())
+                        return JBIGI_OPTIMIZATION_PENTIUM4;
+                    if(intelcpu.IsPentium3Compatible())
+                        return JBIGI_OPTIMIZATION_PENTIUM3;
+                    if(intelcpu.IsPentium2Compatible())
+                        return JBIGI_OPTIMIZATION_PENTIUM2;
+                    if(intelcpu.IsPentiumMMXCompatible())
+                        return JBIGI_OPTIMIZATION_PENTIUMMMX;
+                    if(intelcpu.IsPentiumCompatible())
+                        return JBIGI_OPTIMIZATION_PENTIUM;
+                }
+            }
+            return null;
+        } catch(UnknownCPUException e) {
+            return null; //TODO: Log something here maybe..
+        }
+    }
+
+    /**
+     * calculate (base ^ exponent) % modulus.
+     *
+     * @param base
+     *            big endian twos complement representation of the base (but it must be positive)
+     * @param exponent
+     *            big endian twos complement representation of the exponent
+     * @param modulus
+     *            big endian twos complement representation of the modulus
+     * @return big endian twos complement representation of (base ^ exponent) % modulus
+     */
+    public native static byte[] nativeModPow(byte base[], byte exponent[], byte modulus[]);
+
+    /**
+     * Converts a BigInteger byte-array to a 'double'
+     * @param ba Big endian twos complement representation of the BigInteger to convert to a double
+     * @return The plain double-value represented by 'ba'
+     */
+    public native static double nativeDoubleValue(byte ba[]);
+ private byte[] cachedBa = null;
+
+ public NativeBigInteger(byte val[]) {
+ super(val);
+ // Takes up too much RAM
+// int targetLength = bitLength() / 8 + 1;
+// if(val.length == targetLength)
+// cachedBa = val;
+ }
+
+ public NativeBigInteger(int signum, byte magnitude[]) {
+ super(signum, magnitude);
+ }
+
+ public NativeBigInteger(int bitlen, int certainty, Random rnd) {
+ super(bitlen, certainty, rnd);
+ }
+
+ public NativeBigInteger(int numbits, Random rnd) {
+ super(numbits, rnd);
+ }
+
+ public NativeBigInteger(String val) {
+ super(val);
+ }
+
+ public NativeBigInteger(String val, int radix) {
+ super(val, radix);
+ }
+
+    /** Creates a new NativeBigInteger with the same value
+     * as the supplied BigInteger. Warning: not very efficient.
+     */
+ public NativeBigInteger(BigInteger integer) {
+ //Now, why doesn't sun provide a constructor
+ //like this one in BigInteger?
+ this(integer.toByteArray());
+ }
+
+ @Override
+ public BigInteger modPow(BigInteger exponent, BigInteger m) {
+ if(_nativeOk)
+            return new NativeBigInteger(nativeModPow(toByteArray(), exponent.toByteArray(), m.toByteArray()));
+ else
+ return new NativeBigInteger(super.modPow(exponent, m));
+ }
+
+ @Override
+ public byte[] toByteArray() {
+        if(cachedBa == null) // Since we are immutable it is safe to never update the cached ba after it has initially been generated
+ cachedBa = super.toByteArray();
+ return cachedBa;
+ }
+
+ @Override
+ public String toString(int radix) {
+ if(radix == 16)
+ return toHexString();
+ return super.toString(radix);
+ }
+
+ public String toHexString() {
+ byte[] buf = toByteArray();
+ return HexUtil.bytesToHex(buf);
+ }
+
+ @Override
+ public double doubleValue() {
+ if(_nativeOk)
+ return nativeDoubleValue(toByteArray());
+ else
+ return super.doubleValue();
+ }
+
+ /**
+ *
+ * @return True iff native methods will be used by this class
+ */
+ public static boolean isNative() {
+ return _nativeOk;
+ }
+    /**
+     * <p>Do whatever we can to load up the native library backing this BigInteger's native methods.
+     * If it can find a custom built jbigi.dll / libjbigi.so, it'll use that. Otherwise
+     * it'll try to look in the classpath for the correct library (see loadFromResource).
+     * If the user specifies -Djbigi.enable=false it'll skip all of this.</p>
+     *
+     */
+    private static final void loadNative() {
+        try {
+            String wantedProp = System.getProperty("jbigi.enable", "true");
+            boolean wantNative = "true".equalsIgnoreCase(wantedProp);
+            if(wantNative) {
+                boolean loaded = loadFromResource(true);
+                if(loaded) {
+                    _nativeOk = true;
+                    if(_doLog)
+                        System.err.println("INFO: Optimized native BigInteger library '" + getResourceName(true) + "' loaded from resource");
+                } else {
+                    loaded = loadGeneric(true);
+                    if(loaded) {
+                        _nativeOk = true;
+                        if(_doLog)
+                            System.err.println("INFO: Optimized native BigInteger library '" + getMiddleName(true) + "' loaded from somewhere in the path");
+                    } else {
+                        loaded = loadFromResource(false);
+                        if(loaded) {
+                            _nativeOk = true;
+                            if(_doLog)
+                                System.err.println("INFO: Non-optimized native BigInteger library '" + getResourceName(false) + "' loaded from resource");
+                        } else {
+                            loaded = loadGeneric(false);
+                            if(loaded) {
+                                _nativeOk = true;
+                                if(_doLog)
+                                    System.err.println("INFO: Non-optimized native BigInteger library '" + getMiddleName(false) + "' loaded from somewhere in the path");
+                            } else
+                                _nativeOk = false;
+                        }
+                    }
+                }
+            }
+            if(_doLog && !_nativeOk)
+                System.err.println("INFO: Native BigInteger library jbigi not loaded - using pure java");
+        } catch(Throwable e) {
+            if(_doLog)
+                System.err.println("INFO: Native BigInteger library jbigi not loaded, reason: '" + e.getMessage() + "' - using pure java");
+        }
+    }
+
+    /**
+     * <p>Try loading it from an explicitly built jbigi.dll / libjbigi.so first,
+     * before looking into a jbigi.jar for any other libraries.</p>
+     *
+     * @return true if it was loaded successfully, else false
+     *
+     */
+ private static final boolean loadGeneric(boolean optimized) {
+ try {
+ String name = getMiddleName(optimized);
+ if(name == null)
+ return false;
+ System.loadLibrary(name);
+ return true;
+ } catch(UnsatisfiedLinkError ule) {
+ return false;
+ }
+ }
+
+    /**
+     * A helper function to make loading the native library easier.
+     * @param f The File to which to write the library
+     * @param URL The URL of the resource
+     * @return True if the library was loaded, false on error
+     * @throws FileNotFoundException If the library could not be read from the reference
+     * @throws UnsatisfiedLinkError If and only if the library is incompatible with this system
+     */
+ private static final boolean tryLoadResource(File f, URL resource)
+ throws FileNotFoundException, UnsatisfiedLinkError {
+ InputStream is;
+ try {
+ is = resource.openStream();
+ } catch(IOException e) {
+ f.delete();
+ throw new FileNotFoundException();
+ }
+
+ FileOutputStream fos = null;
+ try {
+ f.deleteOnExit();
+ fos = new FileOutputStream(f);
+ byte[] buf = new byte[4096 * 1024];
+ int read;
+ while((read = is.read(buf)) > 0) {
+ fos.write(buf, 0, read);
+ }
+ fos.close();
+ fos = null;
+ System.load(f.getAbsolutePath());
+ return true;
+ } catch(IOException e) {
+ } catch(UnsatisfiedLinkError ule) {
+ // likely to be "noexec"
+            if(ule.toString().toLowerCase().indexOf("not permitted") == -1)
+ throw ule;
+ } finally {
+ Closer.close(fos);
+ f.delete();
+ }
+
+ return false;
+ }
+
+    /**
+     * <p>Check all of the jars in the classpath for the file specified by the
+     * environmental property "jbigi.impl" and load it as the native library
+     * implementation. For instance, a windows user on a p4 would define
+     * -Djbigi.impl=win-686 if there is a jbigi.jar in the classpath containing the
+     * files "win-686", "win-athlon", "freebsd-p4", "linux-p3", where each
+     * of those files contain the correct binary file for a native library (e.g.
+     * windows DLL, or a *nix .so). </p>
+     *
+     * <p>This is a pretty ugly hack, using the general technique illustrated by the
+     * onion FEC libraries. It works by pulling the resource, writing out the
+     * byte stream to a temporary file, loading the native library from that file,
+     * then deleting the file.</p>
+     *
+     * @return true if it was loaded successfully, else false
+     *
+     */
+    private static final boolean loadFromResource(boolean optimized) {
+        String resourceName = getResourceName(optimized);
+        if(resourceName == null)
+            return false;
+        URL resource = NativeBigInteger.class.getClassLoader().getResource(resourceName);
+        if(resource == null) {
+            if(_doLog)
+                System.err.println("NOTICE: Resource name [" + getResourceName(true) + "] was not found");
+            return false;
+        }
+        File temp = null;
+        try {
+            try {
+                temp = File.createTempFile("jbigi", "lib.tmp");
+                if(tryLoadResource(temp, resource))
+                    return true;
+            } catch(IOException e) {
+            } finally {
+                if(temp != null) temp.delete();
+            }
+            Logger.error(NativeBigInteger.class, "Can't load from " + System.getProperty("java.io.tmpdir"));
+            System.err.println("Can't load from " + System.getProperty("java.io.tmpdir"));
+            temp = new File("jbigi-lib.tmp");
+            if(tryLoadResource(temp, resource))
+                return true;
+        } catch(Exception fnf) {
+            Logger.error(NativeBigInteger.class, "Error reading jbigi resource", fnf);
+            System.err.println("Error reading jbigi resource");
+        } catch(UnsatisfiedLinkError ule) {
+            Logger.error(NativeBigInteger.class, "Library " + resourceName + " is not appropriate for this system.");
+            System.err.println("Library " + resourceName + " is not appropriate for this system.");
+        } finally {
+            if(temp != null) temp.delete();
+        }
+
+        return false;
+    }
+
+ private static final String getResourceName(boolean optimized) {
+        String pname = NativeBigInteger.class.getPackage().getName().replace('.', '/');
+ String pref = getLibraryPrefix();
+ String middle = getMiddleName(optimized);
+ String suff = getLibrarySuffix();
+ if((pref == null) || (middle == null) || (suff == null))
+ return null;
+ return pname + '/' + pref + middle + '.' + suff;
+ }
+
+    private static final String getMiddleName(boolean optimized) {
+
+        String sAppend;
+        if(optimized)
+            if(sCPUType == null)
+                return null;
+            else
+                sAppend = '-' + sCPUType;
+        else
+            sAppend = "-none";
+
+        boolean isWindows = (System.getProperty("os.name").toLowerCase().indexOf("windows") != -1);
+        boolean isLinux = (System.getProperty("os.name").toLowerCase().indexOf("linux") != -1);
+        boolean isFreebsd = (System.getProperty("os.name").toLowerCase().indexOf("freebsd") != -1);
+        boolean isMacOS = (System.getProperty("os.name").toLowerCase().indexOf("mac os x") != -1);
+        if(isWindows)
+            return "jbigi-windows" + sAppend; // The convention on Windows
+        if(isLinux)
+            return "jbigi-linux" + sAppend; // The convention on linux...
+        if(isFreebsd)
+            return "jbigi-freebsd" + sAppend; // The convention on freebsd...
+        if(isMacOS)
+            return "jbigi-osx" + sAppend; // The convention on Mac OS X...
+        throw new RuntimeException("Don't know jbigi library name for os type '" + System.getProperty("os.name") + '\'');
+    }
+
+ private static final String getLibrarySuffix() {
+ String osName = System.getProperty("os.name").toLowerCase();
+ if(osName.indexOf("windows") != -1)
+ return "dll";
+ else if(osName.indexOf("mac os x") != -1)
+ return "jnilib";
+ else
+ return "so";
+ }
+
+ private static final String getLibraryPrefix() {
+ boolean isWindows = System.getProperty("os.name").toLowerCase().indexOf("windows") != -1;
+ if(isWindows)
+ return "";
+ else
+ return "lib";
+ }
+}
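
The javadoc above describes the whole trick: pull a binary out of a jar on the classpath, write it to a temporary file, System.load() it by absolute path, and delete the file afterwards. On a Linux/Athlon box, for example, getResourceName() would yield something like net/i2p/util/libjbigi-linux-athlon.so (the exact CPU suffix depends on sCPUType). The following minimal, self-contained sketch shows the same technique stripped of Freenet's logging and working-directory fallback; the class name ResourceLibLoader, the temp-file prefix, and the buffer size are illustrative assumptions, not Freenet code.

import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.net.URL;

public class ResourceLibLoader {
    /** Extract the named classpath resource to a temp file and System.load() it. */
    public static boolean loadFromResource(String resourceName) {
        URL resource = ResourceLibLoader.class.getClassLoader().getResource(resourceName);
        if (resource == null)
            return false; // not on the classpath
        File temp = null;
        InputStream in = null;
        FileOutputStream out = null;
        try {
            temp = File.createTempFile("native", "lib.tmp");
            in = resource.openStream();
            out = new FileOutputStream(temp);
            byte[] buf = new byte[4096];
            for (int read; (read = in.read(buf)) != -1; )
                out.write(buf, 0, read);
            out.close();
            // Load the freshly written shared library by absolute path.
            System.load(temp.getAbsolutePath());
            return true;
        } catch (IOException e) {
            return false;
        } catch (UnsatisfiedLinkError ule) {
            return false; // wrong architecture, missing symbols, etc.
        } finally {
            try { if (in != null) in.close(); } catch (IOException ignored) {}
            try { if (out != null) out.close(); } catch (IOException ignored) {}
            // Once loaded, the on-disk copy is no longer needed.
            if (temp != null)
                temp.delete();
        }
    }
}

Deleting the file in the finally block works because once System.load() has mapped the library, the on-disk copy is no longer needed on Unix; on Windows the delete may fail while the DLL is mapped, which is why the return value of delete() is ignored here, as in the code above.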
Property changes on: trunk/freenet/test/freenet/support/io/MockInputStream.java
___________________________________________________________________
Added: svn:mergeinfo
+
Deleted: trunk/freenet/test/net/i2p/util/NativeBigIntegerTest.java
===================================================================
--- branches/db4o/freenet/test/net/i2p/util/NativeBigIntegerTest.java	2009-04-01 19:43:11 UTC (rev 26320)
+++ trunk/freenet/test/net/i2p/util/NativeBigIntegerTest.java	2009-04-01 20:34:09 UTC (rev 26322)
@@ -1,114 +0,0 @@
-package net.i2p.util;
-
-import java.math.BigInteger;
-import java.security.SecureRandom;
-
-import junit.framework.TestCase;
-
-public class NativeBigIntegerTest extends TestCase {
- // Run with <code>ant -Dbenchmark=true</code> to do benchmark
- private static final boolean BENCHMARK = Boolean.getBoolean("benchmark");
- private static int numRuns = BENCHMARK ? 200 : 5;
-
- /*
- * the sample numbers are elG generator/prime so we can test with reasonable
- * numbers
- */
- private final static byte[] _sampleGenerator = new BigInteger("2").toByteArray();
- private final static byte[] _samplePrime = new BigInteger("FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
- + "29024E088A67CC74020BBEA63B139B22514A08798E3404DD" + "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
- + "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED" + "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
- + "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F" + "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
- + "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B" + "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
- + "DE2BCBF6955817183995497CEA956AE515D2261898FA0510" + "15728E5A8AACAA68FFFFFFFFFFFFFFFF", 16)
- .toByteArray();
-
- private SecureRandom rand;
- private int runsProcessed;
-
- private BigInteger jg;
- private BigInteger jp;
-
- private long totalTime = 0;
- private long javaTime = 0;
-
- protected void setUp() throws Exception {
- if (!NativeBigInteger.isNative())
- printError("can't load native code");
-
- printInfo("DEBUG: Warming up the random number generator...");
- rand = new SecureRandom();
- rand.nextBoolean();
- printInfo("DEBUG: Random number generator warmed up");
-
- jg = new BigInteger(_sampleGenerator);
- jp = new BigInteger(_samplePrime);
-
- totalTime = javaTime = 0;
- }
-
- protected void tearDown() throws Exception {
- printInfo("INFO: run time: " + totalTime + "ms (" + (totalTime
/ (runsProcessed + 1)) + "ms each)");
- if (numRuns == runsProcessed)
- printInfo("INFO: " + runsProcessed + " runs complete
without any errors");
- else
- printError("ERROR: " + runsProcessed + " runs until we
got an error");
-
- printInfo("native run time: \t" + totalTime + "ms (" +
(totalTime / (runsProcessed + 1)) + "ms each)");
- printInfo("java run time: \t" + javaTime + "ms (" + (javaTime
/ (runsProcessed + 1)) + "ms each)");
- printInfo("native = " + ((totalTime * 100.0d) / (double)
javaTime) + "% of pure java time");
- }
-
- public void testModPow() {
- for (runsProcessed = 0; runsProcessed < numRuns; runsProcessed++) {
- BigInteger bi = new BigInteger(2048, rand);
- NativeBigInteger g = new NativeBigInteger(_sampleGenerator);
- NativeBigInteger p = new NativeBigInteger(_samplePrime);
- NativeBigInteger k = new NativeBigInteger(1, bi.toByteArray());
-
- long beforeModPow = System.currentTimeMillis();
- BigInteger myValue = g.modPow(k, p);
- long afterModPow = System.currentTimeMillis();
- BigInteger jval = jg.modPow(bi, jp);
- long afterJavaModPow = System.currentTimeMillis();
-
- totalTime += (afterModPow - beforeModPow);
- javaTime += (afterJavaModPow - afterModPow);
-
- assertEquals(jval, myValue);
- }
- }
-
- public void testDoubleValue() {
- BigInteger jg = new BigInteger(_sampleGenerator);
-
- int MULTIPLICATOR = 50000; //Run the doubleValue() calls within a loop since they are pretty fast..
- for (runsProcessed = 0; runsProcessed < numRuns; runsProcessed++) {
- NativeBigInteger g = new NativeBigInteger(_sampleGenerator);
- long beforeDoubleValue = System.currentTimeMillis();
- double dNative = 0;
- for (int mult = 0; mult < MULTIPLICATOR; mult++)
- dNative = g.doubleValue();
- long afterDoubleValue = System.currentTimeMillis();
- double jval = 0;
- for (int mult = 0; mult < MULTIPLICATOR; mult++)
- jval = jg.doubleValue();
- long afterJavaDoubleValue = System.currentTimeMillis();
-
- totalTime += (afterDoubleValue - beforeDoubleValue);
- javaTime += (afterJavaDoubleValue - afterDoubleValue);
-
- assertEquals(jval, dNative, 0);
- }
- }
-
- private static void printInfo(String info) {
- if (BENCHMARK)
- System.out.println(info);
- }
-
- private static void printError(String info) {
- if (BENCHMARK)
- System.err.println(info);
- }
-}
Copied: trunk/freenet/test/net/i2p/util/NativeBigIntegerTest.java (from rev 26320, branches/db4o/freenet/test/net/i2p/util/NativeBigIntegerTest.java)
===================================================================
--- trunk/freenet/test/net/i2p/util/NativeBigIntegerTest.java	(rev 0)
+++ trunk/freenet/test/net/i2p/util/NativeBigIntegerTest.java	2009-04-01 20:34:09 UTC (rev 26322)
@@ -0,0 +1,114 @@
+package net.i2p.util;
+
+import java.math.BigInteger;
+import java.security.SecureRandom;
+
+import junit.framework.TestCase;
+
+public class NativeBigIntegerTest extends TestCase {
+ // Run with <code>ant -Dbenchmark=true</code> to do benchmark
+ private static final boolean BENCHMARK = Boolean.getBoolean("benchmark");
+ private static int numRuns = BENCHMARK ? 200 : 5;
+
+ /*
+ * the sample numbers are elG generator/prime so we can test with reasonable
+ * numbers
+ */
+ private final static byte[] _sampleGenerator = new BigInteger("2").toByteArray();
+ private final static byte[] _samplePrime = new BigInteger("FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ + "29024E088A67CC74020BBEA63B139B22514A08798E3404DD" + "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ + "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED" + "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ + "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F" + "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ + "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B" + "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ + "DE2BCBF6955817183995497CEA956AE515D2261898FA0510" + "15728E5A8AACAA68FFFFFFFFFFFFFFFF", 16)
+ .toByteArray();
+
+ private SecureRandom rand;
+ private int runsProcessed;
+
+ private BigInteger jg;
+ private BigInteger jp;
+
+ private long totalTime = 0;
+ private long javaTime = 0;
+
+ protected void setUp() throws Exception {
+ if (!NativeBigInteger.isNative())
+ printError("can't load native code");
+
+ printInfo("DEBUG: Warming up the random number generator...");
+ rand = new SecureRandom();
+ rand.nextBoolean();
+ printInfo("DEBUG: Random number generator warmed up");
+
+ jg = new BigInteger(_sampleGenerator);
+ jp = new BigInteger(_samplePrime);
+
+ totalTime = javaTime = 0;
+ }
+
+ protected void tearDown() throws Exception {
+ printInfo("INFO: run time: " + totalTime + "ms (" + (totalTime
/ (runsProcessed + 1)) + "ms each)");
+ if (numRuns == runsProcessed)
+ printInfo("INFO: " + runsProcessed + " runs complete
without any errors");
+ else
+ printError("ERROR: " + runsProcessed + " runs until we
got an error");
+
+ printInfo("native run time: \t" + totalTime + "ms (" +
(totalTime / (runsProcessed + 1)) + "ms each)");
+ printInfo("java run time: \t" + javaTime + "ms (" + (javaTime
/ (runsProcessed + 1)) + "ms each)");
+ printInfo("native = " + ((totalTime * 100.0d) / (double)
javaTime) + "% of pure java time");
+ }
+
+ public void testModPow() {
+ for (runsProcessed = 0; runsProcessed < numRuns; runsProcessed++) {
+ BigInteger bi = new BigInteger(2048, rand);
+ NativeBigInteger g = new NativeBigInteger(_sampleGenerator);
+ NativeBigInteger p = new NativeBigInteger(_samplePrime);
+ NativeBigInteger k = new NativeBigInteger(1, bi.toByteArray());
+
+ long beforeModPow = System.currentTimeMillis();
+ BigInteger myValue = g.modPow(k, p);
+ long afterModPow = System.currentTimeMillis();
+ BigInteger jval = jg.modPow(bi, jp);
+ long afterJavaModPow = System.currentTimeMillis();
+
+ totalTime += (afterModPow - beforeModPow);
+ javaTime += (afterJavaModPow - afterModPow);
+
+ assertEquals(jval, myValue);
+ }
+ }
+
+ public void testDoubleValue() {
+ BigInteger jg = new BigInteger(_sampleGenerator);
+
+ int MULTIPLICATOR = 50000; //Run the doubleValue() calls within a loop since they are pretty fast..
+ for (runsProcessed = 0; runsProcessed < numRuns; runsProcessed++) {
+ NativeBigInteger g = new NativeBigInteger(_sampleGenerator);
+ long beforeDoubleValue = System.currentTimeMillis();
+ double dNative = 0;
+ for (int mult = 0; mult < MULTIPLICATOR; mult++)
+ dNative = g.doubleValue();
+ long afterDoubleValue = System.currentTimeMillis();
+ double jval = 0;
+ for (int mult = 0; mult < MULTIPLICATOR; mult++)
+ jval = jg.doubleValue();
+ long afterJavaDoubleValue = System.currentTimeMillis();
+
+ totalTime += (afterDoubleValue - beforeDoubleValue);
+ javaTime += (afterJavaDoubleValue - afterDoubleValue);
+
+ assertEquals(jval, dNative, 0);
+ }
+ }
+
+ private static void printInfo(String info) {
+ if (BENCHMARK)
+ System.out.println(info);
+ }
+
+ private static void printError(String info) {
+ if (BENCHMARK)
+ System.err.println(info);
+ }
+}
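
The test above exercises two things: bit-for-bit agreement between the native and pure-Java modPow (the assertEquals in testModPow()), and relative speed when run with ant -Dbenchmark=true. The agreement property can be reproduced with nothing but the JDK; in the sketch below, the 512-bit prime, the 256-bit exponent, and the class name ModPowAgreement are illustrative assumptions standing in for the ElGamal generator/prime constants the test uses.

import java.math.BigInteger;
import java.security.SecureRandom;

// Standalone sketch of the property testModPow() asserts: any alternative
// modPow implementation must agree exactly with java.math.BigInteger.
public class ModPowAgreement {
    public static void main(String[] args) {
        SecureRandom rand = new SecureRandom();
        BigInteger p = BigInteger.probablePrime(512, rand); // stand-in modulus
        BigInteger g = BigInteger.valueOf(2);               // generator
        BigInteger k = new BigInteger(256, rand);           // random exponent

        BigInteger reference = g.modPow(k, p); // pure-Java reference result
        // A native implementation (e.g. NativeBigInteger.modPow) would be
        // invoked the same way, and the test asserts equality with this value.
        System.out.println("g^k mod p = " + reference.toString(16));
    }
}

A mismatch would indicate a broken or mis-selected native binary, which is why the test asserts equality on every run rather than only benchmarking.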