Author: toad
Date: 2006-11-12 02:03:36 +0000 (Sun, 12 Nov 2006)
New Revision: 10895
Added:
trunk/freenet/src/freenet/node/SemiOrderedShutdownHook.java
Modified:
trunk/freenet/src/freenet/node/Node.java
trunk/freenet/src/freenet/store/BerkeleyDBFreenetStore.java
Log:
Don't close the global Environment more than once, and don't close it until the
individual store databases have been closed.
Also add a utility class for shutdown hooks that must be executed one group
after another.
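
For context, the intended ordering is: each BerkeleyDBFreenetStore registers its ShutdownHook thread as an "early" job, and Node registers the Environment.close() call as a "late" job, so the shared Environment is closed exactly once and only after every store database has had the chance to close. Below is a minimal usage sketch of the new class; the class name ShutdownOrderingSketch and the println bodies are hypothetical placeholders standing in for the real store and Environment close calls shown in the diff.

	import freenet.node.SemiOrderedShutdownHook;

	// Sketch only: illustrates the two-phase ordering SemiOrderedShutdownHook provides.
	// The println bodies are placeholders, not the real store/Environment close calls.
	public class ShutdownOrderingSketch {
		public static void main(String[] args) {
			SemiOrderedShutdownHook hook = new SemiOrderedShutdownHook();
			Runtime.getRuntime().addShutdownHook(hook);

			// Early jobs: started together and joined (with a timeout)
			// before any late job runs; one per store database in this commit.
			hook.addEarlyJob(new Thread() {
				public void run() {
					System.err.println("closing one store database (placeholder)");
				}
			});

			// Late job: runs only after the early jobs have completed or timed out.
			// In Node.java this is where the shared Environment is closed.
			hook.addLateJob(new Thread() {
				public void run() {
					System.err.println("closing the global Environment (placeholder)");
				}
			});
			// On normal JVM exit the hook runs: early jobs first, then late jobs.
		}
	}

Because run() joins each job with a 100*1000 ms timeout, a hung job cannot hold up shutdown indefinitely.
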
Modified: trunk/freenet/src/freenet/node/Node.java
===================================================================
--- trunk/freenet/src/freenet/node/Node.java	2006-11-12 01:47:41 UTC (rev 10894)
+++ trunk/freenet/src/freenet/node/Node.java	2006-11-12 02:03:36 UTC (rev 10895)
@@ -270,6 +270,7 @@
/* These are private because must be protected by synchronized(this) */
private final Environment storeEnvironment;
private final EnvironmentMutableConfig envMutableConfig;
+ private final SemiOrderedShutdownHook storeShutdownHook;
	/** The CHK datastore. Long term storage; data should only be inserted here if
	 * this node is the closest location on the chain so far, and it is on an
	 * insert (because inserts will always reach the most specialized node; if we
@@ -1121,7 +1122,22 @@
e.printStackTrace();
			throw new NodeInitException(EXIT_STORE_OTHER, e.getMessage());
}
+
+ storeShutdownHook = new SemiOrderedShutdownHook();
+ Runtime.getRuntime().addShutdownHook(storeShutdownHook);
+ storeShutdownHook.addLateJob(new Thread() {
+ public void run() {
+ try {
+ storeEnvironment.close();
+					System.err.println("Successfully closed all datastores.");
+ } catch (Throwable t) {
+					System.err.println("Caught "+t+" closing environment");
+ t.printStackTrace();
+ }
+ }
+ });
+
nodeConfig.register("databaseMaxMemory", "20M", sortOrder++,
true, false, "Datastore maximum memory usage", "Maximum memory usage of the
database backing the datastore indexes", new LongCallback() {
public long get() {
@@ -1144,28 +1160,28 @@
Logger.normal(this, "Initializing CHK Datastore");
System.out.println("Initializing CHK Datastore
("+maxStoreKeys+" keys)");
chkDatastore =
BerkeleyDBFreenetStore.construct(lastVersion, storeDir, true, suffix,
maxStoreKeys,
- CHKBlock.DATA_LENGTH,
CHKBlock.TOTAL_HEADERS_LENGTH, true, BerkeleyDBFreenetStore.TYPE_CHK,
storeEnvironment, random);
+ CHKBlock.DATA_LENGTH,
CHKBlock.TOTAL_HEADERS_LENGTH, true, BerkeleyDBFreenetStore.TYPE_CHK,
storeEnvironment, random, storeShutdownHook);
Logger.normal(this, "Initializing CHK Datacache");
System.out.println("Initializing CHK Datacache
("+maxCacheKeys+":"+maxCacheKeys+" keys)");
chkDatacache =
BerkeleyDBFreenetStore.construct(lastVersion, storeDir, false, suffix,
maxCacheKeys,
- CHKBlock.DATA_LENGTH,
CHKBlock.TOTAL_HEADERS_LENGTH, true, BerkeleyDBFreenetStore.TYPE_CHK,
storeEnvironment, random);
+ CHKBlock.DATA_LENGTH,
CHKBlock.TOTAL_HEADERS_LENGTH, true, BerkeleyDBFreenetStore.TYPE_CHK,
storeEnvironment, random, storeShutdownHook);
Logger.normal(this, "Initializing pubKey Datastore");
System.out.println("Initializing pubKey Datastore");
			pubKeyDatastore = BerkeleyDBFreenetStore.construct(lastVersion, storeDir, true, suffix, maxStoreKeys,
-				DSAPublicKey.PADDED_SIZE, 0, true, BerkeleyDBFreenetStore.TYPE_PUBKEY, storeEnvironment, random);
+				DSAPublicKey.PADDED_SIZE, 0, true, BerkeleyDBFreenetStore.TYPE_PUBKEY, storeEnvironment, random, storeShutdownHook);
Logger.normal(this, "Initializing pubKey Datacache");
System.out.println("Initializing pubKey Datacache
("+maxCacheKeys+" keys)");
pubKeyDatacache =
BerkeleyDBFreenetStore.construct(lastVersion, storeDir, false, suffix,
maxCacheKeys,
- DSAPublicKey.PADDED_SIZE, 0, true,
BerkeleyDBFreenetStore.TYPE_PUBKEY, storeEnvironment, random);
+ DSAPublicKey.PADDED_SIZE, 0, true,
BerkeleyDBFreenetStore.TYPE_PUBKEY, storeEnvironment, random,
storeShutdownHook);
// FIXME can't auto-fix SSK stores.
Logger.normal(this, "Initializing SSK Datastore");
System.out.println("Initializing SSK Datastore");
			sskDatastore = BerkeleyDBFreenetStore.construct(lastVersion, storeDir, true, suffix, maxStoreKeys,
-				SSKBlock.DATA_LENGTH, SSKBlock.TOTAL_HEADERS_LENGTH, false, BerkeleyDBFreenetStore.TYPE_SSK, storeEnvironment, random);
+				SSKBlock.DATA_LENGTH, SSKBlock.TOTAL_HEADERS_LENGTH, false, BerkeleyDBFreenetStore.TYPE_SSK, storeEnvironment, random, storeShutdownHook);
Logger.normal(this, "Initializing SSK Datacache");
System.out.println("Initializing SSK Datacache
("+maxCacheKeys+" keys)");
sskDatacache =
BerkeleyDBFreenetStore.construct(lastVersion, storeDir, false, suffix,
maxStoreKeys,
- SSKBlock.DATA_LENGTH,
SSKBlock.TOTAL_HEADERS_LENGTH, false, BerkeleyDBFreenetStore.TYPE_SSK,
storeEnvironment, random);
+ SSKBlock.DATA_LENGTH,
SSKBlock.TOTAL_HEADERS_LENGTH, false, BerkeleyDBFreenetStore.TYPE_SSK,
storeEnvironment, random, storeShutdownHook);
} catch (FileNotFoundException e1) {
String msg = "Could not open datastore: "+e1;
Logger.error(this, msg, e1);
@@ -1184,7 +1200,7 @@
e1.printStackTrace();
throw new NodeInitException(EXIT_STORE_OTHER, msg);
}
-
+
nodeConfig.register("throttleFile", "throttle.dat",
sortOrder++, true, false, "File to store the persistent throttle data to",
"File to store the persistent throttle data to", new StringCallback() {
public String get() {
Added: trunk/freenet/src/freenet/node/SemiOrderedShutdownHook.java
===================================================================
--- trunk/freenet/src/freenet/node/SemiOrderedShutdownHook.java	2006-11-12 01:47:41 UTC (rev 10894)
+++ trunk/freenet/src/freenet/node/SemiOrderedShutdownHook.java	2006-11-12 02:03:36 UTC (rev 10895)
@@ -0,0 +1,58 @@
+package freenet.node;
+
+import java.util.ArrayList;
+
+public class SemiOrderedShutdownHook extends Thread {
+
+ static final int TIMEOUT = 100*1000;
+ private final ArrayList earlyJobs;
+ private final ArrayList lateJobs;
+
+ public SemiOrderedShutdownHook() {
+ earlyJobs = new ArrayList();
+ lateJobs = new ArrayList();
+ }
+
+ public synchronized void addEarlyJob(Thread r) {
+ earlyJobs.add(r);
+ }
+
+ public synchronized void addLateJob(Thread r) {
+ lateJobs.add(r);
+ }
+
+ public void run() {
+		// First run early jobs, all at once, and wait for them to all complete.
+
+ for(int i=0;i<earlyJobs.size();i++) {
+ Thread r = (Thread) earlyJobs.get(i);
+ r.start();
+ }
+ for(int i=0;i<earlyJobs.size();i++) {
+ Thread r = (Thread) earlyJobs.get(i);
+ try {
+ r.join(TIMEOUT);
+ } catch (InterruptedException e) {
+ // :(
+ // May as well move on
+ }
+ }
+
+		// Then run late jobs, all at once, and wait for them to all complete (JVM will exit when we return).
+ for(int i=0;i<lateJobs.size();i++) {
+ Thread r = (Thread) lateJobs.get(i);
+ r.start();
+ }
+ for(int i=0;i<lateJobs.size();i++) {
+ Thread r = (Thread) lateJobs.get(i);
+ try {
+ r.join(TIMEOUT);
+ } catch (InterruptedException e) {
+ // :(
+ // May as well move on
+ }
+ }
+
+ }
+
+}
Modified: trunk/freenet/src/freenet/store/BerkeleyDBFreenetStore.java
===================================================================
--- trunk/freenet/src/freenet/store/BerkeleyDBFreenetStore.java	2006-11-12 01:47:41 UTC (rev 10894)
+++ trunk/freenet/src/freenet/store/BerkeleyDBFreenetStore.java	2006-11-12 02:03:36 UTC (rev 10895)
@@ -37,6 +37,7 @@
import freenet.keys.NodeSSK;
import freenet.keys.SSKBlock;
import freenet.keys.SSKVerifyException;
+import freenet.node.SemiOrderedShutdownHook;
import freenet.support.Fields;
import freenet.support.HexUtil;
import freenet.support.Logger;
@@ -86,7 +87,7 @@
private final static byte[] dummy = new byte[0];
	public static BerkeleyDBFreenetStore construct(int lastVersion, File baseStoreDir, boolean isStore,
-			String suffix, long maxStoreKeys, int blockSize, int headerSize, boolean throwOnTooFewKeys, short type, Environment storeEnvironment, RandomSource random) throws Exception {
+			String suffix, long maxStoreKeys, int blockSize, int headerSize, boolean throwOnTooFewKeys, short type, Environment storeEnvironment, RandomSource random, SemiOrderedShutdownHook storeShutdownHook) throws Exception {
/**
* Migration strategy:
@@ -130,7 +131,7 @@
			// Don't need to create a new Environment, since we can use the old one.
			tmp = openStore(storeEnvironment, newDBPrefix, newStoreFile, newFixSecondaryFile, maxStoreKeys,
-					blockSize, headerSize, throwOnTooFewKeys, false, lastVersion, type, false);
+					blockSize, headerSize, throwOnTooFewKeys, false, lastVersion, type, false, storeShutdownHook);
} else if(oldDir.exists() && oldStoreFile.exists()) {
@@ -179,7 +180,7 @@
// Open the new store
				tmp = openStore(storeEnvironment, newDBPrefix, storeFile, newFixSecondaryFile, maxStoreKeys,
-						blockSize, headerSize, false, true, lastVersion, type, true);
+						blockSize, headerSize, false, true, lastVersion, type, true, storeShutdownHook);
				// Migrate all tuples from old database to new database.
migrateTuples(oldEnv, oldChkDB, tmp);
@@ -197,7 +198,7 @@
				// Reconstruct the new database from the store file which is now in the right place.
				tmp = openStore(storeEnvironment, newDBPrefix, storeFile, newFixSecondaryFile, maxStoreKeys,
-						blockSize, headerSize, true, false, lastVersion, type, false);
+						blockSize, headerSize, true, false, lastVersion, type, false, storeShutdownHook);
}
@@ -207,7 +208,7 @@
// Start from scratch, with new store.
			tmp = openStore(storeEnvironment, newDBPrefix, newStoreFile, newFixSecondaryFile, maxStoreKeys,
-					blockSize, headerSize, true, false, lastVersion, type, false);
+					blockSize, headerSize, true, false, lastVersion, type, false, storeShutdownHook);
}
@@ -299,14 +300,15 @@
}
	private static BerkeleyDBFreenetStore openStore(Environment storeEnvironment, String newDBPrefix, File newStoreFile,
-			File newFixSecondaryFile, long maxStoreKeys, int blockSize, int headerSize, boolean throwOnTooFewKeys, boolean noCheck, int lastVersion, short type, boolean wipe) throws Exception {
+			File newFixSecondaryFile, long maxStoreKeys, int blockSize, int headerSize, boolean throwOnTooFewKeys,
+			boolean noCheck, int lastVersion, short type, boolean wipe, SemiOrderedShutdownHook storeShutdownHook) throws Exception {
try {
if((lastVersion > 0) && (lastVersion < 852)) {
throw new DatabaseException("Reconstructing
store because started from old version");
}
			return new BerkeleyDBFreenetStore(storeEnvironment, newDBPrefix, newStoreFile, newFixSecondaryFile,
-					maxStoreKeys, blockSize, headerSize, throwOnTooFewKeys, noCheck, wipe);
+					maxStoreKeys, blockSize, headerSize, throwOnTooFewKeys, noCheck, wipe, storeShutdownHook);
} catch (DatabaseException e) {
System.err.println("Could not open store: "+e);
@@ -322,7 +324,7 @@
// Reconstruct
-			return new BerkeleyDBFreenetStore(storeEnvironment, newDBPrefix, newStoreFile, newFixSecondaryFile, maxStoreKeys, blockSize, headerSize, type, noCheck);
+			return new BerkeleyDBFreenetStore(storeEnvironment, newDBPrefix, newStoreFile, newFixSecondaryFile, maxStoreKeys, blockSize, headerSize, type, noCheck, storeShutdownHook);
}
}
@@ -400,7 +402,7 @@
* @throws DatabaseException
	 * @throws FileNotFoundException if the dir does not exist and could not be created
*/
-	public BerkeleyDBFreenetStore(Environment env, String prefix, File storeFile, File fixSecondaryFile, long maxChkBlocks, int blockSize, int headerSize, boolean throwOnTooFewKeys, boolean noCheck, boolean wipe) throws IOException, DatabaseException {
+	public BerkeleyDBFreenetStore(Environment env, String prefix, File storeFile, File fixSecondaryFile, long maxChkBlocks, int blockSize, int headerSize, boolean throwOnTooFewKeys, boolean noCheck, boolean wipe, SemiOrderedShutdownHook storeShutdownHook) throws IOException, DatabaseException {
logMINOR = Logger.shouldLog(Logger.MINOR, this);
this.dataBlockSize = blockSize;
this.headerBlockSize = headerSize;
@@ -545,7 +547,7 @@
}
// Add shutdownhook
-			Runtime.getRuntime().addShutdownHook(new ShutdownHook());
+ storeShutdownHook.addEarlyJob(new ShutdownHook());
} catch (DatabaseException t) {
Logger.error(this, "Caught "+t, t);
close(false);
@@ -855,7 +857,7 @@
* @param the directory where the store is located
	 * @throws FileNotFoundException if the dir does not exist and could not be created
*/
-	public BerkeleyDBFreenetStore(Environment env, String prefix, File storeFile, File fixSecondaryFile, long maxChkBlocks, int blockSize, int headerSize, short type, boolean noCheck) throws Exception {
+	public BerkeleyDBFreenetStore(Environment env, String prefix, File storeFile, File fixSecondaryFile, long maxChkBlocks, int blockSize, int headerSize, short type, boolean noCheck, SemiOrderedShutdownHook storeShutdownHook) throws Exception {
logMINOR = Logger.shouldLog(Logger.MINOR, this);
this.dataBlockSize = blockSize;
this.headerBlockSize = headerSize;
@@ -928,7 +930,7 @@
}
// Add shutdownhook
- Runtime.getRuntime().addShutdownHook(new ShutdownHook());
+ storeShutdownHook.addEarlyJob(new ShutdownHook());
}
private void reconstruct(short type) throws DatabaseException {
@@ -1804,15 +1806,15 @@
			// This is nothing too problematic however since the worst thing that should
			// happen is that we miss the last few store()'s and get an exception.
logMINOR = Logger.shouldLog(Logger.MINOR, this);
- if(logMINOR) Logger.minor(this, "Closing database.");
+ if(logMINOR) Logger.minor(this, "Closing database "+this);
closed=true;
if(reallyClosed) {
- Logger.normal(this, "Already closed");
+ Logger.error(this, "Already closed "+this);
return;
}
synchronized(closeLock) {
if(reallyClosed) {
- Logger.normal(this, "Already closed");
+				Logger.error(this, "Already closed "+this);
return;
}
// Give all threads some time to complete
@@ -1846,13 +1848,6 @@
System.err.println("Caught closing
database: "+t);
t.printStackTrace();
}
- try {
- if(environment != null)
- environment.close();
- } catch (Throwable t) {
-					System.err.println("Caught closing database: "+t);
- t.printStackTrace();
- }
				if(logMINOR) Logger.minor(this, "Closing database finished.");
System.err.println("Closed database");
reallyClosed = true;