Author: toad
Date: 2005-11-17 22:21:20 +0000 (Thu, 17 Nov 2005)
New Revision: 7549
Modified:
trunk/freenet/src/freenet/client/FileInserter.java
trunk/freenet/src/freenet/client/Metadata.java
trunk/freenet/src/freenet/client/SplitFetcher.java
trunk/freenet/src/freenet/client/SplitInserter.java
trunk/freenet/src/freenet/client/StandardOnionFECCodec.java
trunk/freenet/src/freenet/node/Version.java
trunk/freenet/src/freenet/store/BaseFreenetStore.java
Log:
185 (mandatory):
Hopefully working multi-level splitfile support (anything over roughly 20MB is
multi-level).
Fixed JVM crashes by always using pure Java to decode splitfiles.
Swap the datastore file and index file arguments around when constructing the stores.
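
To make the "roughly 20MB" figure concrete: every 32KB CHK block contributes one key to the
splitfile metadata, so once a file has enough blocks the metadata itself no longer fits in a
single block and must be inserted as a splitfile of its own, flagged MULTI_LEVEL_METADATA
rather than SIMPLE_REDIRECT so the fetcher resolves it recursively. The toy Java sketch below
only illustrates that arithmetic; it is not Freenet code, and the serialized key size and
check-block ratio are rough assumptions, so the exact crossover point is indicative.

/**
 * Toy illustration (not Freenet code) of why large inserts become multi-level:
 * the metadata is essentially the list of all data and check keys, and past a
 * certain file size that list itself outgrows one block.
 */
public class MultiLevelSketch {
    static final int BLOCK_SIZE = 32 * 1024;  // matches NodeCHK.BLOCK_SIZE
    static final int KEY_SIZE = 64;           // assumed rough size of one serialized CHK reference
    static final double CHECK_RATIO = 0.5;    // assumed fraction of extra FEC check blocks

    /** How many layers of splitfile metadata a file of the given size needs. */
    static int metadataLevels(long dataLength) {
        int levels = 0;
        long length = dataLength;
        while (length > BLOCK_SIZE) {
            long dataBlocks = (length + BLOCK_SIZE - 1) / BLOCK_SIZE;
            long checkBlocks = (long) (dataBlocks * CHECK_RATIO);
            // The next level of metadata lists every data and check key.
            length = (dataBlocks + checkBlocks) * KEY_SIZE;
            levels++;
        }
        return levels;
    }

    public static void main(String[] args) {
        // With the assumptions above, files up to ~10MB need one metadata level,
        // while ~20MB and larger need two (i.e. multi-level).
        for (long mb : new long[] {1, 10, 20, 30, 1024}) {
            System.out.println(mb + "MB -> " + metadataLevels(mb * 1024 * 1024) + " metadata level(s)");
        }
    }
}
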
Modified: trunk/freenet/src/freenet/client/FileInserter.java
===================================================================
--- trunk/freenet/src/freenet/client/FileInserter.java	2005-11-17 21:18:43 UTC (rev 7548)
+++ trunk/freenet/src/freenet/client/FileInserter.java	2005-11-17 22:21:20 UTC (rev 7549)
@@ -105,7 +105,7 @@
}
// Too big, encode to a splitfile
-		SplitInserter splitInsert = new SplitInserter(data, block.clientMetadata, bestCodec, ctx.splitfileAlgorithm, ctx, this, NodeCHK.BLOCK_SIZE, getCHKOnly);
+		SplitInserter splitInsert = new SplitInserter(data, block.clientMetadata, bestCodec, ctx.splitfileAlgorithm, ctx, this, NodeCHK.BLOCK_SIZE, getCHKOnly, metadata);
return splitInsert.run();
}
Modified: trunk/freenet/src/freenet/client/Metadata.java
===================================================================
--- trunk/freenet/src/freenet/client/Metadata.java	2005-11-17 21:18:43 UTC (rev 7548)
+++ trunk/freenet/src/freenet/client/Metadata.java	2005-11-17 22:21:20 UTC (rev 7549)
@@ -304,8 +304,11 @@
throw new IllegalArgumentException();
}
-	public Metadata(short algo, FreenetURI[] dataURIs, FreenetURI[] checkURIs, int segmentSize, int checkSegmentSize, ClientMetadata cm, long dataLength, short compressionAlgo) {
-		documentType = SIMPLE_REDIRECT;
+	public Metadata(short algo, FreenetURI[] dataURIs, FreenetURI[] checkURIs, int segmentSize, int checkSegmentSize, ClientMetadata cm, long dataLength, short compressionAlgo, boolean isMetadata) {
+ if(isMetadata)
+ documentType = MULTI_LEVEL_METADATA;
+ else
+ documentType = SIMPLE_REDIRECT;
splitfile = true;
splitfileAlgorithm = algo;
this.dataLength = dataLength;
Modified: trunk/freenet/src/freenet/client/SplitFetcher.java
===================================================================
--- trunk/freenet/src/freenet/client/SplitFetcher.java	2005-11-17 21:18:43 UTC (rev 7548)
+++ trunk/freenet/src/freenet/client/SplitFetcher.java	2005-11-17 22:21:20 UTC (rev 7549)
@@ -111,7 +111,7 @@
unstartedSegments = new Vector();
for(int i=0;i<segments.length;i++)
unstartedSegments.add(segments[i]);
- Logger.minor(this, "Segments: "+unstartedSegments.size());
+		Logger.minor(this, "Segments: "+unstartedSegments.size()+", data keys: "+splitfileDataBlocks.length+", check keys: "+(splitfileCheckBlocks==null?0:splitfileCheckBlocks.length));
}
/**
Modified: trunk/freenet/src/freenet/client/SplitInserter.java
===================================================================
--- trunk/freenet/src/freenet/client/SplitInserter.java	2005-11-17 21:18:43 UTC (rev 7548)
+++ trunk/freenet/src/freenet/client/SplitInserter.java	2005-11-17 22:21:20 UTC (rev 7549)
@@ -26,6 +26,7 @@
final int segmentSize;
final int checkSegmentSize;
final int blockSize;
+ final boolean isMetadata;
SplitfileBlock[] origDataBlocks;
InsertSegment encodingSegment;
InsertSegment[] segments;
@@ -38,7 +39,7 @@
private SplitfileBlock[] fatalErrorBlocks;
private FileInserter inserter;
-	public SplitInserter(Bucket data, ClientMetadata clientMetadata, Compressor compressor, short splitfileAlgorithm, InserterContext ctx, FileInserter inserter, int blockLength, boolean getCHKOnly) throws InserterException {
+	public SplitInserter(Bucket data, ClientMetadata clientMetadata, Compressor compressor, short splitfileAlgorithm, InserterContext ctx, FileInserter inserter, int blockLength, boolean getCHKOnly, boolean isMetadata) throws InserterException {
this.origData = data;
this.getCHKOnly = getCHKOnly;
this.blockSize = blockLength;
@@ -59,6 +60,7 @@
			throw new InserterException(InserterException.BUCKET_ERROR, e, null);
}
this.inserter = inserter;
+ this.isMetadata = isMetadata;
}
/**
@@ -106,6 +108,8 @@
FreenetURI[] dataURIs = getDataURIs();
FreenetURI[] checkURIs = getCheckURIs();
+		Logger.minor(this, "Data URIs: "+dataURIs.length+", check URIs: "+checkURIs.length);
+
boolean missingURIs = anyNulls(dataURIs) || anyNulls(checkURIs);
if(missingURIs && fatalErrors == 0 && failed == 0)
@@ -115,7 +119,7 @@
if(!missingURIs) {
-			Metadata metadata = new Metadata(splitfileAlgorithm, dataURIs, checkURIs, segmentSize, checkSegmentSize, clientMetadata, dataLength, compressionCodec);
+			Metadata metadata = new Metadata(splitfileAlgorithm, dataURIs, checkURIs, segmentSize, checkSegmentSize, clientMetadata, dataLength, compressionCodec, isMetadata);
Bucket mbucket;
try {
Modified: trunk/freenet/src/freenet/client/StandardOnionFECCodec.java
===================================================================
--- trunk/freenet/src/freenet/client/StandardOnionFECCodec.java	2005-11-17 21:18:43 UTC (rev 7548)
+++ trunk/freenet/src/freenet/client/StandardOnionFECCodec.java	2005-11-17 22:21:20 UTC (rev 7549)
@@ -116,7 +116,8 @@
return codec;
}
- private final FECCode code;
+ private final FECCode encoder;
+ private final FECCode decoder;
private final int k;
private final int n;
@@ -124,10 +125,11 @@
public StandardOnionFECCodec(int k, int n) {
this.k = k;
this.n = n;
- code = DefaultFECCodeFactory.getDefault().createFECCode(k,n);
- Logger.minor(this, "FEC impl is "+code);
+ // Best performance, doesn't crash
+ encoder = DefaultFECCodeFactory.getDefault().createFECCode(k,n);
// revert to below if above causes JVM crashes
- //code = new PureCode(k,n);
+		// Worst performance, but the native decoder crashes the JVM, so always decode in pure Java
+ decoder = new PureCode(k,n);
}
private static Object runningDecodesSync = new Object();
@@ -249,7 +251,7 @@
					int[] disposableIndexes = new int[packetIndexes.length];
					System.arraycopy(packetIndexes, 0, disposableIndexes, 0, packetIndexes.length);
- code.decode(packets, disposableIndexes);
+					decoder.decode(packets, disposableIndexes);
					// packets now contains an array of decoded blocks, in order
// Write the data out
for (int i = 0; i < k; i++) {
@@ -405,7 +407,7 @@
// Runtime.getRuntime().runFinalization();
					long memUsedBeforeStripe = Runtime.getRuntime().totalMemory() - Runtime.getRuntime().freeMemory();
					Logger.minor(this, "Memory in use before stripe: "+memUsedBeforeStripe);
-					code.encode(dataPackets, checkPackets, toEncode);
+					encoder.encode(dataPackets, checkPackets, toEncode);
// Runtime.getRuntime().gc();
// Runtime.getRuntime().runFinalization();
// Runtime.getRuntime().gc();
Modified: trunk/freenet/src/freenet/node/Version.java
===================================================================
--- trunk/freenet/src/freenet/node/Version.java	2005-11-17 21:18:43 UTC (rev 7548)
+++ trunk/freenet/src/freenet/node/Version.java	2005-11-17 22:21:20 UTC (rev 7549)
@@ -20,10 +20,10 @@
public static final String protocolVersion = "1.0";
/** The build number of the current revision */
- public static final int buildNumber = 184;
+ public static final int buildNumber = 185;
/** Oldest build of Fred we will talk to */
- public static final int lastGoodBuild = 184;
+ public static final int lastGoodBuild = 185;
/** The highest reported build of fred */
public static int highestSeenBuild = buildNumber;
Modified: trunk/freenet/src/freenet/store/BaseFreenetStore.java
===================================================================
--- trunk/freenet/src/freenet/store/BaseFreenetStore.java	2005-11-17 21:18:43 UTC (rev 7548)
+++ trunk/freenet/src/freenet/store/BaseFreenetStore.java	2005-11-17 22:21:20 UTC (rev 7549)
@@ -31,8 +31,8 @@
	public BaseFreenetStore(RandomAccessFile storeFile, RandomAccessFile storeIndexFile,
			RandomAccessFile headerStoreFile, RandomAccessFile headerStoreIndexFile, long maxBlocks) throws Exception {
-		dataStore = new DataStore(storeFile, storeIndexFile, DATA_BLOCK_SIZE, maxBlocks);
-		headersStore = new DataStore(headerStoreFile, headerStoreIndexFile, HEADER_BLOCK_SIZE, maxBlocks);
+		dataStore = new DataStore(storeIndexFile, storeFile, DATA_BLOCK_SIZE, maxBlocks);
+		headersStore = new DataStore(headerStoreIndexFile, headerStoreFile, HEADER_BLOCK_SIZE, maxBlocks);
}
/**