Author: toad
Date: 2005-10-27 21:16:28 +0000 (Thu, 27 Oct 2005)
New Revision: 7458
Added:
trunk/freenet/src/freenet/client/ArchiveHandler.java
trunk/freenet/src/freenet/support/PaddedEphemerallyEncryptedBucket.java
trunk/freenet/src/freenet/support/RandomAccessFileBucket.java
Removed:
trunk/freenet/src/freenet/support/PaddedEncryptedBucket.java
Modified:
trunk/freenet/src/freenet/client/ArchiveManager.java
trunk/freenet/src/freenet/client/ArchiveStoreContext.java
trunk/freenet/src/freenet/client/ArchiveStoreItem.java
trunk/freenet/src/freenet/client/RealArchiveStoreItem.java
trunk/freenet/src/freenet/client/TempStoreElement.java
trunk/freenet/src/freenet/crypt/CipherOutputStream.java
trunk/freenet/src/freenet/support/Bucket.java
trunk/freenet/src/freenet/support/BucketTools.java
trunk/freenet/src/freenet/support/io/FileBucket.java
trunk/freenet/src/freenet/support/io/FileBucketFactory.java
Log:
Implemented PaddedEphemerallyEncryptedBucket (renamed from
PaddedEncryptedBucket).
Fixed some compile errors.
More work on archives.
Added: trunk/freenet/src/freenet/client/ArchiveHandler.java
===================================================================
--- trunk/freenet/src/freenet/client/ArchiveHandler.java 2005-10-27
17:49:43 UTC (rev 7457)
+++ trunk/freenet/src/freenet/client/ArchiveHandler.java 2005-10-27
21:16:28 UTC (rev 7458)
@@ -0,0 +1,37 @@
+package freenet.client;
+
+import freenet.support.Bucket;
+
+/**
+ * The public face (to Fetcher, for example) of ArchiveStoreContext.
+ * Just has methods for fetching stuff.
+ */
+interface ArchiveHandler {
+
+ /**
+ * Get the metadata for this ZIP manifest, as a Bucket.
+ * @throws FetchException If the container could not be fetched.
+ * @throws MetadataParseException If there was an error parsing
intermediary metadata.
+ */
+ public abstract Bucket getMetadata(ArchiveContext archiveContext,
+ FetcherContext fetchContext, ClientMetadata dm, int
recursionLevel)
+ throws ArchiveFailureException, ArchiveRestartException,
+ MetadataParseException, FetchException;
+
+ /**
+ * Get a file from this ZIP manifest, as a Bucket.
+ * If possible, read it from cache. If necessary, refetch the
+ * container and extract it. If that fails, throw.
+ * @param inSplitZipManifest If true, indicates that the key points to
a splitfile zip manifest,
+ * which means that we need to pass a flag to the fetcher to tell it to
pretend it was a straight
+ * splitfile.
+ * @throws FetchException
+ * @throws MetadataParseException
+ */
+ public abstract Bucket get(String internalName,
+ ArchiveContext archiveContext, FetcherContext
fetchContext,
+ ClientMetadata dm, int recursionLevel)
+ throws ArchiveFailureException, ArchiveRestartException,
+ MetadataParseException, FetchException;
+
+}
\ No newline at end of file
Modified: trunk/freenet/src/freenet/client/ArchiveManager.java
===================================================================
--- trunk/freenet/src/freenet/client/ArchiveManager.java 2005-10-27
17:49:43 UTC (rev 7457)
+++ trunk/freenet/src/freenet/client/ArchiveManager.java 2005-10-27
21:16:28 UTC (rev 7458)
@@ -15,10 +15,11 @@
import freenet.crypt.ciphers.Rijndael;
import freenet.keys.FreenetURI;
import freenet.support.Bucket;
+import freenet.support.BucketTools;
import freenet.support.HexUtil;
import freenet.support.LRUHashtable;
import freenet.support.Logger;
-import freenet.support.PaddedEncryptedBucket;
+import freenet.support.PaddedEphemerallyEncryptedBucket;
import freenet.support.io.FileBucket;
/**
@@ -27,6 +28,8 @@
* files open due to the limitations of the API)
* - Keep up to Y bytes (after padding and overheads) of decoded data on disk
* (the OS is quite capable of determining what to keep in actual RAM)
+ *
+ * Always take the lock on ArchiveStoreContext before the lock on
ArchiveManager, NOT the other way around.
*/
public class ArchiveManager {
@@ -104,15 +107,23 @@
return ase.dataAsBucket();
}
+ public synchronized void removeCachedItem(ArchiveStoreItem item) {
+ storedData.removeKey(item.key);
+ }
+
/**
- * Extract data to cache.
+ * Extract data to cache. Call synchronized on ctx.
* @param key The key the data was fetched from.
* @param archiveType The archive type. Must be Metadata.ARCHIVE_ZIP.
* @param data The actual data fetched.
* @param archiveContext The context for the whole fetch process.
- * @throws ArchiveFailureException
+ * @param ctx The ArchiveStoreContext for this key.
+ * @throws ArchiveFailureException If we could not extract the data, or
it was too big, etc.
+ * @throws ArchiveRestartException
+ * @throws ArchiveRestartException If the request needs to be restarted
because the archive
+ * changed.
*/
- public void extractToCache(FreenetURI key, short archiveType, Bucket
data, ArchiveContext archiveContext, ArchiveStoreContext ctx) throws
ArchiveFailureException {
+ public void extractToCache(FreenetURI key, short archiveType, Bucket
data, ArchiveContext archiveContext, ArchiveStoreContext ctx) throws
ArchiveFailureException, ArchiveRestartException {
ctx.removeAllCachedItems(); // flush cache anyway
long expectedSize = ctx.getLastSize();
long archiveSize = data.size();
@@ -120,14 +131,21 @@
* after we have unpacked everything.
*/
boolean throwAtExit = false;
- if(archiveSize != expectedSize) {
+ if(expectedSize != -1 && archiveSize != expectedSize) {
throwAtExit = true;
+ ctx.setLastSize(archiveSize);
}
byte[] expectedHash = ctx.getLastHash();
if(expectedHash != null) {
- byte[] realHash = BucketTools.hash(data);
+ byte[] realHash;
+ try {
+ realHash = BucketTools.hash(data);
+ } catch (IOException e) {
+ throw new ArchiveFailureException("Error
reading archive data: "+e, e);
+ }
if(!Arrays.equals(realHash, expectedHash))
throwAtExit = true;
+ ctx.setLastHash(realHash);
}
if(data.size() > maxArchiveSize)
throw new ArchiveFailureException("Archive too big");
@@ -140,7 +158,7 @@
ZipEntry entry = zis.getNextEntry();
byte[] buf = new byte[4096];
HashSet names = new HashSet();
- boolean gotMetadata;
+ boolean gotMetadata = false;
outer: while(entry != null) {
entry = zis.getNextEntry();
String name = entry.getName();
@@ -193,6 +211,23 @@
}
}
+ /**
+ * Generate fake metadata for an archive which doesn't have any.
+ * @param ctx The context object.
+ * @param key The key from which the archive we are unpacking was
fetched.
+ * @param names Set of names in the archive.
+ */
+ private void generateMetadata(ArchiveStoreContext ctx, FreenetURI key,
HashSet names) {
+ /* What we have to do is to:
+ * - Construct a filesystem tree of the names.
+ * - Turn each level of the tree into a Metadata object,
including those below it, with
+ * simple manifests and archive internal redirects.
+ * - Turn the master Metadata object into binary metadata, with
all its subsidiaries.
+ * - Create a .metadata entry containing this data.
+ */
+ // TODO implement!
+ }
+
private void addErrorElement(ArchiveStoreContext ctx, FreenetURI key,
String name, String error) {
ErrorArchiveStoreItem element = new ErrorArchiveStoreItem(ctx,
key, name, error);
synchronized(storedData) {
@@ -239,9 +274,7 @@
byte[] cipherKey = new byte[32];
random.nextBytes(cipherKey);
try {
- Rijndael aes = new Rijndael(256, 256);
- PCFBMode pcfb = new PCFBMode(aes);
- PaddedEncryptedBucket encryptedBucket = new
PaddedEncryptedBucket(fb, pcfb, 1024);
+ PaddedEphemerallyEncryptedBucket encryptedBucket = new
PaddedEphemerallyEncryptedBucket(fb, 1024, random);
return new TempStoreElement(myFile, fb,
encryptedBucket);
} catch (UnsupportedCipherException e) {
throw new Error("Unsupported cipher: AES 256/256!", e);
Modified: trunk/freenet/src/freenet/client/ArchiveStoreContext.java
===================================================================
--- trunk/freenet/src/freenet/client/ArchiveStoreContext.java 2005-10-27
17:49:43 UTC (rev 7457)
+++ trunk/freenet/src/freenet/client/ArchiveStoreContext.java 2005-10-27
21:16:28 UTC (rev 7458)
@@ -2,6 +2,7 @@
import freenet.keys.FreenetURI;
import freenet.support.Bucket;
+import freenet.support.DoublyLinkedListImpl;
/**
* Tracks all files currently in the cache from a given key.
@@ -9,6 +10,8 @@
* then throw an ArchiveRestartedException).
* Provides fetch methods for Fetcher, which try the cache and then fetch if
necessary,
* subject to the above.
+ *
+ * Always take the lock on ArchiveStoreContext before the lock on
ArchiveManager, NOT the other way around.
*/
class ArchiveStoreContext implements ArchiveHandler {
@@ -20,6 +23,7 @@
this.manager = manager;
this.key = key;
this.archiveType = archiveType;
+ myItems = new DoublyLinkedListImpl();
}
public void finalize() {
@@ -59,8 +63,63 @@
if(fetchContext == null) return null;
Fetcher fetcher = new Fetcher(key, fetchContext,
archiveContext);
FetchResult result = fetcher.realRun(dm,
recursionLevel, key);
- manager.extractToCache(key, archiveType, result.data,
archiveContext);
+ manager.extractToCache(key, archiveType, result.data,
archiveContext, this);
return manager.getCached(key, internalName);
}
}
+
+ // Archive size
+ long lastSize = -1;
+
+ /** Returns the size of the archive last time we fetched it, or -1 */
+ long getLastSize() {
+ return lastSize;
+ }
+
+ /** Sets the size of the archive - @see getLastSize() */
+ public void setLastSize(long size) {
+ lastSize = size;
+ }
+
+ // Archive hash
+
+ byte[] lastHash = null;
+
+ /** Returns the hash of the archive last time we fetched it, or null */
+ public byte[] getLastHash() {
+ return lastHash;
+ }
+
+ /** Sets the hash of the archive - @see getLastHash() */
+ public void setLastHash(byte[] realHash) {
+ lastHash = realHash;
+ }
+
+ // Index of still-cached ArchiveStoreItems with this key
+
+ /** Index of still-cached ArchiveStoreItems with this key */
+ final DoublyLinkedListImpl myItems;
+
+ public void removeAllCachedItems() {
+ synchronized(myItems) {
+ ArchiveStoreItem item;
+ while((item = (ArchiveStoreItem) myItems.pop()) !=
null) {
+ manager.removeCachedItem(item);
+ item.finalize();
+ }
+ }
+ }
+
+ public void addItem(ArchiveStoreItem item) {
+ synchronized(myItems) {
+ myItems.push(item);
+ }
+ }
+
+ public void removeItem(ArchiveStoreItem item) {
+ synchronized(myItems) {
+ myItems.remove(item);
+ }
+ }
+
}
Modified: trunk/freenet/src/freenet/client/ArchiveStoreItem.java
===================================================================
--- trunk/freenet/src/freenet/client/ArchiveStoreItem.java 2005-10-27
17:49:43 UTC (rev 7457)
+++ trunk/freenet/src/freenet/client/ArchiveStoreItem.java 2005-10-27
21:16:28 UTC (rev 7458)
@@ -1,17 +1,24 @@
package freenet.client;
+import freenet.support.DoublyLinkedListImpl;
+
/**
* Base class for items stored in the archive cache.
*/
-abstract class ArchiveStoreItem {
+abstract class ArchiveStoreItem extends DoublyLinkedListImpl.Item {
final ArchiveKey key;
final ArchiveStoreContext context;
ArchiveStoreItem(ArchiveKey key, ArchiveStoreContext context) {
this.key = key;
this.context = context;
+ context.addItem(this);
}
- /** Expected to delete any stored data on disk, and decrement
cachedData. */
- public abstract void finalize();
+ /** Expected to delete any stored data on disk, and decrement
cachedData.
+ * Implemented to remove self from context.
+ */
+ public void finalize() {
+ context.removeItem(this);
+ }
}
Modified: trunk/freenet/src/freenet/client/RealArchiveStoreItem.java
===================================================================
--- trunk/freenet/src/freenet/client/RealArchiveStoreItem.java 2005-10-27
17:49:43 UTC (rev 7457)
+++ trunk/freenet/src/freenet/client/RealArchiveStoreItem.java 2005-10-27
21:16:28 UTC (rev 7458)
@@ -4,7 +4,7 @@
import freenet.keys.FreenetURI;
import freenet.support.Bucket;
-import freenet.support.PaddedEncryptedBucket;
+import freenet.support.PaddedEphemerallyEncryptedBucket;
import freenet.support.io.FileBucket;
import freenet.support.io.FileUtil;
@@ -13,7 +13,7 @@
private final ArchiveManager manager;
boolean finalized;
File myFilename;
- PaddedEncryptedBucket bucket;
+ PaddedEphemerallyEncryptedBucket bucket;
FileBucket underBucket;
/**
Modified: trunk/freenet/src/freenet/client/TempStoreElement.java
===================================================================
--- trunk/freenet/src/freenet/client/TempStoreElement.java 2005-10-27
17:49:43 UTC (rev 7457)
+++ trunk/freenet/src/freenet/client/TempStoreElement.java 2005-10-27
21:16:28 UTC (rev 7458)
@@ -5,18 +5,18 @@
import java.io.File;
-import freenet.support.PaddedEncryptedBucket;
+import freenet.support.PaddedEphemerallyEncryptedBucket;
import freenet.support.io.FileBucket;
class TempStoreElement {
- TempStoreElement(File myFile, FileBucket fb, PaddedEncryptedBucket
encryptedBucket) {
+ TempStoreElement(File myFile, FileBucket fb,
PaddedEphemerallyEncryptedBucket encryptedBucket) {
this.myFilename = myFile;
this.underBucket = fb;
this.bucket = encryptedBucket;
}
File myFilename;
- PaddedEncryptedBucket bucket;
+ PaddedEphemerallyEncryptedBucket bucket;
FileBucket underBucket;
public void finalize() {
Modified: trunk/freenet/src/freenet/crypt/CipherOutputStream.java
===================================================================
--- trunk/freenet/src/freenet/crypt/CipherOutputStream.java 2005-10-27
17:49:43 UTC (rev 7457)
+++ trunk/freenet/src/freenet/crypt/CipherOutputStream.java 2005-10-27
21:16:28 UTC (rev 7458)
@@ -47,6 +47,11 @@
ctx.blockEncipher(tmp, 0, len);
out.write(tmp);
}
+
+ // FOS will use write(int) to implement this if we don't override it!
+ public void write(byte[] buf) throws IOException {
+ write(buf, 0, buf.length);
+ }
}
Modified: trunk/freenet/src/freenet/support/Bucket.java
===================================================================
--- trunk/freenet/src/freenet/support/Bucket.java 2005-10-27 17:49:43 UTC
(rev 7457)
+++ trunk/freenet/src/freenet/support/Bucket.java 2005-10-27 21:16:28 UTC
(rev 7458)
@@ -8,7 +8,10 @@
public interface Bucket {
/**
- * Returns an OutputStream that is used to put data in this Bucket.
+ * Returns an OutputStream that is used to put data in this Bucket, from
the
+ * beginning. It is not possible to append data to a Bucket! This
simplifies the
+ * code significantly for some classes. If you need to append, just pass
the
+ * OutputStream around.
*/
public OutputStream getOutputStream() throws IOException;
@@ -25,13 +28,6 @@
public String getName();
/**
- * If resetWrite() is called on the object, the next getOutputStream
- * should overwrite any other data in the bucket from the beginning,
- * otherwise it should append it.
- */
- public void resetWrite() throws IOException;
-
- /**
* Returns the amount of data currently in this bucket.
*/
public long size();
Modified: trunk/freenet/src/freenet/support/BucketTools.java
===================================================================
--- trunk/freenet/src/freenet/support/BucketTools.java 2005-10-27 17:49:43 UTC
(rev 7457)
+++ trunk/freenet/src/freenet/support/BucketTools.java 2005-10-27 21:16:28 UTC
(rev 7458)
@@ -1,6 +1,7 @@
package freenet.support;
import java.io.DataInputStream;
+import java.io.EOFException;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
@@ -9,6 +10,8 @@
import java.nio.channels.Channels;
import java.nio.channels.ReadableByteChannel;
import java.nio.channels.WritableByteChannel;
+import java.security.MessageDigest;
+import java.security.NoSuchAlgorithmException;
import java.util.ArrayList;
import java.util.List;
@@ -17,6 +20,8 @@
*/
public class BucketTools {
+ static final int BLOCK_SIZE = 4096;
+
/**
* Copy from the input stream of <code>src</code> to the output stream
of
* <code>dest</code>.
@@ -31,7 +36,7 @@
ReadableByteChannel readChannel = Channels.newChannel(in);
WritableByteChannel writeChannel = Channels.newChannel(out);
- ByteBuffer buffer = ByteBuffer.allocateDirect(Core.blockSize);
+ ByteBuffer buffer = ByteBuffer.allocateDirect(BLOCK_SIZE);
while (readChannel.read(buffer) != -1) {
buffer.flip();
writeChannel.write(buffer);
@@ -314,4 +319,28 @@
bucket.setReadOnly();
return bucket;
}
+
+ public static byte[] hash(Bucket data) throws IOException {
+ try {
+ MessageDigest md = MessageDigest.getInstance("SHA-256");
+ InputStream is = data.getInputStream();
+ long bucketLength = data.size();
+ long bytesRead = 0;
+ byte[] buf = new byte[4096];
+ while(bytesRead < bucketLength || bucketLength == -1) {
+ int readBytes = is.read(buf);
+ if(readBytes < 0) break;
+ bytesRead += readBytes;
+ md.update(buf, 0, readBytes);
+ }
+ if(bytesRead < bucketLength && bucketLength > 0)
+ throw new EOFException();
+ if(bytesRead != bucketLength && bucketLength > 0)
+ throw new IOException("Read "+bytesRead+" but
bucket length "+bucketLength+"!");
+ return md.digest();
+ } catch (NoSuchAlgorithmException e) {
+ Logger.error(BucketTools.class, "No such digest:
SHA-256 !!");
+ throw new Error("No such digest: SHA-256 !!");
+ }
+ }
}
Deleted: trunk/freenet/src/freenet/support/PaddedEncryptedBucket.java
===================================================================
--- trunk/freenet/src/freenet/support/PaddedEncryptedBucket.java
2005-10-27 17:49:43 UTC (rev 7457)
+++ trunk/freenet/src/freenet/support/PaddedEncryptedBucket.java
2005-10-27 21:16:28 UTC (rev 7458)
@@ -1,55 +0,0 @@
-package freenet.support;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-
-import freenet.crypt.PCFBMode;
-
-/**
- * A proxy Bucket which adds:
- * - Encryption with the supplied cipher.
- * - Padding to the next PO2 size.
- */
-public class PaddedEncryptedBucket implements Bucket {
-
- /**
- * Create a padded encrypted proxy bucket.
- * @param bucket The bucket which we are proxying to.
- * @param pcfb The encryption mode with which to encipher/decipher the
data.
- */
- public PaddedEncryptedBucket(Bucket bucket, PCFBMode pcfb, int minSize)
{
- // TODO Auto-generated constructor stub
- }
-
- public OutputStream getOutputStream() throws IOException {
- // TODO Auto-generated method stub
- return null;
- }
-
- public InputStream getInputStream() throws IOException {
- // TODO Auto-generated method stub
- return null;
- }
-
- public String getName() {
- // TODO Auto-generated method stub
- return null;
- }
-
- public void resetWrite() throws IOException {
- // TODO Auto-generated method stub
-
- }
-
- public long size() {
- // TODO Auto-generated method stub
- return 0;
- }
-
- public byte[] toByteArray() {
- // TODO Auto-generated method stub
- return null;
- }
-
-}
Copied: trunk/freenet/src/freenet/support/PaddedEphemerallyEncryptedBucket.java
(from rev 7451, trunk/freenet/src/freenet/support/PaddedEncryptedBucket.java)
===================================================================
--- trunk/freenet/src/freenet/support/PaddedEncryptedBucket.java
2005-10-25 19:33:02 UTC (rev 7451)
+++ trunk/freenet/src/freenet/support/PaddedEphemerallyEncryptedBucket.java
2005-10-27 21:16:28 UTC (rev 7458)
@@ -0,0 +1,210 @@
+package freenet.support;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+
+import org.spaceroots.mantissa.random.MersenneTwister;
+
+import freenet.crypt.PCFBMode;
+import freenet.crypt.RandomSource;
+import freenet.crypt.UnsupportedCipherException;
+import freenet.crypt.ciphers.Rijndael;
+
+/**
+ * A proxy Bucket which adds:
+ * - Encryption with the supplied cipher, and a random, ephemeral key.
+ * - Padding to the next PO2 size.
+ */
+public class PaddedEphemerallyEncryptedBucket implements Bucket {
+
+ private final Bucket bucket;
+ private final int minPaddedSize;
+ private final MersenneTwister paddingSource;
+ private final Rijndael aes;
+ private long dataLength;
+ private boolean readOnly;
+ private int lastOutputStream;
+
+ /**
+ * Create a padded encrypted proxy bucket.
+ * @param bucket The bucket which we are proxying to. Must be empty.
+ * @param pcfb The encryption mode with which to encipher/decipher the
data.
+ * @param minSize The minimum padded size of the file (after it has
been closed).
+ * @param origRandom Hard random number generator from which to obtain
a seed for padding.
+ * @throws UnsupportedCipherException
+ */
+ public PaddedEphemerallyEncryptedBucket(Bucket bucket, int minSize,
RandomSource origRandom) throws UnsupportedCipherException {
+ this.bucket = bucket;
+ if(bucket.size() != 0) throw new
IllegalArgumentException("Bucket must be empty");
+ aes = new Rijndael(256, 256);
+ byte[] key = new byte[32];
+ origRandom.nextBytes(key);
+ aes.initialize(key);
+ // Might as well blank it
+ for(int i=0;i<key.length;i++) key[i] = 0;
+ this.minPaddedSize = minSize;
+ paddingSource = new MersenneTwister(origRandom.nextLong());
+ readOnly = false;
+ lastOutputStream = 0;
+ }
+
+ public OutputStream getOutputStream() throws IOException {
+ if(readOnly) throw new IOException("Read only");
+ OutputStream os = bucket.getOutputStream();
+ dataLength = 0;
+ return new PaddedEphemerallyEncryptedOutputStream(os,
++lastOutputStream);
+ }
+
+ private class PaddedEphemerallyEncryptedOutputStream extends
OutputStream {
+
+ final PCFBMode pcfb;
+ final OutputStream out;
+ final int streamNumber;
+
+ public PaddedEphemerallyEncryptedOutputStream(OutputStream out,
int streamNumber) {
+ this.out = out;
+ dataLength = 0;
+ this.streamNumber = streamNumber;
+ pcfb = new PCFBMode(aes);
+ }
+
+ public void write(int b) throws IOException {
+ if(streamNumber != lastOutputStream)
+ throw new IllegalStateException("Writing to old
stream in "+getName());
+ if(b < 0 || b > 255)
+ throw new IllegalArgumentException();
+ int toWrite = pcfb.encipher(b);
+ synchronized(PaddedEphemerallyEncryptedBucket.this) {
+ out.write(toWrite);
+ dataLength++;
+ }
+ }
+
+ public void write(byte[] buf, int offset, int length) throws
IOException {
+ if(streamNumber != lastOutputStream)
+ throw new IllegalStateException("Writing to old
stream in "+getName());
+ byte[] enc = new byte[length];
+ System.arraycopy(buf, offset, enc, 0, length);
+ pcfb.blockEncipher(enc, 0, enc.length);
+ synchronized(PaddedEphemerallyEncryptedBucket.this) {
+ out.write(enc, 0, enc.length);
+ dataLength += enc.length;
+ }
+ }
+
+ // Override this or FOS will use write(int)
+ public void write(byte[] buf) throws IOException {
+ if(streamNumber != lastOutputStream)
+ throw new IllegalStateException("Writing to old
stream in "+getName());
+ write(buf, 0, buf.length);
+ }
+
+ public void close() throws IOException {
+ if(streamNumber != lastOutputStream) {
+ Logger.normal(this, "Not padding out to length
because have been superceded: "+getName());
+ return;
+ }
+ synchronized(PaddedEphemerallyEncryptedBucket.this) {
+ long finalLength = paddedLength();
+ long padding = finalLength - dataLength;
+ byte[] buf = new byte[4096];
+ long writtenPadding = 0;
+ while(writtenPadding < padding) {
+ int left = Math.min((int) (padding -
writtenPadding), buf.length);
+ paddingSource.nextBytes(buf);
+ out.write(buf, 0, left);
+ writtenPadding += left;
+ }
+ }
+ }
+ }
+
+ public InputStream getInputStream() throws IOException {
+ return new
PaddedEphemerallyEncryptedInputStream(bucket.getInputStream());
+ }
+
+ private class PaddedEphemerallyEncryptedInputStream extends InputStream
{
+
+ final InputStream in;
+ final PCFBMode pcfb;
+ long ptr;
+
+ public PaddedEphemerallyEncryptedInputStream(InputStream in) {
+ this.in = in;
+ pcfb = new PCFBMode(aes);
+ ptr = 0;
+ }
+
+ public int read() throws IOException {
+ if(ptr > dataLength) return -1;
+ int x = in.read();
+ if(x == -1) return x;
+ ptr++;
+ return pcfb.decipher(x);
+ }
+
+ public final int available() {
+ return (int) (dataLength - ptr);
+ }
+
+ public int read(byte[] buf, int offset, int length) throws
IOException {
+ if(ptr > dataLength) return -1;
+ length = Math.min(length, available());
+ int readBytes = in.read(buf, offset, length);
+ if(readBytes <= 0) return readBytes;
+ ptr += dataLength;
+ pcfb.blockDecipher(buf, offset, readBytes);
+ return readBytes;
+ }
+
+ public int read(byte[] buf) throws IOException {
+ return read(buf, 0, buf.length);
+ }
+
+ public long skip(long bytes) throws IOException {
+ byte[] buf = new byte[(int)Math.min(4096, bytes)];
+ long skipped = 0;
+ while(skipped < bytes) {
+ int x = read(buf, 0,
(int)Math.min(bytes-skipped, buf.length));
+ if(x <= 0) return skipped;
+ skipped += x;
+ }
+ return skipped;
+ }
+ }
+
+ /**
+ * Return the length of the data in the proxied bucket, after padding.
+ */
+ public synchronized long paddedLength() {
+ long size = dataLength;
+ if(dataLength < minPaddedSize) dataLength = minPaddedSize;
+ if(size == minPaddedSize) return size;
+ long min = minPaddedSize;
+ long max = minPaddedSize << 1;
+ while(true) {
+ if(max < 0)
+ throw new Error("Impossible size: "+dataLength);
+ if(size > min && size < max) return max;
+ max = max << 1;
+ }
+ }
+
+ public String getName() {
+ return "Encrypted:"+bucket.getName();
+ }
+
+ public long size() {
+ return dataLength;
+ }
+
+ public boolean isReadOnly() {
+ return readOnly;
+ }
+
+ public void setReadOnly() {
+ readOnly = true;
+ }
+
+}
Added: trunk/freenet/src/freenet/support/RandomAccessFileBucket.java
===================================================================
--- trunk/freenet/src/freenet/support/RandomAccessFileBucket.java
2005-10-27 17:49:43 UTC (rev 7457)
+++ trunk/freenet/src/freenet/support/RandomAccessFileBucket.java
2005-10-27 21:16:28 UTC (rev 7458)
@@ -0,0 +1,440 @@
+// REDFLAG: test and javadoc
+package freenet.support;
+
+import java.io.File;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.io.RandomAccessFile;
+import java.util.Vector;
+
+/**
+ * Bucket implementation that can efficiently access any arbitrary byte-range
+ * of a file.
+ *
+ **/
+public class RandomAccessFileBucket implements Bucket {
+
+ public RandomAccessFileBucket(File file, long offset, long len, boolean
readOnly)
+ throws IOException {
+ if (!(file.exists() && file.canRead())) {
+ throw new IOException("Can't read file: " +
file.getAbsolutePath());
+ }
+
+ if ((!file.canWrite()) && (!readOnly)) {
+ throw new IOException("Can't write to file: " +
file.getAbsolutePath());
+ }
+
+ this.file = file;
+ this.readOnly = readOnly;
+ setRange(offset, len);
+ }
+
+ public synchronized void setRange(long offset, long len) throws
IOException {
+ if (isReleased()) {
+ throw new IOException("Attempt to use a released
RandomAccessFileBucket: " + getName() );
+ }
+
+ if (streams.size() > 0) {
+ throw new IllegalStateException("Can't reset range. There are
open streams.");
+ }
+ if ((offset < 0) || (len < 0) || (offset + len > file.length())) {
+ throw new IllegalArgumentException("Bad range arguments.");
+ }
+ this.offset = offset;
+ this.len = len;
+ localOffset = 0;
+ }
+
+ public static class Range {
+ Range(long offset, long len) {
+ this.offset = offset;
+ this.len = len;
+ }
+
+ public long offset;
+ public long len;
+ }
+
+ public final synchronized Range getRange() {
+ return new Range(offset, len);
+ }
+
+ // hmmm make protected???
+ public final synchronized boolean hasOpenStreams() {
+ return streams.size() > 0;
+ }
+
+ // Wrap non-const members so we can tell
+ // when code touches the Bucket after it
+ // has been released.
+ public synchronized InputStream getInputStream() throws IOException {
+ if (isReleased()) {
+ throw new IOException("Attempt to use a released
RandomAccessFileBucket: " + getName() );
+ }
+
+ InputStream newIn = new RAInputStream(this, file.getAbsolutePath());
+ streams.addElement(newIn);
+ return newIn;
+ }
+
+ public synchronized OutputStream getOutputStream() throws IOException {
+ if (isReleased()) {
+ throw new IOException("Attempt to use a released
RandomAccessBucket: " + getName() );
+ }
+
+ if (readOnly) {
+ throw new IOException("Tried to write a read-only Bucket.");
+ }
+
+ OutputStream newOut = new RAOutputStream(this, file.getAbsolutePath());
+ streams.addElement(newOut);
+ return newOut;
+ }
+
+ public String getName() {
+ return file.getAbsolutePath() + " [" + offset + ", " +
+ (offset + len - 1) + "]";
+ }
+
+ public synchronized void resetWrite() {
+ if (isReleased()) {
+ throw new RuntimeException("Attempt to use a released
RandomAccessFileBucket: " + getName() );
+ }
+ // REDFLAG: implicit assumptions
+ // 0) Bucket is only written to at a time.
+ // 1) The output stream is closed before the
+ // next is open. Ouch. This may cause problems...
+ localOffset = 0;
+ }
+
+ public long size() { return len; }
+
+ public synchronized boolean release() {
+ if (released) {
+ return true;
+ }
+
+ // Force all open streams closed.
+ // Windows won't let us delete the file unless we
+ // do this.
+ for (int i =0; i < streams.size(); i++) {
+ try {
+ if (streams.elementAt(i) instanceof InputStream) {
+ ((InputStream)streams.elementAt(i)).close();
+
+ Logger.debug(this, "closed open InputStream !: " +
+ file.getAbsolutePath());
+ }
+ else if (streams.elementAt(i) instanceof OutputStream) {
+ ((OutputStream)streams.elementAt(i)).close();
+ Logger.debug(this, "closed open OutputStream !: " +
+ file.getAbsolutePath());
+ }
+ }
+ catch (IOException ioe) {
+ }
+ }
+ streams.removeAllElements();
+ streams.trimToSize();
+ // We don't delete anything because we don't own anything.
+ released = true;
+ return true;
+ }
+
+ public synchronized final boolean isReleased() { return released; }
+
+ public void finalize() throws Throwable {
+ if (!released) {
+ release();
+ }
+ }
+
+ // REDFLAG: RETEST
+ // set blocks = -1 for until end.
+ // last block may have length < blockSize
+ public static Bucket[] segment(File file, int blockSize,
+ long offset, int blocks, boolean readOnly)
+ throws IOException {
+
+ if (!(file.exists() && file.canRead())) {
+ throw new IOException("Can't read file: " +
file.getAbsolutePath());
+ }
+
+ if ((!file.canWrite()) && (!readOnly)) {
+ throw new IOException("Can't write to file: " +
file.getAbsolutePath());
+ }
+
+ if ((offset < 0) || (offset >= file.length() - 1)) {
+ throw new IllegalArgumentException("offset: " + offset);
+ }
+
+ long length = file.length() - offset;
+ int nBlocks = (int) (length / blockSize);
+ if ((length % blockSize) != 0) {
+ nBlocks++;
+ }
+
+ if (blocks == -1) {
+ blocks = nBlocks;
+ }
+ else if ((blocks > nBlocks) || (blocks < 1)) {
+ throw new IllegalArgumentException("blocks: " + blocks);
+ }
+
+ Bucket[] ret = new Bucket[blocks];
+
+ for (int i = 0; i < blocks; i++) {
+ final long localOffset = i * blockSize + offset;
+ int blockLen = blockSize;
+ if (i == nBlocks - 1) {
+ blockLen = (int) (length - (nBlocks - 1) * blockSize);
+ }
+ ret[i] = new RandomAccessFileBucket(file, localOffset, blockLen,
readOnly);
+ }
+
+ return ret;
+ }
+
+ ////////////////////////////////////////////////////////////
+ // InputStream and OutputStream implementations
+ //
+ private final static boolean vociferous = false;
+
+ class RAInputStream extends InputStream {
+ public RAInputStream(RandomAccessFileBucket rafb, String prefix)
throws IOException {
+ this.rafb = rafb;
+ raf = new RandomAccessFile(rafb.file, "r");
+ raf.seek(offset);
+ println(" -- Created new InputStream [" + rafb.offset +
+ ", " + (rafb.offset + rafb.len -1) + "]" );
+ }
+
+ ////////////////////////////////////////////////////////////
+ // FilterInput implementation
+
+ private final int bytesLeft() throws IOException {
+ return (int)(rafb.offset + rafb.len - raf.getFilePointer());
+ }
+
+ public int read() throws java.io.IOException {
+ synchronized (rafb) {
+ println(".read()");
+ checkValid();
+ if (bytesLeft() < 1) {
+ return -1; // EOF
+ }
+ return raf.read();
+ }
+ }
+
+ public int read(byte[] bytes) throws java.io.IOException {
+ synchronized (rafb) {
+ println(".read(byte[])");
+ checkValid();
+ int nAvailable = bytesLeft();
+ if (nAvailable < 1) {
+ return -1; // EOF
+ }
+ if (nAvailable > bytes.length) {
+ nAvailable = bytes.length;
+ }
+ return raf.read(bytes, 0, nAvailable);
+ }
+ }
+
+ public int read(byte[] bytes, int a, int b) throws java.io.IOException
{
+ synchronized (rafb) {
+ println(".read(byte[], int, int)");
+ checkValid();
+ int nAvailable = bytesLeft();
+ if (nAvailable < 1) {
+ return -1; // EOF
+ }
+ if (nAvailable > b) {
+ nAvailable = b;
+ }
+ return raf.read(bytes, a, nAvailable);
+ }
+ }
+
+ public long skip(long a) throws java.io.IOException {
+ synchronized (rafb) {
+ println(".skip(long)");
+ checkValid();
+ int nAvailable = bytesLeft();
+ if (nAvailable < 1) {
+ return -1; // EOF
+ }
+ if (nAvailable > a) {
+ nAvailable = (int)a;
+ }
+
+ return raf.skipBytes(nAvailable);
+ }
+ }
+
+ public int available() throws java.io.IOException {
+ synchronized (rafb) {
+ println(".available()");
+ checkValid();
+ return bytesLeft();
+ }
+ }
+
+ public void close() throws java.io.IOException {
+ synchronized (rafb) {
+ println(".close()");
+ checkValid();
+ raf.close();
+ if (rafb.streams.contains(RAInputStream.this)) {
+ rafb.streams.removeElement(RAInputStream.this);
+ }
+ rafb.streams.trimToSize();
+ }
+ }
+
+ // LATER: support if really needed.
+ public void mark(int a) {
+ // NOP
+ }
+
+ public void reset() {
+ // NOP
+ }
+
+ public boolean markSupported() {
+ return false;
+ }
+
+ private final void println(String text) {
+ if (vociferous) {
+ Logger.debug(this, text);
+ }
+ }
+
+ private final void checkValid() throws IOException {
+ if (rafb.released) {
+ throw new IOException("Attempt to use a released
RandomAccessFileBucket: " + prefix);
+ }
+ }
+
+ ////////////////////////////////////////////////////////////
+ private RandomAccessFileBucket rafb = null;
+ private RandomAccessFile raf = null;
+ private String prefix = "";
+ }
+
+ private class RAOutputStream extends OutputStream {
+ public RAOutputStream(RandomAccessFileBucket rafb, String pref) throws
IOException {
+ this.rafb = rafb;
+ raf = new RandomAccessFile(rafb.file, "rw");
+ raf.seek(rafb.offset + rafb.localOffset);
+ println(" -- Created new OutputStream [" + rafb.offset + ", "
+ + (rafb.offset + rafb.len -1) + "]" );
+ }
+
+ ////////////////////////////////////////////////////////////
+ // OutputStream implementation
+ public void write(int b) throws IOException {
+ synchronized (rafb) {
+ println(".write(b)");
+ checkValid();
+ int nAvailable = bytesLeft();
+ if (nAvailable < 1) {
+ throw new IOException("Attempt to write past end of
Bucket.");
+ }
+ raf.write(b);
+ }
+ }
+
+ public void write(byte[] buf) throws IOException {
+ synchronized (rafb) {
+ println(".write(buf)");
+ checkValid();
+ int nAvailable = bytesLeft();
+ if (nAvailable < buf.length) {
+ throw new IOException("Attempt to write past end of
Bucket.");
+ }
+ raf.write(buf);
+ }
+ }
+
+ public void write(byte[] buf, int off, int len) throws IOException {
+ synchronized (rafb) {
+ println(".write(buf,off,len)");
+ checkValid();
+ int nAvailable = bytesLeft();
+ if (nAvailable < len) {
+ throw new IOException("Attempt to write past end of
Bucket.");
+ }
+ raf.write(buf, off, len);
+ }
+ }
+
+ public void flush() throws IOException {
+ synchronized (rafb) {
+ println(".flush()");
+ checkValid();
+ // NOP? Bytes written immediately?
+ // REDFLAG: double check.
+ }
+ }
+
+ public void close() throws IOException {
+ synchronized (rafb) {
+ println(".close()");
+ checkValid();
+ if (rafb.streams.contains(RAOutputStream.this)) {
+ rafb.streams.removeElement(RAOutputStream.this);
+ }
+ rafb.streams.trimToSize();
+ long added = raf.getFilePointer() - rafb.offset;
+ if (added > 0) {
+ // To get proper append behavior.
+ rafb.localOffset = added;
+ }
+
+ raf.close();
+ }
+ }
+
+ ////////////////////////////////////////////////////////////
+ private void println(String text) {
+ if (vociferous) {
+ Logger.debug(this, text);
+ }
+ }
+
+ private final void checkValid() throws IOException {
+ if (rafb.isReleased()) {
+ throw new IOException("Attempt to use a released
RandomAccessFileBucket: " + prefix);
+ }
+ }
+ private final int bytesLeft() throws IOException {
+ return (int)(rafb.offset + rafb.len - raf.getFilePointer());
+ }
+
+ private RandomAccessFileBucket rafb = null;
+ private RandomAccessFile raf = null;
+ private String prefix = "";
+
+ }
+ ////////////////////////////////////////////////////////////
+
	// Backing file; shared by all buckets produced by segment().
	private File file = null;
	// Absolute position in the file where this bucket's segment begins.
	private long offset = -1;
	// Write position within the segment; advanced when an output stream
	// closes so the next output stream appends (see RAOutputStream.close()).
	private long localOffset = 0;
	// Length of this bucket's segment in bytes.
	private long len = -1;
	// Once true, writes fail with IOException; irreversible.
	private boolean readOnly = false;
	// Set when the bucket is released; streams then refuse further use.
	private boolean released = false;
	// Currently open RAInputStream/RAOutputStream instances on this bucket.
	private Vector streams = new Vector();

	/** @return true if this bucket can no longer be written to. */
	public boolean isReadOnly() {
		return readOnly;
	}

	/** Makes this bucket read-only. This cannot be undone. */
	public void setReadOnly() {
		readOnly = true;
	}
}
Modified: trunk/freenet/src/freenet/support/io/FileBucket.java
===================================================================
--- trunk/freenet/src/freenet/support/io/FileBucket.java 2005-10-27
17:49:43 UTC (rev 7457)
+++ trunk/freenet/src/freenet/support/io/FileBucket.java 2005-10-27
21:16:28 UTC (rev 7458)
@@ -21,20 +21,23 @@
protected File file;
protected boolean readOnly;
- protected boolean restart = true;
protected boolean newFile; // hack to get around deletes
protected long length;
// JVM caches File.size() and there is no way to flush the cache, so we
// need to track it ourselves
- protected long fileRestartCounter;
+
+ private int lastOutputStream;
protected static String tempDir = null;
/**
* Creates a new FileBucket.
*
- * @param file
- * The File to read and write to.
+ * @param file The File to read and write to.
+ * @param readOnly If true, any attempt to write to the bucket will
result in an IOException.
+ * Can be set later. Irreversible. @see isReadOnly(), setReadOnly()
+ * @param deleteOnExit If true, delete the file on finalization.
+ *
*/
public FileBucket(File file, boolean readOnly, boolean deleteOnExit) {
this.readOnly = readOnly;
@@ -46,7 +49,6 @@
// System.err.println("-- FileBucket.ctr(0) -- " +
// file.getAbsolutePath());
// (new Exception("get stack")).printStackTrace();
- fileRestartCounter = 0;
if(file.exists()) {
length = file.length();
if(!file.canWrite())
@@ -80,30 +82,19 @@
synchronized (this) {
if(readOnly)
throw new IOException("Bucket is read-only");
- boolean append = !restart;
- if (restart)
- fileRestartCounter++;
- // we want the length of the file we are currently
writing to
// FIXME: behaviour depends on UNIX semantics, to
totally abstract
// it out we would have to kill the old write streams
here
// FIXME: what about existing streams? Will ones on
append append
// to the new truncated file? Do we want them to? What
about
// truncated ones? We should kill old streams here,
right?
- restart = false;
- return newFileBucketOutputStream(
- file.getPath(),
- append,
- fileRestartCounter);
+ return newFileBucketOutputStream(file.getPath(),
++lastOutputStream);
}
}
protected FileBucketOutputStream newFileBucketOutputStream(
- String s,
- boolean append,
- long restartCount)
- throws IOException {
- return new FileBucketOutputStream(s, append, restartCount);
+ String s, int streamNumber) throws IOException {
+ return new FileBucketOutputStream(s, streamNumber);
}
protected void resetLength() {
@@ -112,50 +103,44 @@
class FileBucketOutputStream extends FileOutputStream {
- long restartCount;
- Exception e;
-
+ private int streamNumber;
+
protected FileBucketOutputStream(
- String s,
- boolean append,
- long restartCount)
+ String s, int streamNumber)
throws FileNotFoundException {
- super(s, append);
- this.restartCount = restartCount;
- // if we throw, we're screwed anyway
- if (!append) {
- resetLength();
- }
- if (Logger.shouldLog(Logger.DEBUG, this))
- e = new Exception("debug");
+ super(s, false);
+ resetLength();
+ this.streamNumber = streamNumber;
}
- protected void confirmWriteSynchronized() {
- if (fileRestartCounter > restartCount)
- throw new IllegalStateException("writing to
file after restart");
- }
-
public void write(byte[] b) throws IOException {
+ if(streamNumber != lastOutputStream)
+ throw new IllegalStateException("Writing to old
stream in "+getName());
synchronized (FileBucket.this) {
- confirmWriteSynchronized();
+ if(readOnly)
+ throw new IOException("Bucket is
read-only");
super.write(b);
length += b.length;
}
}
public void write(byte[] b, int off, int len) throws
IOException {
+ if(streamNumber != lastOutputStream)
+ throw new IllegalStateException("Writing to old
stream in "+getName());
synchronized (FileBucket.this) {
- confirmWriteSynchronized();
+ if(readOnly)
+ throw new IOException("Bucket is
read-only");
super.write(b, off, len);
length += len;
}
}
public void write(int b) throws IOException {
+ if(streamNumber != lastOutputStream)
+ throw new IllegalStateException("Writing to old
stream in "+getName());
synchronized (FileBucket.this) {
- confirmWriteSynchronized();
- if (fileRestartCounter > restartCount)
- throw new
IllegalStateException("writing to file after restart");
+ if(readOnly)
+ throw new IOException("Bucket is
read-only");
super.write(b);
length++;
}
@@ -185,10 +170,6 @@
return file.getName();
}
- public void resetWrite() {
- restart = true;
- }
-
public long size() {
return length;
}
Modified: trunk/freenet/src/freenet/support/io/FileBucketFactory.java
===================================================================
--- trunk/freenet/src/freenet/support/io/FileBucketFactory.java 2005-10-27
17:49:43 UTC (rev 7457)
+++ trunk/freenet/src/freenet/support/io/FileBucketFactory.java 2005-10-27
21:16:28 UTC (rev 7458)
@@ -40,7 +40,7 @@
// e.printStackTrace();
// System.err.println("----------------------------------------");
} while (f.exists());
- Bucket b = new FileBucket(f, false);
+ Bucket b = new FileBucket(f, false, false);
files.addElement(f);
return b;
}
_______________________________________________
cvs mailing list
[email protected]
http://emu.freenetproject.org/cgi-bin/mailman/listinfo/cvs