Author: toad
Date: 2005-10-25 19:33:02 +0000 (Tue, 25 Oct 2005)
New Revision: 7451

Added:
   trunk/freenet/src/freenet/support/BucketTools.java
   trunk/freenet/src/freenet/support/PaddedEncryptedBucket.java
   trunk/freenet/src/freenet/support/io/
   trunk/freenet/src/freenet/support/io/FileBucket.java
   trunk/freenet/src/freenet/support/io/FileBucketFactory.java
   trunk/freenet/src/freenet/support/io/FileUtil.java
   trunk/freenet/src/freenet/support/io/NullInputStream.java
   trunk/freenet/src/freenet/support/io/NullOutputStream.java
Modified:
   trunk/freenet/src/freenet/client/ArchiveElement.java
   trunk/freenet/src/freenet/client/ArchiveManager.java
   trunk/freenet/src/freenet/client/FetchResult.java
   trunk/freenet/src/freenet/client/Fetcher.java
   trunk/freenet/src/freenet/support/BucketFactory.java
   trunk/freenet/src/freenet/support/LRUHashtable.java
Log:
More work on archives.

Modified: trunk/freenet/src/freenet/client/ArchiveElement.java
===================================================================
--- trunk/freenet/src/freenet/client/ArchiveElement.java        2005-10-24 
17:08:08 UTC (rev 7450)
+++ trunk/freenet/src/freenet/client/ArchiveElement.java        2005-10-25 
19:33:02 UTC (rev 7451)
@@ -29,9 +29,13 @@
        /**
         * Fetch the element.
         * If fetchContext is null, return null unless the data is cached.
-        * @throws ArchiveFailureException 
+        * @throws ArchiveFailureException If there was a fatal error in the 
archive extraction. 
+        * @throws ArchiveRestartException If the archive changed, and 
therefore we need to
+        * restart the request.
+        * @throws FetchException If we could not fetch the key.
+        * @throws MetadataParseException If the key's metadata was invalid.
         */
-       public Bucket get(ArchiveContext archiveContext, FetcherContext 
fetchContext, boolean inSplitZipManifest) throws ArchiveFailureException {
+       public Bucket get(ArchiveContext archiveContext, FetcherContext 
fetchContext, boolean inSplitZipManifest) throws ArchiveFailureException, 
MetadataParseException, FetchException, ArchiveRestartException {
                
                archiveContext.doLoopDetection(ckey);
                // AFTER the loop check (possible deadlocks)
@@ -42,9 +46,10 @@
                        if(fetchContext == null) return null;
                        Fetcher fetcher = new Fetcher(key, fetchContext, 
archiveContext);
                        FetchResult result = 
fetcher.realRun(inSplitZipManifest);
-                       if(result.succeeded())
+                       if(result.succeeded()) {
                                manager.extractToCache(key, archiveType, 
result.data, archiveContext);
-                       else
+                               return manager.getCached(key, filename);
+                       } else
                                throw new ArchiveFailureException("Fetch 
failed");
                }
        }

Modified: trunk/freenet/src/freenet/client/ArchiveManager.java
===================================================================
--- trunk/freenet/src/freenet/client/ArchiveManager.java        2005-10-24 
17:08:08 UTC (rev 7450)
+++ trunk/freenet/src/freenet/client/ArchiveManager.java        2005-10-25 
19:33:02 UTC (rev 7451)
@@ -9,11 +9,19 @@
 import java.util.zip.ZipEntry;
 import java.util.zip.ZipInputStream;
 
+import freenet.crypt.PCFBMode;
+import freenet.crypt.RandomSource;
+import freenet.crypt.UnsupportedCipherException;
+import freenet.crypt.ciphers.Rijndael;
 import freenet.keys.ClientKey;
 import freenet.keys.FreenetURI;
 import freenet.support.Bucket;
+import freenet.support.HexUtil;
 import freenet.support.LRUHashtable;
 import freenet.support.Logger;
+import freenet.support.PaddedEncryptedBucket;
+import freenet.support.io.FileBucket;
+import freenet.support.io.FileUtil;
 
 /**
  * Cache of recently decoded archives:
@@ -24,7 +32,7 @@
  */
 public class ArchiveManager {
 
-       ArchiveManager(int maxHandlers, long maxCachedData, long 
maxArchiveSize, long maxArchivedFileSize, File cacheDir) {
+       ArchiveManager(int maxHandlers, long maxCachedData, long 
maxArchiveSize, long maxArchivedFileSize, File cacheDir, RandomSource random) {
                maxArchiveHandlers = maxHandlers;
                archiveHandlers = new LRUHashtable();
                this.maxCachedData = maxCachedData;
@@ -32,11 +40,14 @@
                storedData = new LRUHashtable();
                this.maxArchiveSize = maxArchiveSize;
                this.maxArchivedFileSize = maxArchivedFileSize;
+               this.random = random;
        }
 
+       final RandomSource random;
        final long maxArchiveSize;
        final long maxArchivedFileSize;
        
+       
        // ArchiveHandler's
        
        final int maxArchiveHandlers;
@@ -57,6 +68,7 @@
        // Data cache
        
        final long maxCachedData;
+       private long cachedData;
        final File cacheDir;
        final LRUHashtable storedData;
 
@@ -79,6 +91,8 @@
        public synchronized Bucket getCached(FreenetURI key, String filename) {
                MyKey k = new MyKey(key, filename);
                ArchiveStoreElement ase = (ArchiveStoreElement) 
storedData.get(k);
+               // Promote to top of LRU
+               storedData.push(k, ase);
                if(ase == null) return null;
                return ase.dataAsBucket();
        }
@@ -104,21 +118,85 @@
                }
        }
 
-       public class ArchiveStoreElement {
+       abstract class ArchiveElement {
                MyKey key;
+               
+               /** Expected to delete any stored data on disk, and decrement 
cachedData. */
+               public abstract void finalize();
+       }
+       
+       class ArchiveStoreElement extends ArchiveElement {
                boolean finalized;
-               // FIXME implement
+               File myFilename;
+               PaddedEncryptedBucket bucket;
+               FileBucket underBucket;
                
-               public Bucket dataAsBucket() {
-                       // FIXME implement
+               /**
+                * Create an ArchiveStoreElement from a TempStoreElement.
+                * @param key2 The key of the archive the file came from.
+                * @param realName The name of the file in that archive.
+                * @param temp The TempStoreElement currently storing the data.
+                */
+               ArchiveStoreElement(FreenetURI key2, String realName, 
TempStoreElement temp) {
+                       this.key = new MyKey(key2, realName);
+                       this.finalized = false;
+                       this.bucket = temp.bucket;
+                       this.underBucket = temp.underBucket;
+                       underBucket.setReadOnly();
+                       cachedData += spaceUsed();
                }
+
+               Bucket dataAsBucket() {
+                       return bucket;
+               }
+
+               long dataSize() {
+                       return bucket.size();
+               }
                
+               long spaceUsed() {
+                       return FileUtil.estimateUsage(myFilename, 
underBucket.size());
+               }
+               
+               public synchronized void finalize() {
+                       if(finalized) return;
+                       long sz = spaceUsed();
+                       underBucket.finalize();
+                       finalized = true;
+                       cachedData -= sz;
+               }
+       }
+
+       class ArchiveErrorElement extends ArchiveElement {
+
+               String error;
+               
+               public ArchiveErrorElement(FreenetURI key2, String name, String 
error) {
+                       key = new MyKey(key2, name);
+                       this.error = error;
+               }
+
                public void finalize() {
-                       // FIXME delete file
-                       // Can be called early so check
                }
+               
        }
-
+       
+       class TempStoreElement {
+               TempStoreElement(File myFile, FileBucket fb, 
PaddedEncryptedBucket encryptedBucket) {
+                       this.myFilename = myFile;
+                       this.underBucket = fb;
+                       this.bucket = encryptedBucket;
+               }
+               
+               File myFilename;
+               PaddedEncryptedBucket bucket;
+               FileBucket underBucket;
+               
+               public void finalize() {
+                       underBucket.finalize();
+               }
+       }
+       
        /**
         * Extract data to cache.
         * @param key The key the data was fetched from.
@@ -143,25 +221,26 @@
                                String name = entry.getName();
                                long size = entry.getSize();
                                if(size > maxArchivedFileSize) {
-                                       addErrorElement(key, name);
+                                       addErrorElement(key, name, "File too 
big: "+maxArchivedFileSize+" greater than current archived file size limit 
"+maxArchivedFileSize);
                                } else {
                                        // Read the element
                                        long realLen = 0;
-                                       Bucket output = 
makeTempStoreBucket(size);
+                                       TempStoreElement temp = 
makeTempStoreBucket(size);
+                                       Bucket output = temp.bucket;
                                        OutputStream out = 
output.getOutputStream();
                                        int readBytes;
 inner:                         while((readBytes = zis.read(buf)) > 0) {
                                                out.write(buf, 0, readBytes);
                                                readBytes += realLen;
                                                if(readBytes > 
maxArchivedFileSize) {
-                                                       addErrorElement(key, 
name);
+                                                       addErrorElement(key, 
name, "File too big: "+maxArchivedFileSize+" greater than current archived file 
size limit "+maxArchivedFileSize);
                                                        out.close();
-                                                       
dumpTempStoreBucket(output);
+                                                       temp.finalize();
                                                        continue outer;
                                                }
                                        }
                                        out.close();
-                                       addStoreElement(key, name, output);
+                                       addStoreElement(key, name, temp);
                                }
                        }
                } catch (IOException e) {
@@ -175,4 +254,55 @@
                                }
                }
        }
+
+       private void addErrorElement(FreenetURI key, String name, String error) 
{
+               ArchiveErrorElement element = new ArchiveErrorElement(key, 
name, error);
+               synchronized(storedData) {
+                       storedData.push(element.key, element);
+               }
+       }
+
+       private void addStoreElement(FreenetURI key, String name, 
TempStoreElement temp) {
+               ArchiveStoreElement element = new ArchiveStoreElement(key, 
name, temp);
+               synchronized(storedData) {
+                       storedData.push(element.key, element);
+                       trimStoredData();
+               }
+       }
+
+       /**
+        * Drop any stored data beyond the limit.
+        * Call synchronized on storedData.
+        */
+       private void trimStoredData() {
+               while(cachedData > maxCachedData) {
+                       ArchiveElement e = (ArchiveElement) 
storedData.popValue();
+                       e.finalize();
+               }
+       }
+
+       /** 
+        * Create a file Bucket in the store directory, encrypted using an 
ethereal key.
+        * This is not yet associated with a name, so will be deleted when it 
goes out
+        * of scope. Not counted towards allocated data as will be short-lived 
and will not
+        * go over the maximum size. Will obviously keep its key when we move 
it to main.
+        */
+       private TempStoreElement makeTempStoreBucket(long size) {
+               byte[] randomFilename = new byte[16]; // should be plenty
+               random.nextBytes(randomFilename);
+               String filename = HexUtil.bytesToHex(randomFilename);
+               File myFile = new File(cacheDir, filename);
+               FileBucket fb = new FileBucket(myFile, false, true);
+               
+               byte[] cipherKey = new byte[32];
+               random.nextBytes(cipherKey);
+               try {
+                       Rijndael aes = new Rijndael(256, 256);
+                       PCFBMode pcfb = new PCFBMode(aes);
+                       PaddedEncryptedBucket encryptedBucket = new 
PaddedEncryptedBucket(fb, pcfb, 1024);
+                       return new TempStoreElement(myFile, fb, 
encryptedBucket);
+               } catch (UnsupportedCipherException e) {
+                       throw new Error("Unsupported cipher: AES 256/256!", e);
+               }
+       }
 }

Modified: trunk/freenet/src/freenet/client/FetchResult.java
===================================================================
--- trunk/freenet/src/freenet/client/FetchResult.java   2005-10-24 17:08:08 UTC 
(rev 7450)
+++ trunk/freenet/src/freenet/client/FetchResult.java   2005-10-25 19:33:02 UTC 
(rev 7451)
@@ -1,6 +1,9 @@
 package freenet.client;
 
+import java.io.IOException;
+
 import freenet.support.Bucket;
+import freenet.support.BucketTools;
 
 /**
  * Class to contain the result of a key fetch.
@@ -38,9 +41,10 @@
        
        /** Get the result as a simple byte array, even if we don't have it
         * as one. @throws OutOfMemoryError !!
+        * @throws IOException If it was not possible to read the data.
         */
-       public byte[] asByteArray() {
-               return data.toByteArray();
+       public byte[] asByteArray() throws IOException {
+               return BucketTools.toByteArray(data);
        }
        
        /** Get the result as a Bucket */

Modified: trunk/freenet/src/freenet/client/Fetcher.java
===================================================================
--- trunk/freenet/src/freenet/client/Fetcher.java       2005-10-24 17:08:08 UTC 
(rev 7450)
+++ trunk/freenet/src/freenet/client/Fetcher.java       2005-10-25 19:33:02 UTC 
(rev 7451)
@@ -7,6 +7,7 @@
 import freenet.keys.KeyBlock;
 import freenet.keys.KeyDecodeException;
 import freenet.support.Bucket;
+import freenet.support.BucketTools;
 import freenet.support.Logger;
 
 /** Class that does the actual fetching. Does not have to have a user friendly
@@ -73,7 +74,7 @@
                        if(!key.isMetadata()) {
                                // Just return the data
                                try {
-                                       return new FetchResult(dm, 
ctx.bucketFactory.makeImmutableBucket(data));
+                                       return new FetchResult(dm, 
BucketTools.makeImmutableBucket(ctx.bucketFactory, data));
                                } catch (IOException e) {
                                        Logger.error(this, "Could not capture 
data - disk full?: "+e, e);
                                }

Modified: trunk/freenet/src/freenet/support/BucketFactory.java
===================================================================
--- trunk/freenet/src/freenet/support/BucketFactory.java        2005-10-24 
17:08:08 UTC (rev 7450)
+++ trunk/freenet/src/freenet/support/BucketFactory.java        2005-10-25 
19:33:02 UTC (rev 7451)
@@ -2,9 +2,9 @@
 
 import java.io.IOException;
 
+
 public interface BucketFactory {
     public Bucket makeBucket(long size) throws IOException;
-    public Bucket makeImmutableBucket(byte[] data) throws IOException;
     public void freeBucket(Bucket b) throws IOException;
 }
 

Added: trunk/freenet/src/freenet/support/BucketTools.java
===================================================================
--- trunk/freenet/src/freenet/support/BucketTools.java  2005-10-24 17:08:08 UTC 
(rev 7450)
+++ trunk/freenet/src/freenet/support/BucketTools.java  2005-10-25 19:33:02 UTC 
(rev 7451)
@@ -0,0 +1,317 @@
+package freenet.support;
+
+import java.io.DataInputStream;
+import java.io.File;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.nio.ByteBuffer;
+import java.nio.channels.Channels;
+import java.nio.channels.ReadableByteChannel;
+import java.nio.channels.WritableByteChannel;
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * Helper functions for working with Buckets.
+ */
+public class BucketTools {
+
+       /**
+        * Copy from the input stream of <code>src</code> to the output stream 
of
+        * <code>dest</code>.
+        * 
+        * @param src
+        * @param dst
+        * @throws IOException
+        */
+       public final static void copy(Bucket src, Bucket dst) throws 
IOException {
+               OutputStream out = dst.getOutputStream();
+               InputStream in = src.getInputStream();
+               ReadableByteChannel readChannel = Channels.newChannel(in);
+               WritableByteChannel writeChannel = Channels.newChannel(out);
+
+               ByteBuffer buffer = ByteBuffer.allocateDirect(Core.blockSize);
+               while (readChannel.read(buffer) != -1) {
+                       buffer.flip();
+                       writeChannel.write(buffer);
+                       buffer.clear();
+               }
+
+               writeChannel.close();
+               readChannel.close();
+               out.close();
+               in.close();
+       }
+
+       public final static void zeroPad(Bucket b, long size) throws 
IOException {
+               OutputStream out = b.getOutputStream();
+
+               // Initialized to zero by default.
+               byte[] buffer = new byte[16384];
+
+               long count = 0;
+               while (count < size) {
+                       long nRequired = buffer.length;
+                       if (nRequired > size - count) {
+                               nRequired = size - count;
+                       }
+                       out.write(buffer, 0, (int) nRequired);
+                       count += nRequired;
+               }
+
+               out.close();
+       }
+
+       public final static void paddedCopy(
+               Bucket from,
+               Bucket to,
+               long nBytes,
+               int blockSize)
+               throws IOException {
+
+               if (nBytes > blockSize) {
+                       throw new IllegalArgumentException("nBytes > 
blockSize");
+               }
+
+               OutputStream out = to.getOutputStream();
+               byte[] buffer = new byte[16384];
+               InputStream in = from.getInputStream();
+
+               long count = 0;
+               while (count != nBytes) {
+                       long nRequired = nBytes - count;
+                       if (nRequired > buffer.length) {
+                               nRequired = buffer.length;
+                       }
+                       long nRead = in.read(buffer, 0, (int) nRequired);
+                       if (nRead == -1) {
+                               throw new IOException("Not enough data in 
source bucket.");
+                       }
+                       out.write(buffer, 0, (int) nRead);
+                       count += nRead;
+               }
+
+               if (count < blockSize) {
+                       // hmmm... better to just allocate a new buffer
+                       // instead of explicitly zeroing the old one?
+                       // Zero pad to blockSize
+                       long padLength = buffer.length;
+                       if (padLength > blockSize - nBytes) {
+                               padLength = blockSize - nBytes;
+                       }
+                       for (int i = 0; i < padLength; i++) {
+                               buffer[i] = 0;
+                       }
+
+                       while (count != blockSize) {
+                               long nRequired = blockSize - count;
+                               if (blockSize - count > buffer.length) {
+                                       nRequired = buffer.length;
+                               }
+                               out.write(buffer, 0, (int) nRequired);
+                               count += nRequired;
+                       }
+               }
+               in.close();
+               out.close();
+       }
+
+       public static class BucketFactoryWrapper implements BucketFactory {
+               public BucketFactoryWrapper(BucketFactory bf) {
+                       BucketFactoryWrapper.this.bf = bf;
+               }
+               public Bucket makeBucket(long size) throws IOException {
+                       return bf.makeBucket(size);
+               }
+
+               public void freeBucket(Bucket b) throws IOException {
+                       if (b instanceof RandomAccessFileBucket) {
+                               ((RandomAccessFileBucket) b).release();
+                               return;
+                       }
+                       bf.freeBucket(b);
+               }
+               private BucketFactory bf = null;
+       }
+
+       public static Bucket[] makeBuckets(BucketFactory bf, int count, int 
size)
+               throws IOException {
+               Bucket[] ret = new Bucket[count];
+               for (int i = 0; i < count; i++) {
+                       ret[i] = bf.makeBucket(size);
+               }
+               return ret;
+       }
+
+       /**
+        * Free buckets. Get yer free buckets here! No charge! All you can carry
+        * free buckets!
+        * <p>
+        * If an exception happens the method will attempt to free the remaining
+        * buckets then return the first exception. Buckets successfully freed 
are
+        * made <code>null</code> in the array.
+        * </p>
+        * 
+        * @param bf
+        * @param buckets
+        * @throws IOException
+        *             the first exception encountered; buckets that were
+        *             successfully freed are set to <code>null</code> in the array
+        */
+       public static void freeBuckets(BucketFactory bf, Bucket[] buckets)
+               throws IOException {
+               if (buckets == null) {
+                       return;
+               }
+
+               IOException firstIoe = null;
+
+               for (int i = 0; i < buckets.length; i++) {
+                       // Make sure we free any temp buckets on exception
+                       try {
+                               if (buckets[i] != null) {
+                                       bf.freeBucket(buckets[i]);
+                               }
+                               buckets[i] = null;
+                       } catch (IOException e) {
+                               if (firstIoe == null) {
+                                       firstIoe = e;
+                               }
+                       }
+               }
+
+               if (firstIoe != null) {
+                       throw firstIoe;
+               }
+       }
+
+       // Note: Not all buckets are allocated by the bf.
+       //       You must use the BucketFactoryWrapper class above
+       //       to free the returned buckets.
+       //
+       // Always returns blocks, blocks, even if it has to create
+       // zero padded ones.
+       public static Bucket[] splitFile(
+               File file,
+               int blockSize,
+               long offset,
+               int blocks,
+               boolean readOnly,
+               BucketFactoryWrapper bf)
+               throws IOException {
+
+               long len = file.length() - offset;
+               if (len > blocks * blockSize) {
+                       len = blocks * blockSize;
+               }
+
+               long padBlocks = 0;
+               if ((blocks * blockSize) - len >= blockSize) {
+                       padBlocks = ((blocks * blockSize) - len) / blockSize;
+               }
+
+               Bucket[] ret = new Bucket[blocks];
+               Bucket[] rab =
+                       RandomAccessFileBucket.segment(
+                               file,
+                               blockSize,
+                               offset,
+                               (int) (blocks - padBlocks),
+                               true);
+               System.arraycopy(rab, 0, ret, 0, rab.length);
+
+               boolean groovy = false;
+               try {
+                       if (len % blockSize != 0) {
+                               // Copy and zero pad final partial block
+                               Bucket partial = ret[rab.length - 1];
+                               ret[rab.length - 1] = bf.makeBucket(blockSize);
+                               paddedCopy(
+                                       partial,
+                                       ret[rab.length - 1],
+                                       len % blockSize,
+                                       blockSize);
+                       }
+
+                       // Trailing zero padded blocks
+                       for (int i = rab.length; i < ret.length; i++) {
+                               ret[i] = bf.makeBucket(blockSize);
+                               zeroPad(ret[i], blockSize);
+                       }
+                       groovy = true;
+               } finally {
+                       if (!groovy) {
+                               freeBuckets(bf, ret);
+                       }
+               }
+               return ret;
+       }
+
+       public final static int[] nullIndices(Bucket[] array) {
+               List list = new ArrayList();
+               for (int i = 0; i < array.length; i++) {
+                       if (array[i] == null) {
+                               list.add(new Integer(i));
+                       }
+               }
+
+               int[] ret = new int[list.size()];
+               for (int i = 0; i < list.size(); i++) {
+                       ret[i] = ((Integer) list.get(i)).intValue();
+               }
+               return ret;
+       }
+
+       public final static int[] nonNullIndices(Bucket[] array) {
+               List list = new ArrayList();
+               for (int i = 0; i < array.length; i++) {
+                       if (array[i] != null) {
+                               list.add(new Integer(i));
+                       }
+               }
+
+               int[] ret = new int[list.size()];
+               for (int i = 0; i < list.size(); i++) {
+                       ret[i] = ((Integer) list.get(i)).intValue();
+               }
+               return ret;
+       }
+
+       public final static Bucket[] nonNullBuckets(Bucket[] array) {
+               List list = new ArrayList(array.length);
+               for (int i = 0; i < array.length; i++) {
+                       if (array[i] != null) {
+                               list.add(array[i]);
+                       }
+               }
+
+               Bucket[] ret = new Bucket[list.size()];
+               return (Bucket[]) list.toArray(ret);
+       }
+
+       /**
+        * Read the entire bucket in as a byte array.
+        * Not a good idea unless it is very small!
+        * Don't call if concurrent writes may be happening.
+        * @throws IOException If there was an error reading from the bucket.
+        * @throws OutOfMemoryError If it was not possible to allocate enough 
+        * memory to contain the entire bucket.
+        */
+       public final static byte[] toByteArray(Bucket bucket) throws 
IOException {
+               long size = bucket.size();
+               if(size > Integer.MAX_VALUE) throw new OutOfMemoryError();
+               byte[] data = new byte[(int)size];
+               InputStream is = bucket.getInputStream();
+               DataInputStream dis = new DataInputStream(is);
+               dis.readFully(data);
+               return data;
+       }
+
+       public static Bucket makeImmutableBucket(BucketFactory bucketFactory, 
byte[] data) throws IOException {
+               Bucket bucket = bucketFactory.makeBucket(data.length);
+               OutputStream os = bucket.getOutputStream();
+               os.write(data);
+               bucket.setReadOnly();
+               return bucket;
+       }
+}

Modified: trunk/freenet/src/freenet/support/LRUHashtable.java
===================================================================
--- trunk/freenet/src/freenet/support/LRUHashtable.java 2005-10-24 17:08:08 UTC 
(rev 7450)
+++ trunk/freenet/src/freenet/support/LRUHashtable.java 2005-10-25 19:33:02 UTC 
(rev 7451)
@@ -35,7 +35,7 @@
     } 
 
     /**
-     *  @return Least recently pushed Object.
+     *  @return Least recently pushed key.
      */
     public final synchronized Object popKey() {
         if ( list.size() > 0 ) {
@@ -45,6 +45,17 @@
         }
     }
 
+    /**
+     * @return Least recently pushed value.
+     */
+    public final synchronized Object popValue() {
+        if ( list.size() > 0 ) {
+            return ((QItem)hash.remove(((QItem)list.pop()).obj)).value;
+        } else {
+            return null;
+        }
+    }
+    
     public final int size() {
         return list.size();
     }

Added: trunk/freenet/src/freenet/support/PaddedEncryptedBucket.java
===================================================================
--- trunk/freenet/src/freenet/support/PaddedEncryptedBucket.java        
2005-10-24 17:08:08 UTC (rev 7450)
+++ trunk/freenet/src/freenet/support/PaddedEncryptedBucket.java        
2005-10-25 19:33:02 UTC (rev 7451)
@@ -0,0 +1,55 @@
+package freenet.support;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+
+import freenet.crypt.PCFBMode;
+
+/**
+ * A proxy Bucket which adds:
+ * - Encryption with the supplied cipher.
+ * - Padding to the next PO2 size.
+ */
+public class PaddedEncryptedBucket implements Bucket {
+
+       /**
+        * Create a padded encrypted proxy bucket.
+        * @param bucket The bucket which we are proxying to.
+        * @param pcfb The encryption mode with which to encipher/decipher the 
data.
+        */
+       public PaddedEncryptedBucket(Bucket bucket, PCFBMode pcfb, int minSize) 
{
+               // TODO Auto-generated constructor stub
+       }
+
+       public OutputStream getOutputStream() throws IOException {
+               // TODO Auto-generated method stub
+               return null;
+       }
+
+       public InputStream getInputStream() throws IOException {
+               // TODO Auto-generated method stub
+               return null;
+       }
+
+       public String getName() {
+               // TODO Auto-generated method stub
+               return null;
+       }
+
+       public void resetWrite() throws IOException {
+               // TODO Auto-generated method stub
+
+       }
+
+       public long size() {
+               // TODO Auto-generated method stub
+               return 0;
+       }
+
+       public byte[] toByteArray() {
+               // TODO Auto-generated method stub
+               return null;
+       }
+
+}

Added: trunk/freenet/src/freenet/support/io/FileBucket.java
===================================================================
--- trunk/freenet/src/freenet/support/io/FileBucket.java        2005-10-24 
17:08:08 UTC (rev 7450)
+++ trunk/freenet/src/freenet/support/io/FileBucket.java        2005-10-25 
19:33:02 UTC (rev 7451)
@@ -0,0 +1,315 @@
+package freenet.support.io;
+
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileNotFoundException;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+
+import freenet.crypt.RandomSource;
+import freenet.support.Bucket;
+import freenet.support.Logger;
+
+/**
+ * A file Bucket is an implementation of Bucket that writes to a file.
+ * 
+ * @author oskar
+ */
+public class FileBucket implements Bucket {
+
+       protected File file;
+       protected boolean readOnly;
+       protected boolean restart = true;
+       protected boolean newFile; // hack to get around deletes
+       protected long length;
+       // JVM caches File.size() and there is no way to flush the cache, so we
+       // need to track it ourselves
+       protected long fileRestartCounter;
+
+       protected static String tempDir = null;
+
+       /**
+        * Creates a new FileBucket.
+        * 
+        * @param file
+        *            The File to read and write to.
+        */
+       public FileBucket(File file, boolean readOnly, boolean deleteOnExit) {
+               this.readOnly = readOnly;
+               this.file = file;
+               this.newFile = deleteOnExit;
+               if(newFile)
+                       file.deleteOnExit();
+               // Useful for finding temp file leaks.
+               // System.err.println("-- FileBucket.ctr(0) -- " +
+               // file.getAbsolutePath());
+               // (new Exception("get stack")).printStackTrace();
+               fileRestartCounter = 0;
+               if(file.exists()) {
+                       length = file.length();
+                       if(!file.canWrite())
+                               readOnly = true;
+               }
+               else length = 0;
+       }
+
+       /**
+        * Creates a new FileBucket in a random temporary file in the temporary
+        * directory.
+        */
+
+       public FileBucket(RandomSource random) {
+               file =
+                       new File(
+                               tempDir,
+                               "t"
+                                       + Integer.toHexString(
+                                               Math.abs(random.nextInt())));
+               // Useful for finding temp file leaks.
+               //System.err.println("-- FileBucket.ctr(1) -- " +
+               // file.getAbsolutePath());
+               //(new Exception("get stack")).printStackTrace();
+               newFile = true;
+               length = 0;
+               file.deleteOnExit();
+       }
+
+       public OutputStream getOutputStream() throws IOException {
+               synchronized (this) {
+                       if(readOnly)
+                               throw new IOException("Bucket is read-only");
+                       boolean append = !restart;
+                       if (restart)
+                               fileRestartCounter++;
+                       // we want the length of the file we are currently 
writing to
+
+                       // FIXME: behaviour depends on UNIX semantics, to 
totally abstract
+                       // it out we would have to kill the old write streams 
here
+                       // FIXME: what about existing streams? Will ones on 
append append
+                       // to the new truncated file? Do we want them to? What 
about
+                       // truncated ones? We should kill old streams here, 
right?
+                       restart = false;
+                       return newFileBucketOutputStream(
+                               file.getPath(),
+                               append,
+                               fileRestartCounter);
+               }
+       }
+
+       protected FileBucketOutputStream newFileBucketOutputStream(
+               String s,
+               boolean append,
+               long restartCount)
+               throws IOException {
+               return new FileBucketOutputStream(s, append, restartCount);
+       }
+
+       protected void resetLength() {
+               length = 0;
+       }
+
+       class FileBucketOutputStream extends FileOutputStream {
+
+               long restartCount;
+               Exception e;
+
+               protected FileBucketOutputStream(
+                       String s,
+                       boolean append,
+                       long restartCount)
+                       throws FileNotFoundException {
+                       super(s, append);
+                       this.restartCount = restartCount;
+                       // if we throw, we're screwed anyway
+                       if (!append) {
+                               resetLength();
+                       }
+                       if (Logger.shouldLog(Logger.DEBUG, this))
+                               e = new Exception("debug");
+               }
+
+               protected void confirmWriteSynchronized() {
+                       if (fileRestartCounter > restartCount)
+                               throw new IllegalStateException("writing to 
file after restart");
+               }
+
+               public void write(byte[] b) throws IOException {
+                       synchronized (FileBucket.this) {
+                               confirmWriteSynchronized();
+                               super.write(b);
+                               length += b.length;
+                       }
+               }
+
+               public void write(byte[] b, int off, int len) throws 
IOException {
+                       synchronized (FileBucket.this) {
+                               confirmWriteSynchronized();
+                               super.write(b, off, len);
+                               length += len;
+                       }
+               }
+
+               public void write(int b) throws IOException {
+                       synchronized (FileBucket.this) {
+                               confirmWriteSynchronized();
+                               if (fileRestartCounter > restartCount)
+                                       throw new 
IllegalStateException("writing to file after restart");
+                               super.write(b);
+                               length++;
+                       }
+               }
+       }
+
+       class FileBucketInputStream extends FileInputStream {
+               Exception e;
+
+               public FileBucketInputStream(File f) throws IOException {
+                       super(f);
+                       if (Logger.shouldLog(Logger.DEBUG, this))
+                               e = new Exception("debug");
+               }
+       }
+
+       public InputStream getInputStream() throws IOException {
+               return file.exists()
+                       ? (InputStream) new FileBucketInputStream(file)
+                       : (InputStream) new NullInputStream();
+       }
+
+       /**
+        * @return the name of the file.
+        */
+       public String getName() {
+               return file.getName();
+       }
+
+       public void resetWrite() {
+               restart = true;
+       }
+
+       public long size() {
+               return length;
+       }
+
+       /**
+        * Returns the file object this buckets data is kept in.
+        */
+       public File getFile() {
+               return file;
+       }
+
+       /**
+        * Actually delete the underlying file. Called by finalizer, will not be
+        * called twice. But length must still be valid when calling it.
+        */
+       protected void deleteFile() {
+               file.delete();
+       }
+
+       public void finalize() {
+               if (Logger.shouldLog(Logger.DEBUG, this))
+                       Logger.debug(this,
+                               "FileBucket Finalizing " + file.getName());
+               if (newFile && file.exists()) {
+                       Logger.debug(this,
+                               "Deleting bucket " + file.getName());
+                       deleteFile();
+                       if (file.exists())
+                               Logger.error(this,
+                                       "Delete failed on bucket " + 
file.getName());
+               }
+               if (Logger.shouldLog(Logger.DEBUG, this))
+                       Logger.debug(this,
+                               "FileBucket Finalized " + file.getName());
+       }
+
+       /**
+        * Return directory used for temp files.
+        */
+       public final synchronized static String getTempDir() {
+               return tempDir;
+       }
+
+       /**
+        * Set temp file directory.
+        * <p>
+        * The directory must exist.
+        */
+       public final synchronized static void setTempDir(String dirName) {
+               File dir = new File(dirName);
+               if (!(dir.exists() && dir.isDirectory() && dir.canWrite())) {
+                       throw new IllegalArgumentException(
+                               "Bad Temp Directory: " + dir.getAbsolutePath());
+               }
+               tempDir = dirName;
+       }
+
+       // determine the temp directory in one of several ways
+
+       static {
+               // Try the Java property (1.2 and above)
+               tempDir = System.getProperty("java.io.tmpdir");
+
+               // Deprecated calls removed.
+
+               // Try TEMP and TMP
+               //      if (tempDir == null) {
+               //          tempDir = System.getenv("TEMP");
+               //      }
+
+               //      if (tempDir == null) {
+               //          tempDir = System.getenv("TMP");
+               //      }
+
+               // make some semi-educated guesses based on OS.
+
+               if (tempDir == null) {
+                       String os = System.getProperty("os.name");
+                       if (os != null) {
+
+                               String[] candidates = null;
+
+                               // XXX: Add more possible OSes here.
+                               if (os.equalsIgnoreCase("Linux")
+                                       || os.equalsIgnoreCase("FreeBSD")) {
+                                       String[] linuxCandidates = { "/tmp", 
"/var/tmp" };
+                                       candidates = linuxCandidates;
+                               } else if (os.equalsIgnoreCase("Windows")) {
+                                       String[] windowsCandidates =
+                                               { "C:\\TEMP", 
"C:\\WINDOWS\\TEMP" };
+                                       candidates = windowsCandidates;
+                               }
+
+                               if (candidates != null) {
+                                       for (int i = 0; i < candidates.length; 
i++) {
+                                               File path = new 
File(candidates[i]);
+                                               if (path.exists()
+                                                       && path.isDirectory()
+                                                       && path.canWrite()) {
+                                                       tempDir = candidates[i];
+                                                       break;
+                                               }
+                                       }
+                               }
+                       }
+               }
+
+               // last resort -- use current working directory
+
+               if (tempDir == null) {
+                       // This can be null -- but that's OK, null => cwd for 
File
+                       // constructor, anyways.
+                       tempDir = System.getProperty("user.dir");
+               }
+       }
+
+       public boolean isReadOnly() {
+               return readOnly;
+       }
+
+       public void setReadOnly() {
+               readOnly = true;
+       }
+}

Added: trunk/freenet/src/freenet/support/io/FileBucketFactory.java
===================================================================
--- trunk/freenet/src/freenet/support/io/FileBucketFactory.java 2005-10-24 
17:08:08 UTC (rev 7450)
+++ trunk/freenet/src/freenet/support/io/FileBucketFactory.java 2005-10-25 
19:33:02 UTC (rev 7451)
@@ -0,0 +1,58 @@
+package freenet.support.io;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.Vector;
+
+import freenet.support.Bucket;
+import freenet.support.BucketFactory;
+import freenet.support.Logger;
+
+public class FileBucketFactory implements BucketFactory {
+    
+    private int enumm = 0;
+    private Vector files = new Vector();
+    
+    // Must have trailing "/"
+    public String rootDir = "";
+
+    public FileBucketFactory() {
+        
+    }
+
+    public FileBucketFactory(String rootDir) {
+        this.rootDir = (rootDir.endsWith(File.separator)
+                        ? rootDir
+                        : (rootDir + File.separator));
+    }
+
+    public FileBucketFactory(File dir) {
+        this(dir.toString());
+    }
+
+    public Bucket makeBucket(long size) {
+        File f;
+        do {
+            f = new File(rootDir + "bffile_" + ++enumm);
+            // REDFLAG: remove hoaky debugging code
+            // System.err.println("----------------------------------------");
+            // Exception e = new Exception("created: " + f.getName());
+            // e.printStackTrace();
+            // System.err.println("----------------------------------------");
+        } while (f.exists());
+        Bucket b = new FileBucket(f, false);
+        files.addElement(f);
+        return b;
+    }
+
+    public void freeBucket(Bucket b) throws IOException {
+        if (!(b instanceof FileBucket)) throw new IOException("not a 
FileBucket!");
+        File f = ((FileBucket) b).getFile();
+        //System.err.println("FREEING: " + f.getName());
+        if (files.removeElement(f)) {
+            if (!f.delete())
+                Logger.error(this, "Delete failed on bucket "+f.getName(), new 
Exception());
+           files.trimToSize();
+       }
+    }
+}

Added: trunk/freenet/src/freenet/support/io/FileUtil.java
===================================================================
--- trunk/freenet/src/freenet/support/io/FileUtil.java  2005-10-24 17:08:08 UTC 
(rev 7450)
+++ trunk/freenet/src/freenet/support/io/FileUtil.java  2005-10-25 19:33:02 UTC 
(rev 7451)
@@ -0,0 +1,25 @@
+package freenet.support.io;
+
+import java.io.File;
+
public class FileUtil {

	/**
	 * Guesstimate real disk usage for a file with a given filename, of a given length.
	 *
	 * It's possible that none of these assumptions are accurate for any
	 * filesystem; this is intended to be a plausible worst case.
	 *
	 * @param file the file; only its name is used, for directory-entry overhead.
	 * @param l the logical length of the file in bytes.
	 * @return the estimated number of bytes consumed on disk.
	 */
	public static long estimateUsage(File file, long l) {
		// Assume 4kB clusters for calculating block usage (NTFS).
		long blockUsage = roundUp(l, 4096);
		// Filename overhead: name bytes plus 100 bytes fixed overhead, rounded
		// up to a whole cluster.
		// NOTE(review): the original comment claimed "512 byte filename
		// entries" but the code rounds to 4096 — behaviour kept as-is, worth
		// confirming which was intended.
		String filename = file.getName();
		int nameLength = filename.getBytes().length + 100;
		long filenameUsage = roundUp(nameLength, 4096);
		// Assume 50 bytes per 1kB block of tree overhead, plus one block
		// (reiser3 worst case).
		long extra = (divRoundUp(l, 1024) + 1) * 50;
		return blockUsage + filenameUsage + extra;
	}

	/** Round n up to the next multiple of unit (n >= 0, unit > 0). */
	private static long roundUp(long n, long unit) {
		return divRoundUp(n, unit) * unit;
	}

	/** Integer division of n by unit, rounding up (n >= 0, unit > 0). */
	private static long divRoundUp(long n, long unit) {
		return (n / unit) + (n % unit > 0 ? 1 : 0);
	}
}

Added: trunk/freenet/src/freenet/support/io/NullInputStream.java
===================================================================
--- trunk/freenet/src/freenet/support/io/NullInputStream.java   2005-10-24 
17:08:08 UTC (rev 7450)
+++ trunk/freenet/src/freenet/support/io/NullInputStream.java   2005-10-25 
19:33:02 UTC (rev 7451)
@@ -0,0 +1,8 @@
+package freenet.support.io;
+import java.io.*;
+
/**
 * An InputStream that is permanently empty: every read immediately reports
 * end-of-stream. Useful as a stand-in for a missing or zero-length source.
 */
public class NullInputStream extends InputStream {

    public NullInputStream() {
        // Stateless; nothing to initialise.
    }

    /**
     * @return -1 always, signalling end of stream.
     */
    public int read() {
        return -1;
    }
}
+

Added: trunk/freenet/src/freenet/support/io/NullOutputStream.java
===================================================================
--- trunk/freenet/src/freenet/support/io/NullOutputStream.java  2005-10-24 
17:08:08 UTC (rev 7450)
+++ trunk/freenet/src/freenet/support/io/NullOutputStream.java  2005-10-25 
19:33:02 UTC (rev 7451)
@@ -0,0 +1,9 @@
+package freenet.support.io;
+import java.io.*;
+
/**
 * An OutputStream that silently discards everything written to it — the
 * stream equivalent of /dev/null.
 */
public class NullOutputStream extends OutputStream {

    public NullOutputStream() {
        // Stateless; nothing to initialise.
    }

    /**
     * Discards the byte.
     */
    public void write(int b) {
    }

    /**
     * Discards the given range of bytes.
     */
    public void write(byte[] buf, int off, int len) {
    }
}
+

_______________________________________________
cvs mailing list
[email protected]
http://emu.freenetproject.org/cgi-bin/mailman/listinfo/cvs

Reply via email to