Author: toad
Date: 2005-10-22 20:57:25 +0000 (Sat, 22 Oct 2005)
New Revision: 7449

Added:
   trunk/freenet/src/freenet/client/ArchiveContext.java
   trunk/freenet/src/freenet/client/ArchiveElement.java
   trunk/freenet/src/freenet/client/ArchiveFailureException.java
   trunk/freenet/src/freenet/client/ArchiveRestartException.java
   trunk/freenet/src/freenet/client/SplitFetcher.java
Modified:
   trunk/freenet/src/freenet/client/ArchiveHandler.java
   trunk/freenet/src/freenet/client/ArchiveManager.java
   trunk/freenet/src/freenet/client/ClientMetadata.java
   trunk/freenet/src/freenet/client/FetchException.java
   trunk/freenet/src/freenet/client/Fetcher.java
   trunk/freenet/src/freenet/client/HighLevelSimpleClient.java
   trunk/freenet/src/freenet/client/HighLevelSimpleClientImpl.java
   trunk/freenet/src/freenet/client/Metadata.java
   trunk/freenet/src/freenet/keys/ClientCHK.java
   trunk/freenet/src/freenet/support/Bucket.java
Log:
Lots more work on metadata/splitfiles/archive manifests/etc.
Still doesn't build.

Added: trunk/freenet/src/freenet/client/ArchiveContext.java
===================================================================
--- trunk/freenet/src/freenet/client/ArchiveContext.java        2005-10-22 
18:56:48 UTC (rev 7448)
+++ trunk/freenet/src/freenet/client/ArchiveContext.java        2005-10-22 
20:57:25 UTC (rev 7449)
@@ -0,0 +1,24 @@
+package freenet.client;
+
+import java.util.HashSet;
+
+import freenet.keys.ClientKey;
+
+/**
+ * Object passed down a full fetch, including all the recursion.
+ * Used, at present, for detecting archive fetch loops, hence the
+ * name.
+ */
+public class ArchiveContext {
+
+       HashSet soFar = new HashSet();
+       int maxArchiveLevels;
+       
+       public synchronized void doLoopDetection(ClientKey key) throws 
ArchiveFailureException {
+               if(!soFar.add(key))
+                       throw new ArchiveFailureException("Archive loop 
detected");
+               if(soFar.size() > maxArchiveLevels)
+                       throw new 
ArchiveFailureException(ArchiveFailureException.TOO_MANY_LEVELS);
+       }
+
+}

Added: trunk/freenet/src/freenet/client/ArchiveElement.java
===================================================================
--- trunk/freenet/src/freenet/client/ArchiveElement.java        2005-10-22 
18:56:48 UTC (rev 7448)
+++ trunk/freenet/src/freenet/client/ArchiveElement.java        2005-10-22 
20:57:25 UTC (rev 7449)
@@ -0,0 +1,44 @@
+package freenet.client;
+
+import freenet.keys.ClientKey;
+import freenet.keys.FreenetURI;
+import freenet.support.Bucket;
+
+/**
+ * An element in an archive. Does synchronization (on fetches, to avoid
+ * having to do them twice), checks cache, does fetch, adds to cache.
+ *
+ * DO LOOP DETECTION!
+ */
+public class ArchiveElement {
+
+       ArchiveElement(ArchiveManager manager, FreenetURI uri, ClientKey ckey, 
String filename) {
+               this.manager = manager;
+               this.key = uri;
+               this.ckey = ckey;
+               this.filename = filename;
+       }
+       
+       final ArchiveManager manager;
+       final FreenetURI key;
+       final ClientKey ckey;
+       final String filename;
+       
+       /**
+        * Fetch the element.
+        * @throws ArchiveFailureException 
+        */
+       public Bucket get(ArchiveContext archiveContext, FetcherContext 
fetchContext) throws ArchiveFailureException {
+               
+               archiveContext.doLoopDetection(ckey);
+               // AFTER the loop check (possible deadlocks)
+               synchronized(this) {
+                       // Synchronized during I/O to avoid doing it twice
+                       Bucket cached = manager.getCached(key, filename);
+                       if(cached != null) return cached;
+                       Fetcher fetcher = new Fetcher(key, fetchContext, 
archiveContext);
+                       fetcher.realRun();
+                       manager.extractToCache(key, archiveContext);
+               }
+       }
+}

Added: trunk/freenet/src/freenet/client/ArchiveFailureException.java
===================================================================
--- trunk/freenet/src/freenet/client/ArchiveFailureException.java       
2005-10-22 18:56:48 UTC (rev 7448)
+++ trunk/freenet/src/freenet/client/ArchiveFailureException.java       
2005-10-22 20:57:25 UTC (rev 7449)
@@ -0,0 +1,14 @@
+package freenet.client;
+
+/**
+ * Thrown when an archive operation fails.
+ */
+public class ArchiveFailureException extends Exception {
+
+       public static final String TOO_MANY_LEVELS = "Too many archive levels";
+
+       public ArchiveFailureException(String message) {
+               super(message);
+       }
+
+}

Modified: trunk/freenet/src/freenet/client/ArchiveHandler.java
===================================================================
--- trunk/freenet/src/freenet/client/ArchiveHandler.java        2005-10-22 
18:56:48 UTC (rev 7448)
+++ trunk/freenet/src/freenet/client/ArchiveHandler.java        2005-10-22 
20:57:25 UTC (rev 7449)
@@ -1,12 +1,40 @@
 package freenet.client;
 
+import freenet.keys.ClientKey;
+import freenet.support.Bucket;
+
 /**
  * Handles a single archive for ZIP manifests.
  */
 class ArchiveHandler {
 
+       private ArchiveManager manager;
+       private ClientKey key;
+       
+       public ArchiveHandler(ArchiveManager manager, ClientKey key, short 
archiveType) {
+               this.manager = manager;
+               this.key = key;
+       }
+
        public void finalize() {
-               // FIXME: implement
+               // Need to do anything here?
        }
-       
+
+       /**
+        * Get the metadata for this ZIP manifest, as a Bucket.
+        */
+       public Bucket getMetadata(ArchiveContext archiveContext, FetcherContext 
fetchContext) throws ArchiveFailureException, ArchiveRestartException {
+               return get(".metadata", archiveContext, fetchContext);
+       }
+
+       /**
+        * Get a file from this ZIP manifest, as a Bucket.
+        * If possible, read it from cache. If necessary, refetch the 
+        * container and extract it. If that fails, throw.
+        */
+       public synchronized Bucket get(String internalName, ArchiveContext 
archiveContext, FetcherContext fetchContext) throws ArchiveFailureException, 
ArchiveRestartException {
+               ArchiveElement element = 
+                       manager.makeElement(key, internalName);
+               return element.get(archiveContext, fetchContext);
+       }
 }

Modified: trunk/freenet/src/freenet/client/ArchiveManager.java
===================================================================
--- trunk/freenet/src/freenet/client/ArchiveManager.java        2005-10-22 
18:56:48 UTC (rev 7448)
+++ trunk/freenet/src/freenet/client/ArchiveManager.java        2005-10-22 
20:57:25 UTC (rev 7449)
@@ -5,6 +5,8 @@
 import java.util.Set;
 
 import freenet.keys.ClientKey;
+import freenet.keys.FreenetURI;
+import freenet.support.Bucket;
 import freenet.support.LRUHashtable;
 
 /**
@@ -16,9 +18,12 @@
  */
 public class ArchiveManager {
 
-       ArchiveManager(int maxHandlers) {
+       ArchiveManager(int maxHandlers, long maxCachedData, File cacheDir) {
                maxArchiveHandlers = maxHandlers;
                archiveHandlers = new LRUHashtable();
+               this.maxCachedData = maxCachedData;
+               this.cacheDir = cacheDir;
+               storedData = new LRUHashtable();
        }
 
        // ArchiveHandler's
@@ -43,4 +48,64 @@
        final long maxCachedData;
        final File cacheDir;
        final LRUHashtable storedData;
+
+       /**
+        * Create an archive handler. This does not need to know how to
+        * fetch the key, because the methods called later will ask.
+        * It will try to serve from cache, but if that fails, will
+        * re-fetch.
+        * @param key The key of the archive that we are extracting data from.
+        * @return An archive handler. 
+        */
+       public synchronized ArchiveHandler makeHandler(ClientKey key, short 
archiveType) {
+               ArchiveHandler handler = getCached(key);
+               if(handler != null) return handler;
+               handler = new ArchiveHandler(this, key, archiveType);
+               putCached(key, handler);
+               return handler;
+       }
+
+       public synchronized Bucket getCached(FreenetURI key, String filename) {
+               MyKey k = new MyKey(key, filename);
+               ArchiveStoreElement ase = (ArchiveStoreElement) 
storedData.get(k);
+               if(ase == null) return null;
+               return ase.dataAsBucket();
+       }
+       
+       public class MyKey {
+               final FreenetURI key;
+               final String filename;
+               
+               public MyKey(FreenetURI key2, String filename2) {
+                       key = key2;
+                       filename = filename2;
+               }
+
+               public boolean equals(Object o) {
+                       if(this == o) return true;
+                       if(!(o instanceof MyKey)) return false;
+                       MyKey cmp = ((MyKey)o);
+                       return (cmp.key.equals(key) && 
cmp.filename.equals(filename));
+               }
+               
+               public int hashCode() {
+                       return key.hashCode() ^ filename.hashCode();
+               }
+       }
+
+       public class ArchiveStoreElement {
+               MyKey key;
+               boolean finalized;
+               // FIXME implement
+
+               public Bucket dataAsBucket() {
+                       // FIXME implement
+               }
+               
+               public void finalize() {
+                       // FIXME delete file
+                       // Can be called early so check
+               }
+       }
+
 }

Added: trunk/freenet/src/freenet/client/ArchiveRestartException.java
===================================================================
--- trunk/freenet/src/freenet/client/ArchiveRestartException.java       
2005-10-22 18:56:48 UTC (rev 7448)
+++ trunk/freenet/src/freenet/client/ArchiveRestartException.java       
2005-10-22 20:57:25 UTC (rev 7449)
@@ -0,0 +1,10 @@
+package freenet.client;
+
+/**
+ * Thrown when we need to restart a fetch process because of a problem
+ * with an archive. This is usually because an archive has changed
+ * since we last checked.
+ */
+public class ArchiveRestartException extends Exception {
+
+}

Modified: trunk/freenet/src/freenet/client/ClientMetadata.java
===================================================================
--- trunk/freenet/src/freenet/client/ClientMetadata.java        2005-10-22 
18:56:48 UTC (rev 7448)
+++ trunk/freenet/src/freenet/client/ClientMetadata.java        2005-10-22 
20:57:25 UTC (rev 7449)
@@ -11,7 +11,27 @@
        /** The document MIME type */
        private String mimeType;
 
+       ClientMetadata(String mime) {
+               mimeType = mime;
+       }
+
+       /** Create an empty ClientMetadata instance */
+       ClientMetadata() {
+               mimeType = null;
+       }
+       
        public String getMIMEType() {
+               if(mimeType == null || mimeType.length() == 0)
+                       return DEFAULT_MIME_TYPE;
                return mimeType;
        }
+
+       /**
+        * Merge the given ClientMetadata, without overwriting our
+        * existing information.
+        */
+       public void mergeNoOverwrite(ClientMetadata clientMetadata) {
+               if(mimeType == null || mimeType.equals(""))
+                       mimeType = clientMetadata.mimeType;
+       }
 }

Modified: trunk/freenet/src/freenet/client/FetchException.java
===================================================================
--- trunk/freenet/src/freenet/client/FetchException.java        2005-10-22 
18:56:48 UTC (rev 7448)
+++ trunk/freenet/src/freenet/client/FetchException.java        2005-10-22 
20:57:25 UTC (rev 7449)
@@ -8,11 +8,27 @@
                mode = m;
        }
 
+       public FetchException(MetadataParseException e) {
+               mode = INVALID_METADATA;
+               initCause(e);
+       }
+
+       public FetchException(ArchiveFailureException e) {
+               mode = ARCHIVE_FAILURE;
+               initCause(e);
+       }
+
        /** Too many levels of recursion into archives */
        static final int TOO_DEEP_ARCHIVE_RECURSION = 1;
        /** Don't know what to do with splitfile */
        static final int UNKNOWN_SPLITFILE_METADATA = 2;
+       /** Too many ordinary redirects */
+       static final int TOO_MANY_REDIRECTS = 3;
        /** Don't know what to do with metadata */
        static final int UNKNOWN_METADATA = 3;
+       /** Got a MetadataParseException */
+       static final int INVALID_METADATA = 4;
+       /** Got an ArchiveFailureException */
+       static final int ARCHIVE_FAILURE = 5;
        
 }

Modified: trunk/freenet/src/freenet/client/Fetcher.java
===================================================================
--- trunk/freenet/src/freenet/client/Fetcher.java       2005-10-22 18:56:48 UTC 
(rev 7448)
+++ trunk/freenet/src/freenet/client/Fetcher.java       2005-10-22 20:57:25 UTC 
(rev 7449)
@@ -1,11 +1,12 @@
 package freenet.client;
 
+import java.io.IOException;
+
 import freenet.keys.ClientKey;
 import freenet.keys.FreenetURI;
 import freenet.keys.KeyBlock;
-import freenet.node.SimpleLowLevelClient;
 import freenet.support.Bucket;
-import freenet.support.BucketFactory;
+import freenet.support.Logger;
 
 /** Class that does the actual fetching. Does not have to have a user friendly
  * interface!
@@ -14,20 +15,38 @@
 
        final FreenetURI origURI;
        final FetcherContext ctx;
+       final ArchiveContext archiveContext;
        
-       public Fetcher(FreenetURI uri, FetcherContext ctx) {
+       public Fetcher(FreenetURI uri, FetcherContext ctx, ArchiveContext 
archiveContext) {
                this.origURI = uri;
                this.ctx = ctx;
+               this.archiveContext = archiveContext;
        }
 
+       public FetchResult run() throws FetchException {
+               while(true) {
+                       try {
+                               return realRun();
+                       } catch (ArchiveRestartException e) {
+                               continue;
+                       } catch (MetadataParseException e) {
+                               throw new FetchException(e);
+                       } catch (ArchiveFailureException e) {
+                               
if(e.getMessage().equals(ArchiveFailureException.TOO_MANY_LEVELS))
+                                       throw new 
FetchException(FetchException.TOO_DEEP_ARCHIVE_RECURSION);
+                               throw new FetchException(e);
+                       }
+               }
+       }
+       
        /**
         * Run the actual fetch.
         * @return The result of the fetch - successful or not.
+        * @throws FetchException 
+        * @throws MetadataParseException 
+        * @throws ArchiveFailureException 
         */
-       public FetchResult run(int archiveRecursionLevel) {
-               if(archiveRecursionLevel > ctx.maxArchiveRecursionLevel) {
-                       throw new 
FetchException(FetchException.TOO_DEEP_ARCHIVE_RECURSION);
-               }
+       public FetchResult realRun() throws FetchException, 
ArchiveRestartException, MetadataParseException, ArchiveFailureException {
                FreenetURI uri = origURI;
                ClientKey key = ClientKey.get(origURI);
                ClientMetadata dm = new ClientMetadata();
@@ -44,13 +63,17 @@
                        
                        if(!key.isMetadata()) {
                                // Just return the data
-                               return new FetchResult(dm, 
ctx.bucketFactory.makeImmutableBucket(data));
+                               try {
+                                       return new FetchResult(dm, 
ctx.bucketFactory.makeImmutableBucket(data));
+                               } catch (IOException e) {
+                                       Logger.error(this, "Could not capture 
data - disk full?: "+e, e);
+                               }
                        }
                        
                        // Else need to parse the metadata
                        // This will throw if it finds an error, including 
semi-errors
                        // such as too-big-indirect-metadata
-                       Metadata metadata = new Metadata(data);
+                       Metadata metadata = Metadata.construct(data);
                        
                        while(true) {
                                
@@ -72,17 +95,17 @@
                                        }
                                        continue; // process the new metadata
                                } else if(metadata.isSingleFileRedirect()) {
-                                       key = metadata.getSingleTarget();
+                                       key = 
ClientKey.get(metadata.getSingleTarget());
                                        if(metadata.isArchiveManifest()) {
-                                               zip = 
ctx.archiveManager.makeHandler(key, archiveRecursionLevel + 1, context);
-                                               Bucket metadataBucket = 
zip.getMetadata();
-                                               metadata = new 
Metadata(metadataBucket);
+                                               zip = 
ctx.archiveManager.makeHandler(key, metadata.getArchiveType());
+                                               Bucket metadataBucket = 
zip.getMetadata(archiveContext, ctx);
+                                               metadata = 
Metadata.construct(metadataBucket);
                                                continue;
                                        }
                                        metadata = null;
-                                       
dm.mergeNoOverwrite(metadata.getDocumentMetadata());
+                                       
dm.mergeNoOverwrite(metadata.getClientMetadata());
                                        continue;
-                               } else if(metadata.isZIPInternalRedirect() && 
zip != null) {
+                               } else if(metadata.isArchiveInternalRedirect() 
&& zip != null) {
                                        /** This is the whole document:
                                         * Metadata: ZIP manifest -> fetch ZIP 
file, read .metadata
                                         * .metadata: simple manifest -> look 
up filename ->
@@ -91,28 +114,26 @@
                                         * 
                                         * Now, retrieve the data
                                         */
-                                       Bucket result = 
zip.get(metadata.getZIPInternalName());
-                                       
dm.mergeNoOverwrite(metadata.getDocumentMetadata());
+                                       Bucket result = 
zip.get(metadata.getZIPInternalName(), archiveContext, ctx);
+                                       
dm.mergeNoOverwrite(metadata.getClientMetadata());
                                        return new FetchResult(dm, result);
                                } else if(metadata.isSplitfile()) {
                                        
                                        int j;
                                        for(j=0;j<ctx.maxLevels;j++) {
-                                       
-                                               // FIXME need to pass in 
whatever settings SF wants above
-                                               SplitFetcher sf = new 
SplitFetcher(metadata, ctx.maxTempLength);
-                                               Bucket sfResult = sf.run(); // 
will throw in event of error
+                                               SplitFetcher sf = new 
SplitFetcher(metadata, ctx.maxTempLength, archiveContext, ctx);
+                                               Bucket sfResult = sf.fetch(); 
// will throw in event of error
                                                
                                                
if(metadata.isSimpleSplitfile()) {
-                                                       return new 
FetchResult(metadata.getDocumentMetadata(), sfResult);
+                                                       return new 
FetchResult(metadata.getClientMetadata(), sfResult);
                                                } else 
if(metadata.isMultiLevelMetadata()) {
-                                                       metadata = new 
Metadata(sfResult);
+                                                       metadata = 
Metadata.construct(sfResult);
                                                        
if(!metadata.isMultiLevelMetadata())
                                                                break; // try 
the new metadata
                                                } else 
if(metadata.isArchiveManifest()) {
-                                                       zip = 
ctx.archiveManager.getHandler(key, archiveRecursionLevel + 1, context);
-                                                       Bucket metadataBucket = 
zip.getMetadata();
-                                                       metadata = new 
Metadata(metadataBucket);
+                                                       zip = 
ctx.archiveManager.makeHandler(key, metadata.getArchiveType());
+                                                       Bucket metadataBucket = 
zip.getMetadata(archiveContext, ctx);
+                                                       metadata = 
Metadata.construct(metadataBucket);
                                                        break;
                                                } else {
                                                        throw new 
FetchException(FetchException.UNKNOWN_SPLITFILE_METADATA);
@@ -126,10 +147,8 @@
                                        throw new 
FetchException(FetchException.UNKNOWN_METADATA);
                                }
                        } // loop (metadata)
-               } // loop (redirects)
+               }
                // Too many redirects
-               // FIXME Throw an exception
-               // TODO Auto-generated method stub
-               return null;
+               throw new FetchException(FetchException.TOO_MANY_REDIRECTS);
        }
 }

Modified: trunk/freenet/src/freenet/client/HighLevelSimpleClient.java
===================================================================
--- trunk/freenet/src/freenet/client/HighLevelSimpleClient.java 2005-10-22 
18:56:48 UTC (rev 7448)
+++ trunk/freenet/src/freenet/client/HighLevelSimpleClient.java 2005-10-22 
20:57:25 UTC (rev 7449)
@@ -16,8 +16,9 @@
 
        /**
         * Blocking fetch of a URI
+        * @throws FetchException If there is an error fetching the data
         */
-       public FetchResult fetch(FreenetURI uri);
+       public FetchResult fetch(FreenetURI uri) throws FetchException;
 
        /**
         * Blocking insert of a URI

Modified: trunk/freenet/src/freenet/client/HighLevelSimpleClientImpl.java
===================================================================
--- trunk/freenet/src/freenet/client/HighLevelSimpleClientImpl.java     
2005-10-22 18:56:48 UTC (rev 7448)
+++ trunk/freenet/src/freenet/client/HighLevelSimpleClientImpl.java     
2005-10-22 20:57:25 UTC (rev 7449)
@@ -2,15 +2,23 @@
 
 import freenet.keys.FreenetURI;
 import freenet.node.SimpleLowLevelClient;
+import freenet.support.BucketFactory;
 
 public class HighLevelSimpleClientImpl implements HighLevelSimpleClient {
 
-       private SimpleLowLevelClient client;
+       private final SimpleLowLevelClient client;
+       private final ArchiveManager archiveManager;
+       private final BucketFactory bucketFactory;
        private long curMaxLength;
        private long curMaxTempLength;
+       public static final int MAX_REDIRECTS = 10;
+       public static final int MAX_METADATA_LEVELS = 5;
+       public static final int MAX_ARCHIVE_LEVELS = 5;
        
-       public HighLevelSimpleClientImpl(SimpleLowLevelClient client) {
+       public HighLevelSimpleClientImpl(SimpleLowLevelClient client, 
ArchiveManager mgr, BucketFactory bf) {
                this.client = client;
+               archiveManager = mgr;
+               bucketFactory = bf;
        }
        
        public void setMaxLength(long maxLength) {
@@ -21,9 +29,11 @@
                curMaxTempLength = maxIntermediateLength;
        }
 
-       public FetchResult fetch(FreenetURI uri) {
-               Fetcher f = new Fetcher(uri, client, curMaxLength, 
curMaxTempLength);
-               return f.run(0);
+       public FetchResult fetch(FreenetURI uri) throws FetchException {
+               FetcherContext context = new FetcherContext(client, 
curMaxLength, curMaxLength, 
+                               MAX_REDIRECTS, MAX_METADATA_LEVELS, 
MAX_ARCHIVE_LEVELS, archiveManager, bucketFactory);
+               Fetcher f = new Fetcher(uri, context, new ArchiveContext());
+               return f.run();
        }
 
        public FreenetURI insert(InsertBlock insert) {

Modified: trunk/freenet/src/freenet/client/Metadata.java
===================================================================
--- trunk/freenet/src/freenet/client/Metadata.java      2005-10-22 18:56:48 UTC 
(rev 7448)
+++ trunk/freenet/src/freenet/client/Metadata.java      2005-10-22 20:57:25 UTC 
(rev 7449)
@@ -6,6 +6,8 @@
 import java.util.HashMap;
 import java.util.HashSet;
 
+import freenet.keys.ClientCHK;
+import freenet.keys.ClientKey;
 import freenet.keys.FreenetURI;
 import freenet.support.Bucket;
 import freenet.support.Logger;
@@ -18,9 +20,29 @@
        /** Soft limit, to avoid memory DoS */
        static final int MAX_SPLITFILE_BLOCKS = 100*1000;
        
+       public static Metadata construct(byte[] data) throws 
MetadataParseException {
+               try {
+                       return new Metadata(data);
+               } catch (IOException e) {
+                       MetadataParseException e1 = new 
MetadataParseException("Caught "+e);
+                       e1.initCause(e);
+                       throw e1;
+               }
+       }
+       
+       public static Metadata construct(Bucket data) throws 
MetadataParseException {
+               try {
+                       return new Metadata(data);
+               } catch (IOException e) {
+                       MetadataParseException e1 = new 
MetadataParseException("Caught "+e);
+                       e1.initCause(e);
+                       throw e1;
+               }
+       }
+       
        /** Parse some metadata from a byte[] 
         * @throws IOException If the data is incomplete, or something weird 
happens. */
-       public Metadata(byte[] data) throws IOException {
+       private Metadata(byte[] data) throws IOException {
                this(new DataInputStream(new ByteArrayInputStream(data)), 
false, data.length);
        }
 
@@ -32,7 +54,7 @@
 
        /** Parse some metadata from a DataInputStream
         * @throws IOException If an I/O error occurs, or the data is 
incomplete. */
-       public Metadata(DataInputStream dis, boolean 
acceptZipInternalRedirects, long length) throws IOException {
+       public Metadata(DataInputStream dis, boolean 
acceptZipInternalRedirects, long length) throws IOException, 
MetadataParseException {
                long magic = dis.readLong();
                if(magic != FREENET_METADATA_MAGIC)
                        throw new MetadataParseException("Invalid magic 
"+magic);
@@ -121,6 +143,8 @@
                        }
                }
                
+               clientMetadata = new ClientMetadata(mimeType);
+               
                if((!splitfile) && documentType == SIMPLE_REDIRECT || 
documentType == ZIP_MANIFEST) {
                        simpleRedirectKey = readKey(dis);
                } else if(splitfile) {
@@ -187,6 +211,12 @@
                                manifestEntries.put(name, data);
                        }
                }
+               
+               if(documentType == ZIP_INTERNAL_REDIRECT) {
+                       int len = (dis.readByte() & 0xff);
+                       byte[] buf = new byte[len];
+                       nameInArchive = new String(buf);
+               }
        }
        
        /**
@@ -283,4 +313,96 @@
        int manifestEntryCount;
        /** Manifest entries by name */
        HashMap manifestEntries;
+       
+       /** ZIP internal redirect: name of file in ZIP */
+       String nameInArchive;
+
+       ClientMetadata clientMetadata;
+       
+       public boolean isSimpleManifest() {
+               return documentType == SIMPLE_MANIFEST;
+       }
+
+       /**
+        * Get the sub-document in a manifest file with the given name.
+        * @throws MetadataParseException 
+        */
+       public Metadata getDocument(String name) throws MetadataParseException {
+               byte[] data = (byte[]) manifestEntries.get(name);
+               if(data == null) return null;
+               return construct(data);
+       }
+
+       /**
+        * The default document is the one which has an empty name.
+        * @throws MetadataParseException 
+        */
+       public Metadata getDefaultDocument() throws MetadataParseException {
+               return getDocument("");
+       }
+
+       /**
+        * Does the metadata point to a single URI?
+        */
+       public boolean isSingleFileRedirect() {
+               return ((!splitfile) &&
+                               documentType == SIMPLE_REDIRECT || documentType 
== MULTI_LEVEL_METADATA ||
+                               documentType == ZIP_MANIFEST);
+       }
+
+       /**
+        * Return the single target of this URI.
+        */
+       public FreenetURI getSingleTarget() {
+               return simpleRedirectKey;
+       }
+
+       /**
+        * Is this a ZIP manifest?
+        */
+       public boolean isArchiveManifest() {
+               return documentType == ZIP_MANIFEST;
+       }
+
+       /**
+        * Is this a ZIP internal redirect?
+        * @return true if this is a ZIP internal redirect
+        */
+       public boolean isArchiveInternalRedirect() {
+               return documentType == ZIP_INTERNAL_REDIRECT;
+       }
+
+       /**
+        * Return the name of the document referred to in the archive,
+        * if this is a zip internal redirect.
+        */
+       public String getZIPInternalName() {
+               return nameInArchive;
+       }
+
+       /**
+        * Return the client metadata (MIME type etc).
+        */
+       public ClientMetadata getClientMetadata() {
+               return clientMetadata;
+       }
+
+       /** Is this a splitfile manifest? */
+       public boolean isSplitfile() {
+               return splitfile;
+       }
+
+       /** Is this a simple splitfile? */
+       public boolean isSimpleSplitfile() {
+               return splitfile && documentType == SIMPLE_REDIRECT;
+       }
+
+       public boolean isMultiLevelMetadata() {
+               return documentType == MULTI_LEVEL_METADATA;
+       }
+
+       /** What kind of archive is it? */
+       public short getArchiveType() {
+               return archiveType;
+       }
 }

Added: trunk/freenet/src/freenet/client/SplitFetcher.java
===================================================================
--- trunk/freenet/src/freenet/client/SplitFetcher.java  2005-10-22 18:56:48 UTC 
(rev 7448)
+++ trunk/freenet/src/freenet/client/SplitFetcher.java  2005-10-22 20:57:25 UTC 
(rev 7449)
@@ -0,0 +1,19 @@
+package freenet.client;
+
+import freenet.support.Bucket;
+
+/**
+ * Class to fetch a splitfile.
+ */
+public class SplitFetcher {
+
+       public SplitFetcher(Metadata metadata, long maxTempLength, 
ArchiveContext archiveContext, FetcherContext ctx) {
+               // TODO Auto-generated constructor stub
+       }
+
+       public Bucket fetch() {
+               // TODO Auto-generated method stub
+               return null;
+       }
+
+}

Modified: trunk/freenet/src/freenet/keys/ClientCHK.java
===================================================================
--- trunk/freenet/src/freenet/keys/ClientCHK.java       2005-10-22 18:56:48 UTC 
(rev 7448)
+++ trunk/freenet/src/freenet/keys/ClientCHK.java       2005-10-22 20:57:25 UTC 
(rev 7449)
@@ -1,5 +1,7 @@
 package freenet.keys;
 
+import java.io.DataInputStream;
+import java.io.IOException;
 import java.net.MalformedURLException;
 
 import freenet.support.Base64;
@@ -45,10 +47,32 @@
         if(extra == null || extra.length < 3)
             throw new MalformedURLException();
         cryptoAlgorithm = (short)(((extra[0] & 0xff) << 8) + (extra[1] & 
0xff));
+               if(cryptoAlgorithm != ALGO_AES_PCFB_256)
+                       throw new MalformedURLException("Invalid crypto 
algorithm");
         compressed = (extra[2] & 0x01) != 0;
         controlDocument = (extra[2] & 0x02) != 0;
     }
 
+    /**
+     * Create from a raw binary CHK. This expresses the key information
+     * in as few bytes as possible.
+     * @throws IOException 
+     */
+       private ClientCHK(DataInputStream dis) throws IOException {
+               byte[] extra = new byte[EXTRA_LENGTH];
+               dis.readFully(extra);
+        cryptoAlgorithm = (short)(((extra[0] & 0xff) << 8) + (extra[1] & 
0xff));
+               if(cryptoAlgorithm != ALGO_AES_PCFB_256)
+                       throw new MalformedURLException("Invalid crypto 
algorithm");
+        compressed = (extra[2] & 0x01) != 0;
+        controlDocument = (extra[2] & 0x02) != 0;
+               routingKey = new byte[NodeCHK.KEY_LENGTH];
+               dis.readFully(routingKey);
+               cryptoKey = new byte[CRYPTO_KEY_LENGTH];
+               dis.readFully(cryptoKey);
+       }
+
+    
     byte[] routingKey;
     byte[] cryptoKey;
     boolean compressed;
@@ -60,7 +84,9 @@
                Base64.encode(cryptoKey)+","+compressed+","+controlDocument+
                ","+cryptoAlgorithm;
     }
-    
+
+    static final short EXTRA_LENGTH = 3;
+    static final short CRYPTO_KEY_LENGTH = 32;
     static final short ALGO_AES_PCFB_256 = 1;
 
     /**
@@ -84,4 +110,16 @@
             (byte)((compressed ? 1 : 0) + (controlDocument ? 2 : 0));
         return new FreenetURI("CHK", "", routingKey, cryptoKey, extra);
     }
+
+    /**
+     * Read a raw binary CHK. This is an ultra-compact representation, for
+     * splitfile metadata etc.
+     */
+       public static ClientCHK readRawBinaryKey(DataInputStream dis) throws 
IOException {
+               return new ClientCHK(dis);
+       }
+
+       public boolean isMetadata() {
+               return controlDocument;
+       }
 }

Modified: trunk/freenet/src/freenet/support/Bucket.java
===================================================================
--- trunk/freenet/src/freenet/support/Bucket.java       2005-10-22 18:56:48 UTC 
(rev 7448)
+++ trunk/freenet/src/freenet/support/Bucket.java       2005-10-22 20:57:25 UTC 
(rev 7449)
@@ -36,6 +36,12 @@
      */
     public long size();
 
+    /**
+     * Convert the contents of the bucket to a byte array.
+     * Don't use this unless you know the bucket is small!
+     */
+       public byte[] toByteArray();
+
 }
 
 

_______________________________________________
cvs mailing list
[email protected]
http://emu.freenetproject.org/cgi-bin/mailman/listinfo/cvs

Reply via email to