Revision: 5882
          http://jnode.svn.sourceforge.net/jnode/?rev=5882&view=rev
Author:   galatnm
Date:     2012-01-25 15:46:30 +0000 (Wed, 25 Jan 2012)
Log Message:
-----------
Fixes from Luke concerning HFS+ extents management.

Modified Paths:
--------------
    trunk/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusFile.java
    trunk/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusForkData.java
    trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/Catalog.java
    trunk/fs/src/fs/org/jnode/fs/hfsplus/extent/ExtentDescriptor.java
    trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/BTHeaderRecord.java

Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusFile.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusFile.java       2011-12-22 
15:30:32 UTC (rev 5881)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusFile.java       2012-01-25 
15:46:30 UTC (rev 5882)
@@ -22,11 +22,9 @@
 
 import java.io.IOException;
 import java.nio.ByteBuffer;
-
 import org.jnode.fs.FSFile;
 import org.jnode.fs.FileSystem;
 import org.jnode.fs.hfsplus.catalog.CatalogFile;
-import org.jnode.fs.hfsplus.extent.ExtentDescriptor;
 
 public class HfsPlusFile implements FSFile {
 
@@ -57,12 +55,7 @@
     @Override
     public final void read(final long fileOffset, final ByteBuffer dest) 
throws IOException {
         HfsPlusFileSystem fs = (HfsPlusFileSystem) getFileSystem();
-        for (ExtentDescriptor d : file.getDatas().getExtents()) {
-            if (!d.isEmpty()) {
-                long firstOffset = (long) 
d.getStartOffset(fs.getVolumeHeader().getBlockSize());
-                fs.getApi().read(firstOffset, dest);
-            }
-        }
+        file.getDatas().read(fs,fileOffset,dest);
     }
 
     @Override

Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusForkData.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusForkData.java   2011-12-22 
15:30:32 UTC (rev 5881)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusForkData.java   2012-01-25 
15:46:30 UTC (rev 5882)
@@ -20,6 +20,8 @@
  
 package org.jnode.fs.hfsplus;
 
+import java.io.IOException;
+import java.nio.ByteBuffer;
 import org.jnode.fs.hfsplus.extent.ExtentDescriptor;
 import org.jnode.util.BigEndian;
 
@@ -111,7 +113,32 @@
     public ExtentDescriptor getExtent(int index) {
         return extents[index];
     }
+    /**
+     * Read a block of data
+     *
+     * @param fileSystem the associated file system.
+     * @param offset the offset to read from.
+     * @param buffer the buffer to read into.
+     * @throws java.io.IOException if an error occurs.
+     */
+    public void read(HfsPlusFileSystem fileSystem, long offset, ByteBuffer 
buffer) throws IOException {
+        for (ExtentDescriptor extentDescriptor : extents) {
+            if (buffer.remaining() > 0 && !extentDescriptor.isEmpty()) {
+                long length = 
extentDescriptor.getSize(fileSystem.getVolumeHeader().getBlockSize());
 
+                if (offset != 0 && length < offset) {
+                    offset -= length;
+                } else {
+
+                    long firstOffset = 
extentDescriptor.getStartOffset(fileSystem.getVolumeHeader().getBlockSize());
+                    fileSystem.getApi().read(firstOffset + offset, buffer);
+
+                    offset = 0;
+                }
+            }
+        }
+    }
+
     /**
      * 
      * @param index

Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/Catalog.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/Catalog.java   2011-12-22 
15:30:32 UTC (rev 5881)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/Catalog.java   2012-01-25 
15:46:30 UTC (rev 5882)
@@ -24,13 +24,12 @@
 import java.nio.ByteBuffer;
 import java.util.LinkedList;
 import java.util.List;
-
 import org.apache.log4j.Logger;
 import org.jnode.fs.hfsplus.HFSPlusParams;
 import org.jnode.fs.hfsplus.HfsPlusFileSystem;
+import org.jnode.fs.hfsplus.HfsPlusForkData;
 import org.jnode.fs.hfsplus.HfsUnicodeString;
 import org.jnode.fs.hfsplus.SuperBlock;
-import org.jnode.fs.hfsplus.extent.ExtentDescriptor;
 import org.jnode.fs.hfsplus.tree.BTHeaderRecord;
 import org.jnode.fs.hfsplus.tree.IndexRecord;
 import org.jnode.fs.hfsplus.tree.LeafRecord;
@@ -52,7 +51,7 @@
      */
     private BTHeaderRecord bthr;
 
-    private int catalogHeaderNodeOffset;
+    private HfsPlusForkData catalogFile;
 
     private ByteBuffer buffer;
 
@@ -66,13 +65,12 @@
         log.info("Load B-Tree catalog file.");
         this.fs = fs;
         SuperBlock sb = fs.getVolumeHeader();
-        ExtentDescriptor firstExtent = sb.getCatalogFile().getExtent(0);
-        catalogHeaderNodeOffset = 
firstExtent.getStartOffset(sb.getBlockSize());
-        if (!firstExtent.isEmpty()) {
-            buffer =
-                    
ByteBuffer.allocate(NodeDescriptor.BT_NODE_DESCRIPTOR_LENGTH +
+        catalogFile = sb.getCatalogFile();
+        
+        if(!catalogFile.getExtent(0).isEmpty()) {
+            buffer = 
ByteBuffer.allocate(NodeDescriptor.BT_NODE_DESCRIPTOR_LENGTH +
                             BTHeaderRecord.BT_HEADER_RECORD_LENGTH);
-            fs.getApi().read(catalogHeaderNodeOffset, buffer);
+            catalogFile.read(fs, 0, buffer);
             buffer.rewind();
             byte[] data = ByteBufferUtils.toArray(buffer);
             log.info("Load catalog node descriptor.");
@@ -150,7 +148,7 @@
      */
     public void update() throws IOException {
         SuperBlock vh = fs.getVolumeHeader();
-        int offset = 
vh.getCatalogFile().getExtent(0).getStartOffset(vh.getBlockSize());
+        long offset = 
vh.getCatalogFile().getExtent(0).getStartOffset(vh.getBlockSize());
         fs.getApi().write(offset, this.getBytes());
     }
 
@@ -197,8 +195,7 @@
         LeafRecord lr = null;
         int nodeSize = bthr.getNodeSize();
         ByteBuffer nodeData = ByteBuffer.allocate(nodeSize);
-        fs.getApi().read(catalogHeaderNodeOffset + 
(getBTHeaderRecord().getRootNode() * nodeSize),
-                nodeData);
+        catalogFile.read(fs, (bthr.getRootNode()*nodeSize), nodeData);
         nodeData.rewind();
         byte[] data = ByteBufferUtils.toArray(nodeData);
         NodeDescriptor nd = new NodeDescriptor(nodeData.array(), 0);
@@ -206,9 +203,9 @@
         while (nd.isIndexNode()) {
             CatalogIndexNode node = new CatalogIndexNode(data, nodeSize);
             IndexRecord record = (IndexRecord) node.find(parentID);
-            currentOffset = catalogHeaderNodeOffset + (record.getIndex() * 
nodeSize);
+            currentOffset = record.getIndex() * nodeSize;
             nodeData = ByteBuffer.allocate(nodeSize);
-            fs.getApi().read(currentOffset, nodeData);
+            catalogFile.read(fs, currentOffset, nodeData);
             nodeData.rewind();
             data = ByteBufferUtils.toArray(nodeData);
             nd = new NodeDescriptor(nodeData.array(), 0);
@@ -248,7 +245,7 @@
             int currentNodeNumber = nodeNumber;
             int nodeSize = getBTHeaderRecord().getNodeSize();
             ByteBuffer nodeData = ByteBuffer.allocate(nodeSize);
-            fs.getApi().read(catalogHeaderNodeOffset + (currentNodeNumber * 
nodeSize), nodeData);
+            catalogFile.read(fs, (currentNodeNumber * nodeSize), nodeData);
             byte[] datas = nodeData.array();
             NodeDescriptor nd = new NodeDescriptor(datas, 0);
             if (nd.isIndexNode()) {
@@ -286,7 +283,7 @@
         int currentNodeNumber = getBTHeaderRecord().getRootNode();
         int nodeSize = getBTHeaderRecord().getNodeSize();
         ByteBuffer nodeData = ByteBuffer.allocate(nodeSize);
-        fs.getApi().read(catalogHeaderNodeOffset + (currentNodeNumber * 
nodeSize), nodeData);
+        catalogFile.read(fs, (currentNodeNumber * nodeSize), nodeData);
         NodeDescriptor nd = new NodeDescriptor(nodeData.array(), 0);
         int currentOffset = 0;
         CatalogKey cKey = new CatalogKey(parentID, nodeName);
@@ -294,9 +291,9 @@
             CatalogIndexNode node = new CatalogIndexNode(nodeData.array(), 
nodeSize);
             IndexRecord record = node.find(cKey);
             currentNodeNumber = record.getIndex();
-            currentOffset = catalogHeaderNodeOffset + record.getIndex() * 
nodeSize;
+            currentOffset = record.getIndex() * nodeSize;
             nodeData = ByteBuffer.allocate(nodeSize);
-            fs.getApi().read(currentOffset, buffer);
+            catalogFile.read(fs, currentOffset, buffer);
             node = new CatalogIndexNode(nodeData.array(), nodeSize);
         }
         LeafRecord lr = null;

Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/extent/ExtentDescriptor.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/extent/ExtentDescriptor.java   
2011-12-22 15:30:32 UTC (rev 5881)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/extent/ExtentDescriptor.java   
2012-01-25 15:46:30 UTC (rev 5882)
@@ -89,8 +89,8 @@
      * @param nodeSize the size of a node.
      * @return offset of the extent.
      */
-    public int getStartOffset(int nodeSize) {
-        return startBlock * nodeSize;
+    public long getStartOffset(int nodeSize) {
+        return (long)startBlock * nodeSize;
     }
 
     /**
@@ -108,8 +108,8 @@
      * @param nodeSize the size of a node.
      * @return size of the extent.
      */
-    public int getSize(int nodeSize) {
-        return blockCount * nodeSize;
+    public long getSize(int nodeSize) {
+        return (long)blockCount * nodeSize;
     }
 
     /**

Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/BTHeaderRecord.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/BTHeaderRecord.java       
2011-12-22 15:30:32 UTC (rev 5881)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/BTHeaderRecord.java       
2012-01-25 15:46:30 UTC (rev 5882)
@@ -93,12 +93,12 @@
         lastLeafNode = BigEndian.getInt32(data, 14);
         nodeSize = BigEndian.getInt16(data, 18);
         maxKeyLength = BigEndian.getInt16(data, 20);
-        totalNodes = BigEndian.getInt16(data, 24);
-        freeNodes = BigEndian.getInt16(data, 28);
-        clumpSize = BigEndian.getInt16(data, 32);
-        treeType = BigEndian.getInt16(data, 36);
-        keyCompareType = BigEndian.getInt16(data, 37);
-        attributes = BigEndian.getInt32(data, 39);
+        totalNodes = BigEndian.getInt32(data, 22);
+        freeNodes = BigEndian.getInt32(data, 26);
+        clumpSize = BigEndian.getInt32(data, 32);
+        treeType = BigEndian.getInt8(data,36);
+        keyCompareType = BigEndian.getInt8(data, 37);
+        attributes = BigEndian.getInt32(data, 38);
     }
 
     public byte[] getBytes() {
@@ -114,8 +114,8 @@
         BigEndian.setInt32(data, 26, freeNodes);
         BigEndian.setInt32(data, 32, clumpSize);
         BigEndian.setInt8(data, 36, treeType);
-        BigEndian.setInt8(data, 38, keyCompareType);
-        BigEndian.setInt32(data, 39, attributes);
+        BigEndian.setInt8(data, 37, keyCompareType);
+        BigEndian.setInt32(data, 38, attributes);
         return data;
     }
    

This was sent by the SourceForge.net collaborative development platform, the 
world's largest Open Source development site.


------------------------------------------------------------------------------
Keep Your Developer Skills Current with LearnDevNow!
The most comprehensive online learning library for Microsoft developers
is just $99.99! Visual Studio, SharePoint, SQL - plus HTML5, CSS3, MVC3,
Metro Style Apps, more. Free future releases when you subscribe now!
http://p.sf.net/sfu/learndevnow-d2d
_______________________________________________
Jnode-svn-commits mailing list
Jnode-svn-commits@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/jnode-svn-commits

Reply via email to