vgritsenko    2003/12/21 17:38:42

  Modified:    java/src/org/apache/xindice/core/filer HashFiler.java
  Log:
  Reduce synchronization scope when adding a record - synchronize only while searching for a free page.
  Add an attempt to release the free page if the write fails.
  Add a check for large keys (over one page in size) - HashFiler does not support them.
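
  The heart of the change is a claim-then-write pattern: the new
  seekInsertionPage() finds a page and marks it RECORD while still holding the
  lock, so concurrent writers skip it, and the expensive writeValue() I/O then
  runs outside the lock. Below is a minimal, self-contained sketch of that
  pattern; Slot, ClaimThenWrite, and claimFreeSlot are hypothetical names for
  illustration, not the Xindice API:

    import java.util.ArrayList;
    import java.util.List;

    public class ClaimThenWrite {
        static class Slot {
            boolean busy;
            byte[] data;
        }

        private final List<Slot> slots = new ArrayList<Slot>();

        // Short critical section: find a free slot and mark it busy so
        // other writers skip it once the lock is released.
        private synchronized Slot claimFreeSlot() {
            for (Slot s : slots) {
                if (!s.busy) {
                    s.busy = true;
                    return s;
                }
            }
            Slot s = new Slot();
            s.busy = true;
            slots.add(s);
            return s;
        }

        public void write(byte[] value) {
            Slot s = claimFreeSlot();
            try {
                s.data = value.clone();   // slow write, done without the lock
                s = null;                 // success: nothing to roll back
            } finally {
                if (s != null) {
                    s.busy = false;       // write failed: release the claim,
                                          // as the DELETED rollback does below
                }
            }
        }
    }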
  
  Revision  Changes    Path
  1.19      +72 -51    xml-xindice/java/src/org/apache/xindice/core/filer/HashFiler.java
  
  Index: HashFiler.java
  ===================================================================
  RCS file: /home/cvs/xml-xindice/java/src/org/apache/xindice/core/filer/HashFiler.java,v
  retrieving revision 1.18
  retrieving revision 1.19
  diff -u -r1.18 -r1.19
  --- HashFiler.java    18 Dec 2003 15:05:20 -0000      1.18
  +++ HashFiler.java    22 Dec 2003 01:38:42 -0000      1.19
  @@ -188,66 +188,87 @@
           return null;
       }
   
  +    private Page seekInsertionPage(Key key) throws IOException {
  +        int hash = key.getHash();
  +        long pageNum = hash % fileHeader.getPageCount();
  +
  +        // Synchronize by chain head page
  +        Page p = getPage(pageNum);
  +        synchronized (p) {
  +            HashPageHeader ph;
  +            while (true) {
  +                ph = (HashPageHeader) p.getPageHeader();
  +                if (ph.getStatus() == UNUSED || ph.getStatus() == DELETED
  +                        || (ph.getStatus() == RECORD && ph.getKeyHash() == key.getHash() && p.getKey().equals(key))) {
  +                    // Found free page
  +                    break;
  +                }
  +
  +                pageNum = ph.getNextCollision();
  +                if (pageNum == -1) {
  +                    // Reached end of chain, add new page
  +                    Page np = getFreePage();
  +
  +                    ph.setNextCollision(np.getPageNum().longValue());
  +                    p.write();
  +
  +                    p = np;
  +                    ph = (HashPageHeader) p.getPageHeader();
  +                    ph.setNextCollision(NO_PAGE);
  +                    break;
  +                }
  +
  +                // Go to next page in chain
  +                p = getPage(pageNum);
  +            }
  +
  +            // Here we have a page
  +            long t = System.currentTimeMillis();
  +            if (ph.getStatus() == UNUSED || ph.getStatus() == DELETED) {
  +                // This is a new Record
  +                fileHeader.incRecordCount();
  +                ph.setCreated(t);
  +            }
  +            ph.setModified(t);
  +            ph.setStatus(RECORD);
  +
  +        // Write modifications to the page header before exiting the synchronization block
  +            // This will prevent other threads from getting this same page
  +            p.write();
  +        }
  +
  +        return p;
  +    }
  +
       public boolean writeRecord(Key key, Value value) throws DBException {
  -        if (key == null || key.equals("")) {
  +        // Check that key is not larger than space on the page
  +        if (key == null || key.equals("") || key.getLength() > fileHeader.getPageSize() - fileHeader.getPageHeaderSize()) {
             throw new FilerException(FaultCodes.DBE_CANNOT_CREATE, "Invalid key: '" + key + "'");
           }
           if (value == null) {
             throw new FilerException(FaultCodes.DBE_CANNOT_CREATE, "Invalid null value");
           }
           checkOpened();
  +        Page p = null;
           try {
  -            int hash = key.getHash();
  -            long pageNum = hash % fileHeader.getPageCount();
  -
  -            // Synchronize by chain head page
  -            Page p = getPage(pageNum);
  -            synchronized (p) {
  -                HashPageHeader ph;
  -                while (true) {
  -                    ph = (HashPageHeader) p.getPageHeader();
  -                    if (ph.getStatus() == UNUSED || ph.getStatus() == DELETED
  -                            || (ph.getStatus() == RECORD && ph.getKeyHash() == key.getHash() && p.getKey().equals(key))) {
  -                        // Found free page
  -                        break;
  -                    }
  -
  -                    pageNum = ph.getNextCollision();
  -                    if (pageNum == -1) {
  -                        // Reached end of chain, add new page
  -                        Page np = getFreePage();
  -                        ph.setNextCollision(np.getPageNum().longValue());
  -                        p.write();
  -                        p = np;
  -                        ph = (HashPageHeader) p.getPageHeader();
  -                        ph.setNextCollision(NO_PAGE);
  -                        break;
  -                    }
  -
  -                    // Go to next page in chain
  -                    p = getPage(pageNum);
  -                }
  -
  -                // Here we have a page
  -                long t = System.currentTimeMillis();
  -                if (ph.getStatus() == UNUSED || ph.getStatus() == DELETED) {
  -                    // This is a new Record
  -                    fileHeader.incRecordCount();
  -                    ph.setCreated(t);
  -                }
  -                ph.setModified(t);
  -                ph.setStatus(RECORD);
  -
  -                p.setKey(key);
  -                writeValue(p, value);
  -            }
  -
  -            flush();
  +            p = seekInsertionPage(key);
  +            p.setKey(key);
  +            writeValue(p, value);
  +            p = null;
           } catch (Exception e) {
  -            if (log.isWarnEnabled()) {
  -                log.warn("ignored exception", e);
  +            throw new FilerException(FaultCodes.DBE_CANNOT_CREATE, "Exception: " + e);
  +        } finally {
  +            if (p != null) {
  +                p.getPageHeader().setStatus(DELETED);
  +                try {
  +                    p.write();
  +                } catch (IOException ignored) {
  +                    // Double exception
  +                }
               }
           }
  +
  +        flush();
           return true;
       }
   
  
  
  
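  For reference, a hedged sketch of the caller-visible effect of the patch. It
  assumes Key and Value accept String constructors as elsewhere in the Xindice
  core, and that an opened HashFiler is obtained somewhere else;
  LargeKeyCheckDemo and tryWrite are illustrative names only:

    import org.apache.xindice.core.DBException;
    import org.apache.xindice.core.data.Key;
    import org.apache.xindice.core.data.Value;
    import org.apache.xindice.core.filer.Filer;

    public class LargeKeyCheckDemo {
        // "filer" is assumed to be an opened HashFiler instance.
        static void tryWrite(Filer filer, String key, String value) {
            try {
                filer.writeRecord(new Key(key), new Value(value));
                System.out.println("stored: " + key);
            } catch (DBException e) {
                // Before 1.19 a failure here was logged and swallowed and
                // writeRecord() still returned true; now an oversized key
                // (longer than pageSize - pageHeaderSize) or a failed page
                // write surfaces as a FilerException (DBE_CANNOT_CREATE).
                System.out.println("rejected: " + e.getMessage());
            }
        }
    }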
