Author: knopp
Date: Tue Mar 31 15:42:32 2009
New Revision: 760488
URL: http://svn.apache.org/viewvc?rev=760488&view=rev
Log: (empty)
Added:
    wicket/sandbox/knopp/experimental/wicket-ng/src/main/java/org/apache/wicket/page/persistent/disk/FileChannelPool.java   (with props)
    wicket/sandbox/knopp/experimental/wicket-ng/src/main/java/org/apache/wicket/page/persistent/disk/PageWindowManager.java   (with props)
    wicket/sandbox/knopp/experimental/wicket-ng/src/main/java/org/apache/wicket/util/collections/
    wicket/sandbox/knopp/experimental/wicket-ng/src/main/java/org/apache/wicket/util/collections/IntHashMap.java   (with props)
    wicket/sandbox/knopp/experimental/wicket-ng/src/test/java/org/apache/wicket/page/
    wicket/sandbox/knopp/experimental/wicket-ng/src/test/java/org/apache/wicket/page/persistent/
    wicket/sandbox/knopp/experimental/wicket-ng/src/test/java/org/apache/wicket/page/persistent/disk/
    wicket/sandbox/knopp/experimental/wicket-ng/src/test/java/org/apache/wicket/page/persistent/disk/PageWindowManagerTest.java   (with props)
Modified:
    wicket/sandbox/knopp/experimental/wicket-ng/src/main/java/org/apache/wicket/page/AbstractPageManager.java
Modified: wicket/sandbox/knopp/experimental/wicket-ng/src/main/java/org/apache/wicket/page/AbstractPageManager.java
URL: http://svn.apache.org/viewvc/wicket/sandbox/knopp/experimental/wicket-ng/src/main/java/org/apache/wicket/page/AbstractPageManager.java?rev=760488&r1=760487&r2=760488&view=diff
==============================================================================
--- wicket/sandbox/knopp/experimental/wicket-ng/src/main/java/org/apache/wicket/page/AbstractPageManager.java (original)
+++ wicket/sandbox/knopp/experimental/wicket-ng/src/main/java/org/apache/wicket/page/AbstractPageManager.java Tue Mar 31 15:42:32 2009
@@ -129,7 +129,17 @@
logger.error("Error detaching page", e);
}
}
- storeTouchedPages(touchedPages);
+
+ // store pages that are not stateless
+ List<IPage> statefulPages = new ArrayList<IPage>(touchedPages.size());
+ for (IPage page : touchedPages)
+ {
+ if (!page.isPageStateless())
+ {
+ statefulPages.add(page);
+ }
+ }
+ storeTouchedPages(statefulPages);
}
List<IPage> touchedPages = new ArrayList<IPage>();
Added: wicket/sandbox/knopp/experimental/wicket-ng/src/main/java/org/apache/wicket/page/persistent/disk/FileChannelPool.java
URL: http://svn.apache.org/viewvc/wicket/sandbox/knopp/experimental/wicket-ng/src/main/java/org/apache/wicket/page/persistent/disk/FileChannelPool.java?rev=760488&view=auto
==============================================================================
--- wicket/sandbox/knopp/experimental/wicket-ng/src/main/java/org/apache/wicket/page/persistent/disk/FileChannelPool.java (added)
+++ wicket/sandbox/knopp/experimental/wicket-ng/src/main/java/org/apache/wicket/page/persistent/disk/FileChannelPool.java Tue Mar 31 15:42:32 2009
@@ -0,0 +1,317 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.wicket.page.persistent.disk;
+
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.RandomAccessFile;
+import java.nio.channels.FileChannel;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.LinkedList;
+import java.util.Map;
+import java.util.Set;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Thread safe pool of {@link FileChannel} objects.
+ * <p>
+ * Opening and closing a file is an expensive operation and under certain circumstances this can
+ * significantly harm performance, because on every close the file system cache might be flushed.
+ * <p>
+ * To minimize the negative impact, opened files can be pooled, which is the responsibility of the
+ * {@link FileChannelPool} class.
+ * <p>
+ * {@link FileChannelPool} allows specifying the maximum number of open {@link FileChannel}s.
+ * <p>
+ * Note that under certain circumstances (when there are no empty slots in the pool) the initial
+ * capacity can be exceeded (more files are opened than the specified capacity). If this happens,
+ * a warning is written to the log, as this probably means that there is a problem with the page store.
+ *
+ * @author Matej Knopp
+ */
+public class FileChannelPool
+{
+ private final Map<String, FileChannel> nameToChannel = new HashMap<String, FileChannel>();
+ private final Map<FileChannel, String> channelToName = new HashMap<FileChannel, String>();
+ private final Map<FileChannel, Integer> channelToUseCount = new HashMap<FileChannel, Integer>();
+ private final LinkedList<FileChannel> idleChannels = new LinkedList<FileChannel>();
+ private final Set<FileChannel> channelsToDeleteOnReturn = new HashSet<FileChannel>();
+
+ private final int capacity;
+
+ /**
+ * Construct.
+ *
+ * @param capacity
+ * Maximum number of opened file channels.
+ */
+ public FileChannelPool(int capacity)
+ {
+ this.capacity = capacity;
+
+ if (capacity < 1)
+ {
+ throw new IllegalArgumentException("Capacity must be at least one.");
+ }
+
+ log.debug("Starting file channel pool with capacity of " + capacity + " channels");
+ }
+
+ /**
+ * Creates a new file channel with the specified file name.
+ *
+ * @param fileName
+ * @param createIfDoesNotExist
+ * in case the file does not exist, this parameter determines if the file should be created
+ * @return file channel or null
+ */
+ private FileChannel newFileChannel(String fileName, boolean createIfDoesNotExist)
+ {
+ File file = new File(fileName);
+ if (file.exists() == false && createIfDoesNotExist == false)
+ {
+ return null;
+ }
+
+ try
+ {
+ FileChannel channel = new RandomAccessFile(file, "rw").getChannel();
+ return channel;
+ }
+ catch (FileNotFoundException e)
+ {
+ throw new RuntimeException(e);
+ }
+ }
+
+
+ /**
+ * Tries to reduce (close) enough channels so that at least one slot is free (i.e. at most
+ * capacity - 1 channels remain open).
+ */
+ private void reduceChannels()
+ {
+ // how many channels do we need to close?
+ int channelsToReduce = nameToChannel.size() - capacity + 1;
+
+ // while there are still channels to close and we still have idle channels left
+ while (channelsToReduce > 0 && idleChannels.isEmpty() == false)
+ {
+ FileChannel channel = idleChannels.getFirst();
+ String channelName = channelToName.get(channel);
+
+ // remove oldest idle channel
+ idleChannels.removeFirst();
+ nameToChannel.remove(channelName);
+ channelToName.remove(channel);
+
+ // this shouldn't really happen
+ if (channelToUseCount.get(channel) != null)
+ {
+ log.warn("Channel " + channelName + " is both
idle and in use at the same time!");
+ channelToUseCount.remove(channel);
+ }
+
+ try
+ {
+ channel.close();
+ }
+ catch (IOException e)
+ {
+ log.error("Error closing file channel", e);
+ }
+ --channelsToReduce;
+ }
+
+ if (channelsToReduce > 0)
+ {
+ log.warn("Unable to reduce enough channels, no idle
channels left to remove.");
+ }
+ }
+
+ /**
+ * Returns a channel for the given file. If the file doesn't exist, the createIfDoesNotExist
+ * attribute specifies if the file should be created.
+ *
+ * Do NOT call close on the returned channel. Instead call
+ * {@link #returnFileChannel(FileChannel)}.
+ *
+ * @param fileName
+ * @param createIfDoesNotExist
+ * @return file channel
+ */
+ public synchronized FileChannel getFileChannel(String fileName, boolean createIfDoesNotExist)
+ {
+ FileChannel channel = nameToChannel.get(fileName);
+
+ if (channel == null)
+ {
+ channel = newFileChannel(fileName, createIfDoesNotExist);
+
+ if (channel != null)
+ {
+ // we need to create new channel
+ // first, check how many channels we have already opened
+ if (nameToChannel.size() >= capacity)
+ {
+ reduceChannels();
+ }
+
+ nameToChannel.put(fileName, channel);
+ channelToName.put(channel, fileName);
+ }
+ }
+
+ if (channel != null)
+ {
+ // increase the usage count for this channel
+
+ Integer count = channelToUseCount.get(channel);
+ if (count == null || count.intValue() == 0)
+ {
+ channelToUseCount.put(channel, new Integer(1));
+ idleChannels.remove(channel);
+ }
+ else
+ {
+ count = new Integer(count.intValue() + 1);
+ channelToUseCount.put(channel, count);
+ }
+ }
+
+ return channel;
+ }
+
+ /**
+ * Returns the channel to the pool. It is necessary to call this for every channel obtained by
+ * calling {@link #getFileChannel(String, boolean)}.
+ *
+ * @param channel
+ */
+ public synchronized void returnFileChannel(FileChannel channel)
+ {
+ Integer count = channelToUseCount.get(channel);
+
+ if (count == null || count.intValue() == 0)
+ {
+ throw new IllegalArgumentException("Trying to return
unused channel");
+ }
+
+ count = new Integer(count.intValue() - 1);
+
+ // decrease the usage count
+ if (count.intValue() == 0)
+ {
+ channelToUseCount.remove(channel);
+ if (channelsToDeleteOnReturn.contains(channel))
+ {
+ closeAndDelete(channel);
+ }
+ else
+ {
+ // this was the last usage, add channel to idle channels
+ idleChannels.addLast(channel);
+ }
+ }
+ else
+ {
+ channelToUseCount.put(channel, count);
+ }
+ }
+
+ private void closeAndDelete(FileChannel channel)
+ {
+ channelsToDeleteOnReturn.remove(channel);
+ String name = channelToName.get(channel);
+ channelToName.remove(channel);
+
+ channelToUseCount.remove(channel);
+ idleChannels.remove(channel);
+
+ try
+ {
+ channel.close();
+ }
+ catch (IOException e)
+ {
+ log.error("Error closing file channel", e);
+ }
+
+ File file = new File(name);
+ file.delete();
+ }
+
+ /**
+ * Closes the file channel with the given name and removes it from the pool. Also removes the
+ * file from the file system. If the channel is in use, the pool first waits until the channel
+ * is returned to the pool and then closes it.
+ *
+ * @param name
+ */
+ public synchronized void closeAndDeleteFileChannel(String name)
+ {
+ FileChannel channel = nameToChannel.get(name);
+ if (channel != null)
+ {
+ nameToChannel.remove(name);
+
+ Integer count = channelToUseCount.get(channel);
+ if (count != null && count.intValue() > 0)
+ {
+ channelsToDeleteOnReturn.add(channel);
+ }
+ else
+ {
+ closeAndDelete(channel);
+ }
+ }
+ else
+ {
+ File file = new File(name);
+ file.delete();
+ }
+ }
+
+ /**
+ * Destroys the {@link FileChannel} pool and closes all opened channels.
+ */
+ public synchronized void destroy()
+ {
+ log.debug("Destroying FileChannel pool");
+ for (Iterator<FileChannel> i = channelToName.keySet().iterator(); i.hasNext();)
+ {
+ FileChannel channel = i.next();
+ try
+ {
+ channel.close();
+ }
+ catch (IOException e)
+ {
+ log.error("Error closing file channel", e);
+ }
+ }
+ }
+
+ private static final Logger log = LoggerFactory.getLogger(FileChannelPool.class);
+}
\ No newline at end of file
Propchange: wicket/sandbox/knopp/experimental/wicket-ng/src/main/java/org/apache/wicket/page/persistent/disk/FileChannelPool.java
------------------------------------------------------------------------------
svn:mime-type = text/plain
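
For orientation, here is a minimal usage sketch of the pool API added above. It is illustrative only and not part of the commit; the file path, capacity, and payload bytes are made up.

    import java.io.IOException;
    import java.nio.ByteBuffer;
    import java.nio.channels.FileChannel;

    import org.apache.wicket.page.persistent.disk.FileChannelPool;

    public class FileChannelPoolExample
    {
        public static void main(String[] args) throws IOException
        {
            // pool that keeps at most 10 files open at any time
            FileChannelPool pool = new FileChannelPool(10);

            // obtain a channel; the file is created because it does not exist yet
            FileChannel channel = pool.getFileChannel("/tmp/example-pagemap", true);
            try
            {
                channel.write(ByteBuffer.wrap(new byte[] { 1, 2, 3 }), 0);
            }
            finally
            {
                // never close the channel directly - always return it to the pool
                pool.returnFileChannel(channel);
            }

            // close the channel (now, or once it is returned) and delete the backing file
            pool.closeAndDeleteFileChannel("/tmp/example-pagemap");
            pool.destroy();
        }
    }
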
Added: wicket/sandbox/knopp/experimental/wicket-ng/src/main/java/org/apache/wicket/page/persistent/disk/PageWindowManager.java
URL: http://svn.apache.org/viewvc/wicket/sandbox/knopp/experimental/wicket-ng/src/main/java/org/apache/wicket/page/persistent/disk/PageWindowManager.java?rev=760488&view=auto
==============================================================================
--- wicket/sandbox/knopp/experimental/wicket-ng/src/main/java/org/apache/wicket/page/persistent/disk/PageWindowManager.java (added)
+++ wicket/sandbox/knopp/experimental/wicket-ng/src/main/java/org/apache/wicket/page/persistent/disk/PageWindowManager.java Tue Mar 31 15:42:32 2009
@@ -0,0 +1,484 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.wicket.page.persistent.disk;
+
+import java.io.Serializable;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.wicket.util.collections.IntHashMap;
+
+/**
+ * Manages positions and size of serialized pages in the pagemap file.
+ * <p>
+ * The pages are stored inside the file in a cyclic way. Newer pages are placed after older ones,
+ * until the maximum file size is reached. After that, the next page is stored at the beginning of
+ * the file.
+ *
+ * @author Matej Knopp
+ */
+public class PageWindowManager implements Serializable
+{
+ private static final long serialVersionUID = 1L;
+
+ /**
+ * Contains information about a page inside the file.
+ *
+ * @author Matej Knopp
+ */
+ private static class PageWindowInternal implements Serializable
+ {
+ private static final long serialVersionUID = 1L;
+
+ // id of page or -1 if the window is empty
+ private int pageId;
+
+ // offset in the file where the serialized page data begins
+ private int filePartOffset;
+
+ // size of serialized page data
+ private int filePartSize;
+ }
+
+ // list of PageWindowInternal objects
+ private final List<PageWindowInternal> windows = new ArrayList<PageWindowInternal>();
+
+ // map from page id to page window index (referring to the windows list) - to improve
+ // searching speed
+ // the index must be cleaned when the instances in the windows list
+ // change their indexes (e.g. items are shifted on page window removal)
+ private IntHashMap<Integer> idToWindowIndex = null;
+
+ private void putWindowIndex(int pageId, int windowIndex)
+ {
+ if (idToWindowIndex != null && pageId != -1 && windowIndex != -1)
+ {
+ idToWindowIndex.put(pageId, windowIndex);
+ }
+ }
+
+ private void removeWindowIndex(int pageId)
+ {
+ idToWindowIndex.remove(pageId);
+ }
+
+ private void rebuildIndices()
+ {
+ idToWindowIndex = null;
+ idToWindowIndex = new IntHashMap<Integer>();
+ for (int i = 0; i < windows.size(); ++i)
+ {
+ PageWindowInternal window = windows.get(i);
+ putWindowIndex(window.pageId, i);
+ }
+ }
+
+
+ /**
+ * Returns the index of the given page in the {@link #windows} list.
+ *
+ * @param pageId
+ * @return window index
+ */
+ private int getWindowIndex(int pageId)
+ {
+ if (idToWindowIndex == null)
+ {
+ rebuildIndices();
+ }
+
+
+ Integer result = idToWindowIndex.get(pageId);
+ return result != null ? result : -1;
+ }
+
+ // index of last added page
+ private int indexPointer = -1;
+
+ /**
+ * Increments the {@link #indexPointer}. If the maximum file size has been reached, the
+ * {@link #indexPointer} is set to 0.
+ *
+ * @return new index pointer
+ */
+ private int incrementIndexPointer()
+ {
+ if (maxSize > 0 && totalSize >= maxSize && indexPointer == windows.size() - 1)
+ {
+ indexPointer = 0;
+ }
+ else
+ {
+ ++indexPointer;
+ }
+ return indexPointer;
+ }
+
+ /**
+ * Returns the offset in the file of the window at the given index. The offset is computed by
+ * getting the previous window's offset and adding the previous window's size to it.
+ *
+ * @param index
+ * @return window file offset
+ */
+ private int getWindowFileOffset(int index)
+ {
+ if (index > 0)
+ {
+ PageWindowInternal window = windows.get(index - 1);
+ return window.filePartOffset + window.filePartSize;
+ }
+ else
+ {
+ return 0;
+ }
+ }
+
+ /**
+ * Splits the window at the given index into two windows. The first of those will have the size
+ * specified by the argument, the other one will fill up the rest of the original window.
+ *
+ * @param index
+ * @param size
+ */
+ private void splitWindow(int index, int size)
+ {
+ PageWindowInternal window = windows.get(index);
+ int delta = window.filePartSize - size;
+
+ if (index == windows.size() - 1)
+ {
+ // if this is last window
+ totalSize -= delta;
+ window.filePartSize = size;
+ }
+ else if (window.filePartSize != size)
+ {
+ PageWindowInternal newWindow = new PageWindowInternal();
+ newWindow.pageId = -1;
+ window.filePartSize = size;
+
+ windows.add(index + 1, newWindow);
+
+ newWindow.filePartOffset = getWindowFileOffset(index + 1);
+ newWindow.filePartSize = delta;
+ }
+
+ idToWindowIndex = null;
+ }
+
+ /**
+ * Merges the window at the given index with the next window. The resulting window will have
+ * the size of the two windows summed together.
+ *
+ * @param index
+ */
+ private void mergeWindowWithNext(int index)
+ {
+ if (index < windows.size() - 1)
+ {
+ PageWindowInternal window = windows.get(index);
+ PageWindowInternal next = windows.get(index + 1);
+ window.filePartSize += next.filePartSize;
+
+ windows.remove(index + 1);
+ idToWindowIndex = null; // reset index
+ }
+ }
+
+ /**
+ * Adjusts the window at the given index to the specified size. If the new size is smaller than
+ * the window size, the window will be split. Otherwise the window will be merged with as many
+ * subsequent windows as necessary. In case the window is the last window in the file, the size
+ * will be adjusted without splitting or merging.
+ *
+ * @param index
+ * @param size
+ */
+ private void adjustWindowSize(int index, int size)
+ {
+ PageWindowInternal window = windows.get(index);
+
+ // last window, just adjust size
+ if (index == windows.size() - 1)
+ {
+ int delta = size - window.filePartSize;
+ totalSize += delta;
+ window.filePartSize = size;
+ }
+ else
+ {
+ // merge as many times as necessary
+ while (window.filePartSize < size && index < windows.size() - 1)
+ {
+ mergeWindowWithNext(index);
+ }
+ // done merging - do we have enough room ?
+ if (window.filePartSize < size)
+ {
+ // no, this is the last window
+ int delta = size - window.filePartSize;
+ totalSize += delta;
+ window.filePartSize = size;
+ }
+ else
+ {
+ // yes, we might want to split the window, so that we don't lose
+ // space when the created window was too big
+ splitWindow(index, size);
+ }
+ }
+
+ window.pageId = -1;
+ }
+
+ /**
+ * Allocates a window at the given index with the given size. If the index points to an
+ * existing window, the window size will be adjusted. Otherwise a new window with the
+ * appropriate size will be created.
+ *
+ * @param index
+ * @param size
+ * @return page window
+ */
+ private PageWindowInternal allocatePageWindow(int index, int size)
+ {
+ final PageWindowInternal window;
+
+ // new window
+ if (index == windows.size())
+ {
+ // new page window
+ window = new PageWindowInternal();
+ window.filePartOffset = getWindowFileOffset(index);
+ totalSize += size;
+ window.filePartSize = size;
+ windows.add(window);
+ }
+ else
+ {
+ // get the window
+ window = windows.get(index);
+
+ // adjust if necessary
+ if (window.filePartSize != size)
+ {
+ adjustWindowSize(index, size);
+ }
+ }
+
+
+ return window;
+ }
+
+ /**
+ * Public (read only) version of page window.
+ *
+ * @author Matej Knopp
+ */
+ public static class PageWindow
+ {
+ private final PageWindowInternal pageWindowInternal;
+
+ /**
+ * Construct.
+ *
+ * @param pageWindowInternal
+ */
+ private PageWindow(PageWindowInternal pageWindowInternal)
+ {
+ this.pageWindowInternal = pageWindowInternal;
+ }
+
+ /**
+ * @return page Id
+ */
+ public int getPageId()
+ {
+ return pageWindowInternal.pageId;
+ }
+
+ /**
+ * @return offset in the pagemap file where the serialized page data starts
+ */
+ public int getFilePartOffset()
+ {
+ return pageWindowInternal.filePartOffset;
+ }
+
+ /**
+ * @return size of the serialized page data
+ */
+ public int getFilePartSize()
+ {
+ return pageWindowInternal.filePartSize;
+ }
+ }
+
+ /**
+ * Creates and returns a new page window for the given page.
+ *
+ * @param pageId
+ * @param size
+ * @return page window
+ */
+ public PageWindow createPageWindow(int pageId, int size)
+ {
+ int index = getWindowIndex(pageId);
+
+ // if we found the page window, mark it as invalid
+ if (index != -1)
+ {
+ removeWindowIndex(pageId);
+ (windows.get(index)).pageId = -1;
+ }
+
+ // if we are not going to reuse a page window (because it's not on the indexPointer
+ // position or because we didn't find it), increment the indexPointer
+ if (index == -1 || index != indexPointer)
+ {
+ index = incrementIndexPointer();
+ }
+
+ PageWindowInternal window = allocatePageWindow(index, size);
+ window.pageId = pageId;
+
+ putWindowIndex(pageId, index);
+ return new PageWindow(window);
+ }
+
+ /**
+ * Returns the page window for the given page or null if no window was found.
+ *
+ * @param pageId
+ * @return page window or null
+ */
+ public PageWindow getPageWindow(int pageId)
+ {
+ int index = getWindowIndex(pageId);
+ if (index != -1)
+ {
+ return new PageWindow(windows.get(index));
+ }
+ else
+ {
+ return null;
+ }
+ }
+
+ /**
+ * Removes the page window for the given page.
+ *
+ * @param pageId
+ */
+ public void removePage(int pageId)
+ {
+ int index = getWindowIndex(pageId);
+ if (index != -1)
+ {
+ PageWindowInternal window = windows.get(index);
+ removeWindowIndex(pageId);
+ if (index == windows.size() - 1)
+ {
+ windows.remove(index);
+ totalSize -= window.filePartSize;
+ if (indexPointer == index)
+ {
+ --indexPointer;
+ }
+ }
+ else
+ {
+ window.pageId = -1;
+ }
+ }
+ }
+
+
+ /**
+ * Returns the last n saved page windows.
+ *
+ * @param count
+ * @return list of page windows
+ */
+ public synchronized List<PageWindow> getLastPageWindows(int count)
+ {
+ List<PageWindow> result = new ArrayList<PageWindow>();
+ int currentIndex = indexPointer;
+
+ do
+ {
+ if (currentIndex == -1)
+ {
+ break;
+ }
+
+ PageWindowInternal window = windows.get(currentIndex);
+ if (window.pageId != -1)
+ {
+ result.add(new PageWindow(window));
+ }
+
+ --currentIndex;
+
+ if (currentIndex == -1)
+ {
+ // wrap around and continue from the last window in the list
+ currentIndex = windows.size() - 1;
+ }
+
+ }
+ while (result.size() < count && currentIndex != indexPointer);
+
+ return result;
+ }
+
+ /**
+ * Creates a new PageWindowManager.
+ *
+ * @param maxSize
+ * maximum file size. After this size is exceeded, the pages will be saved starting
+ * at the beginning of the file
+ */
+ public PageWindowManager(int maxSize)
+ {
+ this.maxSize = maxSize;
+ }
+
+ /**
+ * Returns the size of all saved pages
+ *
+ * @return total size
+ */
+ public int getTotalSize()
+ {
+ return totalSize;
+ }
+
+ private int totalSize = 0;
+
+ private final int maxSize;
+}
Propchange: wicket/sandbox/knopp/experimental/wicket-ng/src/main/java/org/apache/wicket/page/persistent/disk/PageWindowManager.java
------------------------------------------------------------------------------
svn:mime-type = text/plain
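
To illustrate the cyclic allocation described in the class javadoc, a small sketch follows. Page ids, sizes, and the 100 byte limit are made up; PageWindowManagerTest below exercises the same behaviour in detail.

    import org.apache.wicket.page.persistent.disk.PageWindowManager;
    import org.apache.wicket.page.persistent.disk.PageWindowManager.PageWindow;

    public class PageWindowManagerExample
    {
        public static void main(String[] args)
        {
            // once the total size reaches 100 bytes, new pages wrap to the start of the file
            PageWindowManager manager = new PageWindowManager(100);

            manager.createPageWindow(1, 40); // page 1 at offset 0
            manager.createPageWindow(2, 40); // page 2 at offset 40
            manager.createPageWindow(3, 40); // page 3 at offset 80, total size now 120

            // the next window is allocated at the beginning again, reusing page 1's slot
            PageWindow window = manager.createPageWindow(4, 30);
            System.out.println(window.getFilePartOffset()); // 0
            System.out.println(manager.getPageWindow(1));   // null - page 1 was evicted
        }
    }
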
Added: wicket/sandbox/knopp/experimental/wicket-ng/src/main/java/org/apache/wicket/util/collections/IntHashMap.java
URL: http://svn.apache.org/viewvc/wicket/sandbox/knopp/experimental/wicket-ng/src/main/java/org/apache/wicket/util/collections/IntHashMap.java?rev=760488&view=auto
==============================================================================
--- wicket/sandbox/knopp/experimental/wicket-ng/src/main/java/org/apache/wicket/util/collections/IntHashMap.java (added)
+++ wicket/sandbox/knopp/experimental/wicket-ng/src/main/java/org/apache/wicket/util/collections/IntHashMap.java Tue Mar 31 15:42:32 2009
@@ -0,0 +1,1178 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.wicket.util.collections;
+
+import java.io.IOException;
+import java.io.Serializable;
+import java.util.AbstractCollection;
+import java.util.AbstractSet;
+import java.util.Collection;
+import java.util.ConcurrentModificationException;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.NoSuchElementException;
+import java.util.Set;
+
+/**
+ * This is an integer hashmap that has the exact same features and interface as a normal Map,
+ * except that the key is directly an integer. So no hash is calculated and no key object is stored.
+ *
+ * @author jcompagner
+ *
+ * @param <V>
+ * The value in the map
+ */
+public class IntHashMap<V> implements Cloneable, Serializable
+{
+ transient volatile Set<Integer> keySet = null;
+
+ transient volatile Collection<V> values = null;
+
+ /**
+ * The default initial capacity - MUST be a power of two.
+ */
+ static final int DEFAULT_INITIAL_CAPACITY = 16;
+
+ /**
+ * The maximum capacity, used if a higher value is implicitly specified by either of the
+ * constructors with arguments. MUST be a power of two <= 1<<30.
+ */
+ static final int MAXIMUM_CAPACITY = 1 << 30;
+
+ /**
+ * The load factor used when none specified in constructor.
+ */
+ static final float DEFAULT_LOAD_FACTOR = 0.75f;
+
+ /**
+ * The table, resized as necessary. Length MUST always be a power of two.
+ */
+ transient Entry<V>[] table;
+
+ /**
+ * The number of key-value mappings contained in this identity hash map.
+ */
+ transient int size;
+
+ /**
+ * The next size value at which to resize (capacity * load factor).
+ *
+ * @serial
+ */
+ int threshold;
+
+ /**
+ * The load factor for the hash table.
+ *
+ * @serial
+ */
+ final float loadFactor;
+
+ /**
+ * The number of times this HashMap has been structurally modified. Structural modifications are
+ * those that change the number of mappings in the HashMap or otherwise modify its internal
+ * structure (e.g., rehash). This field is used to make iterators on Collection-views of the
+ * HashMap fail-fast. (See ConcurrentModificationException).
+ */
+ transient volatile int modCount;
+
+ /**
+ * Constructs an empty <tt>HashMap</tt> with the specified initial capacity and load factor.
+ *
+ * @param initialCapacity
+ * The initial capacity.
+ * @param loadFactor
+ * The load factor.
+ * @throws IllegalArgumentException
+ * if the initial capacity is negative or the load factor is nonpositive.
+ */
+ @SuppressWarnings("unchecked")
+ public IntHashMap(int initialCapacity, float loadFactor)
+ {
+ if (initialCapacity < 0)
+ {
+ throw new IllegalArgumentException("Illegal initial
capacity: " + //$NON-NLS-1$
+ initialCapacity);
+ }
+ if (initialCapacity > MAXIMUM_CAPACITY)
+ {
+ initialCapacity = MAXIMUM_CAPACITY;
+ }
+ if (loadFactor <= 0 || Float.isNaN(loadFactor))
+ {
+ throw new IllegalArgumentException("Illegal load
factor: " + //$NON-NLS-1$
+ loadFactor);
+ }
+
+ // Find a power of 2 >= initialCapacity
+ int capacity = 1;
+ while (capacity < initialCapacity)
+ {
+ capacity <<= 1;
+ }
+
+ this.loadFactor = loadFactor;
+ threshold = (int)(capacity * loadFactor);
+ table = new Entry[capacity];
+ init();
+ }
+
+ /**
+ * Constructs an empty <tt>HashMap</tt> with the specified initial capacity and the default
+ * load factor (0.75).
+ *
+ * @param initialCapacity
+ * the initial capacity.
+ * @throws IllegalArgumentException
+ * if the initial capacity is negative.
+ */
+ public IntHashMap(int initialCapacity)
+ {
+ this(initialCapacity, DEFAULT_LOAD_FACTOR);
+ }
+
+ /**
+ * Constructs an empty <tt>HashMap</tt> with the default initial capacity (16) and the default
+ * load factor (0.75).
+ */
+ @SuppressWarnings("unchecked")
+ public IntHashMap()
+ {
+ loadFactor = DEFAULT_LOAD_FACTOR;
+ threshold = (int)(DEFAULT_INITIAL_CAPACITY * DEFAULT_LOAD_FACTOR);
+ table = new Entry[DEFAULT_INITIAL_CAPACITY];
+ init();
+ }
+
+ // internal utilities
+
+ /**
+ * Initialization hook for subclasses. This method is called in all constructors and
+ * pseudo-constructors (clone, readObject) after HashMap has been initialized but before any
+ * entries have been inserted. (In the absence of this method, readObject would require explicit
+ * knowledge of subclasses.)
+ */
+ void init()
+ {
+ }
+
+ /**
+ * Returns index for hash code h.
+ *
+ * @param h
+ * @param length
+ * @return The index for the hash integer for the given length
+ */
+ static int indexFor(int h, int length)
+ {
+ return h & (length - 1);
+ }
+
+ /**
+ * Returns the number of key-value mappings in this map.
+ *
+ * @return the number of key-value mappings in this map.
+ */
+ public int size()
+ {
+ return size;
+ }
+
+ /**
+ * Returns <tt>true</tt> if this map contains no key-value mappings.
+ *
+ * @return <tt>true</tt> if this map contains no key-value mappings.
+ */
+ public boolean isEmpty()
+ {
+ return size == 0;
+ }
+
+ /**
+ * Returns the value to which the specified key is mapped in this identity hash map, or
+ * <tt>null</tt> if the map contains no mapping for this key. A return value of <tt>null</tt>
+ * does not <i>necessarily</i> indicate that the map contains no mapping for the key; it is
+ * also possible that the map explicitly maps the key to <tt>null</tt>. The
+ * <tt>containsKey</tt> method may be used to distinguish these two cases.
+ *
+ * @param key
+ * the key whose associated value is to be returned.
+ * @return the value to which this map maps the specified key, or <tt>null</tt> if the map
+ * contains no mapping for this key.
+ * @see #put(int, Object)
+ */
+ public V get(int key)
+ {
+ int i = indexFor(key, table.length);
+ Entry<V> e = table[i];
+ while (true)
+ {
+ if (e == null)
+ {
+ return null;
+ }
+ if (key == e.key)
+ {
+ return e.value;
+ }
+ e = e.next;
+ }
+ }
+
+ /**
+ * Returns <tt>true</tt> if this map contains a mapping for the specified key.
+ *
+ * @param key
+ * The key whose presence in this map is to be tested
+ * @return <tt>true</tt> if this map contains a mapping for the specified key.
+ */
+ public boolean containsKey(int key)
+ {
+ int i = indexFor(key, table.length);
+ Entry<V> e = table[i];
+ while (e != null)
+ {
+ if (key == e.key)
+ {
+ return true;
+ }
+ e = e.next;
+ }
+ return false;
+ }
+
+ /**
+ * Returns the entry associated with the specified key in the HashMap. Returns null if the
+ * HashMap contains no mapping for this key.
+ *
+ * @param key
+ * @return The Entry object for the given hash key
+ */
+ Entry<V> getEntry(int key)
+ {
+ int i = indexFor(key, table.length);
+ Entry<V> e = table[i];
+ while (e != null && !(key == e.key))
+ {
+ e = e.next;
+ }
+ return e;
+ }
+
+ /**
+ * Associates the specified value with the specified key in this map. If the map previously
+ * contained a mapping for this key, the old value is replaced.
+ *
+ * @param key
+ * key with which the specified value is to be associated.
+ * @param value
+ * value to be associated with the specified key.
+ * @return previous value associated with specified key, or <tt>null</tt> if there was no
+ * mapping for key. A <tt>null</tt> return can also indicate that the HashMap
+ * previously associated <tt>null</tt> with the specified key.
+ */
+ public V put(int key, V value)
+ {
+ int i = indexFor(key, table.length);
+
+ for (Entry<V> e = table[i]; e != null; e = e.next)
+ {
+ if (key == e.key)
+ {
+ V oldValue = e.value;
+ e.value = value;
+ return oldValue;
+ }
+ }
+
+ modCount++;
+ addEntry(key, value, i);
+ return null;
+ }
+
+ /**
+ * This method is used instead of put by constructors and pseudoconstructors (clone,
+ * readObject). It does not resize the table, check for comodification, etc. It calls
+ * createEntry rather than addEntry.
+ *
+ * @param key
+ * @param value
+ */
+ private void putForCreate(int key, V value)
+ {
+ int i = indexFor(key, table.length);
+
+ /**
+ * Look for preexisting entry for key. This will never happen for clone or deserialize. It
+ * will only happen for construction if the input Map is a sorted map whose ordering is
+ * inconsistent w/ equals.
+ */
+ for (Entry<V> e = table[i]; e != null; e = e.next)
+ {
+ if (key == e.key)
+ {
+ e.value = value;
+ return;
+ }
+ }
+
+ createEntry(key, value, i);
+ }
+
+ void putAllForCreate(IntHashMap<V> m)
+ {
+ for (Iterator<Entry<V>> i = m.entrySet().iterator(); i.hasNext();)
+ {
+ Entry<V> e = i.next();
+ putForCreate(e.getKey(), e.getValue());
+ }
+ }
+
+ /**
+ * Rehashes the contents of this map into a new array with a larger capacity. This method is
+ * called automatically when the number of keys in this map reaches its threshold.
+ *
+ * If current capacity is MAXIMUM_CAPACITY, this method does not resize the map, but sets
+ * threshold to Integer.MAX_VALUE. This has the effect of preventing future calls.
+ *
+ * @param newCapacity
+ * the new capacity, MUST be a power of two; must be greater than current capacity
+ * unless current capacity is MAXIMUM_CAPACITY (in which case value is irrelevant).
+ */
+ @SuppressWarnings("unchecked")
+ void resize(int newCapacity)
+ {
+ Entry<V>[] oldTable = table;
+ int oldCapacity = oldTable.length;
+ if (oldCapacity == MAXIMUM_CAPACITY)
+ {
+ threshold = Integer.MAX_VALUE;
+ return;
+ }
+
+ Entry<V>[] newTable = new Entry[newCapacity];
+ transfer(newTable);
+ table = newTable;
+ threshold = (int)(newCapacity * loadFactor);
+ }
+
+ /**
+ * Transfer all entries from current table to newTable.
+ *
+ * @param newTable
+ */
+ void transfer(Entry<V>[] newTable)
+ {
+ Entry<V>[] src = table;
+ int newCapacity = newTable.length;
+ for (int j = 0; j < src.length; j++)
+ {
+ Entry<V> e = src[j];
+ if (e != null)
+ {
+ src[j] = null;
+ do
+ {
+ Entry<V> next = e.next;
+ int i = indexFor(e.key, newCapacity);
+ e.next = newTable[i];
+ newTable[i] = e;
+ e = next;
+ }
+ while (e != null);
+ }
+ }
+ }
+
+ /**
+ * Copies all of the mappings from the specified map to this map. These mappings will replace
+ * any mappings that this map had for any of the keys currently in the specified map.
+ *
+ * @param m
+ * mappings to be stored in this map.
+ * @throws NullPointerException
+ * if the specified map is null.
+ */
+ public void putAll(IntHashMap<V> m)
+ {
+ int numKeysToBeAdded = m.size();
+ if (numKeysToBeAdded == 0)
+ {
+ return;
+ }
+
+ /*
+ * Expand the map if the number of mappings to be added is greater than or equal
+ * to threshold. This is conservative; the obvious condition is (m.size() + size) >=
+ * threshold, but this condition could result in a map with twice the appropriate capacity,
+ * if the keys to be added overlap with the keys already in this map. By using the
+ * conservative calculation, we subject ourself to at most one extra resize.
+ */
+ if (numKeysToBeAdded > threshold)
+ {
+ int targetCapacity = (int)(numKeysToBeAdded / loadFactor + 1);
+ if (targetCapacity > MAXIMUM_CAPACITY)
+ {
+ targetCapacity = MAXIMUM_CAPACITY;
+ }
+ int newCapacity = table.length;
+ while (newCapacity < targetCapacity)
+ {
+ newCapacity <<= 1;
+ }
+ if (newCapacity > table.length)
+ {
+ resize(newCapacity);
+ }
+ }
+
+ for (Iterator<Entry<V>> i = m.entrySet().iterator(); i.hasNext();)
+ {
+ Entry<V> e = i.next();
+ put(e.getKey(), e.getValue());
+ }
+ }
+
+ /**
+ * Removes the mapping for this key from this map if present.
+ *
+ * @param key
+ * key whose mapping is to be removed from the map.
+ * @return previous value associated with specified key, or <tt>null</tt> if there was no
+ * mapping for key. A <tt>null</tt> return can also indicate that the map previously
+ * associated <tt>null</tt> with the specified key.
+ */
+ public V remove(int key)
+ {
+ Entry<V> e = removeEntryForKey(key);
+ return (e == null ? null : e.value);
+ }
+
+ /**
+ * Removes and returns the entry associated with the specified key in the HashMap. Returns null
+ * if the HashMap contains no mapping for this key.
+ *
+ * @param key
+ * @return The Entry object that was removed
+ */
+ Entry<V> removeEntryForKey(int key)
+ {
+ int i = indexFor(key, table.length);
+ Entry<V> prev = table[i];
+ Entry<V> e = prev;
+
+ while (e != null)
+ {
+ Entry<V> next = e.next;
+ if (key == e.key)
+ {
+ modCount++;
+ size--;
+ if (prev == e)
+ {
+ table[i] = next;
+ }
+ else
+ {
+ prev.next = next;
+ }
+ return e;
+ }
+ prev = e;
+ e = next;
+ }
+
+ return e;
+ }
+
+ /**
+ * Special version of remove for EntrySet.
+ *
+ * @param o
+ * @return The entry that was removed
+ */
+ @SuppressWarnings("unchecked")
+ Entry<V> removeMapping(Object o)
+ {
+ if (!(o instanceof Entry))
+ {
+ return null;
+ }
+
+ Entry<V> entry = (Entry<V>)o;
+ int key = entry.getKey();
+ int i = indexFor(key, table.length);
+ Entry<V> prev = table[i];
+ Entry<V> e = prev;
+
+ while (e != null)
+ {
+ Entry<V> next = e.next;
+ if (e.key == key && e.equals(entry))
+ {
+ modCount++;
+ size--;
+ if (prev == e)
+ {
+ table[i] = next;
+ }
+ else
+ {
+ prev.next = next;
+ }
+ return e;
+ }
+ prev = e;
+ e = next;
+ }
+
+ return e;
+ }
+
+ /**
+ * Removes all mappings from this map.
+ */
+ public void clear()
+ {
+ modCount++;
+ Entry<V> tab[] = table;
+ for (int i = 0; i < tab.length; i++)
+ {
+ tab[i] = null;
+ }
+ size = 0;
+ }
+
+ /**
+ * Returns <tt>true</tt> if this map maps one or more keys to the specified value.
+ *
+ * @param value
+ * value whose presence in this map is to be tested.
+ * @return <tt>true</tt> if this map maps one or more keys to the specified value.
+ */
+ public boolean containsValue(Object value)
+ {
+ if (value == null)
+ {
+ return containsNullValue();
+ }
+
+ Entry<V> tab[] = table;
+ for (int i = 0; i < tab.length; i++)
+ {
+ for (Entry<V> e = tab[i]; e != null; e = e.next)
+ {
+ if (value.equals(e.value))
+ {
+ return true;
+ }
+ }
+ }
+ return false;
+ }
+
+ /**
+ * Special-case code for containsValue with null argument
+ *
+ * @return boolean true if there is a null value in this map
+ */
+ private boolean containsNullValue()
+ {
+ Entry<V> tab[] = table;
+ for (int i = 0; i < tab.length; i++)
+ {
+ for (Entry<V> e = tab[i]; e != null; e = e.next)
+ {
+ if (e.value == null)
+ {
+ return true;
+ }
+ }
+ }
+ return false;
+ }
+
+ /**
+ * Returns a shallow copy of this <tt>HashMap</tt> instance: the keys and values themselves
+ * are not cloned.
+ *
+ * @return a shallow copy of this map.
+ */
+ @SuppressWarnings("unchecked")
+ @Override
+ public Object clone() throws CloneNotSupportedException
+ {
+ IntHashMap<V> result = null;
+ try
+ {
+ result = (IntHashMap<V>)super.clone();
+ result.table = new Entry[table.length];
+ result.entrySet = null;
+ result.modCount = 0;
+ result.size = 0;
+ result.init();
+ result.putAllForCreate(this);
+ }
+ catch (CloneNotSupportedException e)
+ {
+ // assert false;
+ }
+ return result;
+ }
+
+ /**
+ * @author jcompagner
+ * @param <V>
+ * type of value object
+ */
+ public static class Entry<V>
+ {
+ final int key;
+ V value;
+ Entry<V> next;
+
+ /**
+ * Create new entry.
+ *
+ * @param k
+ * @param v
+ * @param n
+ */
+ Entry(int k, V v, Entry<V> n)
+ {
+ value = v;
+ next = n;
+ key = k;
+ }
+
+ /**
+ * @return The int key of this entry
+ */
+ public int getKey()
+ {
+ return key;
+ }
+
+ /**
+ * @return Gets the value object of this entry
+ */
+ public V getValue()
+ {
+ return value;
+ }
+
+ /**
+ * @param newValue
+ * @return The previous value
+ */
+ public V setValue(V newValue)
+ {
+ V oldValue = value;
+ value = newValue;
+ return oldValue;
+ }
+
+ /**
+ * @see java.lang.Object#equals(java.lang.Object)
+ */
+ @SuppressWarnings("unchecked")
+ @Override
+ public boolean equals(Object o)
+ {
+ if (!(o instanceof Entry))
+ {
+ return false;
+ }
+ Entry<V> e = (Entry<V>)o;
+ int k1 = getKey();
+ int k2 = e.getKey();
+ if (k1 == k2)
+ {
+ Object v1 = getValue();
+ Object v2 = e.getValue();
+ if (v1 == v2 || (v1 != null && v1.equals(v2)))
+ {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ /**
+ * @see java.lang.Object#hashCode()
+ */
+ @Override
+ public int hashCode()
+ {
+ return key ^ (value == null ? 0 : value.hashCode());
+ }
+
+ /**
+ * @see java.lang.Object#toString()
+ */
+ @Override
+ public String toString()
+ {
+ return getKey() + "=" + getValue(); //$NON-NLS-1$
+ }
+ }
+
+ /**
+ * Add a new entry with the specified key, value and hash code to the specified bucket. It is
+ * the responsibility of this method to resize the table if appropriate.
+ *
+ * Subclass overrides this to alter the behavior of put method.
+ *
+ * @param key
+ * @param value
+ * @param bucketIndex
+ */
+ void addEntry(int key, V value, int bucketIndex)
+ {
+ table[bucketIndex] = new Entry<V>(key, value, table[bucketIndex]);
+ if (size++ >= threshold)
+ {
+ resize(2 * table.length);
+ }
+ }
+
+ /**
+ * Like addEntry except that this version is used when creating entries as part of Map
+ * construction or "pseudo-construction" (cloning, deserialization). This version needn't worry
+ * about resizing the table.
+ *
+ * Subclass overrides this to alter the behavior of HashMap(Map), clone, and readObject.
+ *
+ * @param key
+ * @param value
+ * @param bucketIndex
+ */
+ void createEntry(int key, V value, int bucketIndex)
+ {
+ table[bucketIndex] = new Entry<V>(key, value, table[bucketIndex]);
+ size++;
+ }
+
+ private abstract class HashIterator<H> implements Iterator<H>
+ {
+ Entry<V> next; // next entry to return
+ int expectedModCount; // For fast-fail
+ int index; // current slot
+ Entry<V> current; // current entry
+
+ HashIterator()
+ {
+ expectedModCount = modCount;
+ Entry<V>[] t = table;
+ int i = t.length;
+ Entry<V> n = null;
+ if (size != 0)
+ { // advance to first entry
+ while (i > 0 && (n = t[--i]) == null)
+ {
+ /* NoOp */;
+ }
+ }
+ next = n;
+ index = i;
+ }
+
+ /**
+ * @see java.util.Iterator#hasNext()
+ */
+ public boolean hasNext()
+ {
+ return next != null;
+ }
+
+ Entry<V> nextEntry()
+ {
+ if (modCount != expectedModCount)
+ {
+ throw new ConcurrentModificationException();
+ }
+ Entry<V> e = next;
+ if (e == null)
+ {
+ throw new NoSuchElementException();
+ }
+
+ Entry<V> n = e.next;
+ Entry<V>[] t = table;
+ int i = index;
+ while (n == null && i > 0)
+ {
+ n = t[--i];
+ }
+ index = i;
+ next = n;
+ return current = e;
+ }
+
+ /**
+ * @see java.util.Iterator#remove()
+ */
+ public void remove()
+ {
+ if (current == null)
+ {
+ throw new IllegalStateException();
+ }
+ if (modCount != expectedModCount)
+ {
+ throw new ConcurrentModificationException();
+ }
+ int k = current.key;
+ current = null;
+ removeEntryForKey(k);
+ expectedModCount = modCount;
+ }
+
+ }
+
+ private class ValueIterator extends HashIterator<V>
+ {
+ /**
+ * @see java.util.Iterator#next()
+ */
+ public V next()
+ {
+ return nextEntry().value;
+ }
+ }
+
+ private class KeyIterator extends HashIterator<Integer>
+ {
+ /**
+ * @see java.util.Iterator#next()
+ */
+ public Integer next()
+ {
+ return new Integer(nextEntry().getKey());
+ }
+ }
+
+ private class EntryIterator extends HashIterator<Entry<V>>
+ {
+ /**
+ * @see java.util.Iterator#next()
+ */
+ public Entry<V> next()
+ {
+ Entry<V> nextEntry = nextEntry();
+ return nextEntry;
+ }
+ }
+
+ // Subclass overrides these to alter behavior of views' iterator() method
+ Iterator<Integer> newKeyIterator()
+ {
+ return new KeyIterator();
+ }
+
+ Iterator<V> newValueIterator()
+ {
+ return new ValueIterator();
+ }
+
+ Iterator<Entry<V>> newEntryIterator()
+ {
+ return new EntryIterator();
+ }
+
+ // Views
+
+ private transient Set<Entry<V>> entrySet = null;
+
+ /**
+ * Returns a set view of the keys contained in this map. The set is backed by the map, so
+ * changes to the map are reflected in the set, and vice-versa. The set supports element
+ * removal, which removes the corresponding mapping from this map, via the
+ * <tt>Iterator.remove</tt>, <tt>Set.remove</tt>, <tt>removeAll</tt>,
+ * <tt>retainAll</tt>, and <tt>clear</tt> operations. It does not support the <tt>add</tt>
+ * or <tt>addAll</tt> operations.
+ *
+ * @return a set view of the keys contained in this map.
+ */
+ public Set<Integer> keySet()
+ {
+ Set<Integer> ks = keySet;
+ return (ks != null ? ks : (keySet = new KeySet()));
+ }
+
+ private class KeySet extends AbstractSet<Integer>
+ {
+ /**
+ * @see java.util.AbstractCollection#iterator()
+ */
+ @Override
+ public Iterator<Integer> iterator()
+ {
+ return newKeyIterator();
+ }
+
+ /**
+ * @see java.util.AbstractCollection#size()
+ */
+ @Override
+ public int size()
+ {
+ return size;
+ }
+
+ /**
+ * @see java.util.AbstractCollection#contains(java.lang.Object)
+ */
+ @Override
+ public boolean contains(Object o)
+ {
+ if (o instanceof Number)
+ {
+ return containsKey(((Number)o).intValue());
+ }
+ return false;
+ }
+
+ /**
+ * @see java.util.AbstractCollection#remove(java.lang.Object)
+ */
+ @Override
+ public boolean remove(Object o)
+ {
+ if (o instanceof Number)
+ {
+ return removeEntryForKey(((Number)o).intValue()) != null;
+ }
+ return false;
+ }
+
+ /**
+ * @see java.util.AbstractCollection#clear()
+ */
+ @Override
+ public void clear()
+ {
+ IntHashMap.this.clear();
+ }
+ }
+
+ /**
+ * Returns a collection view of the values contained in this map. The collection is backed by
+ * the map, so changes to the map are reflected in the collection, and vice-versa. The
+ * collection supports element removal, which removes the corresponding mapping from this map,
+ * via the <tt>Iterator.remove</tt>, <tt>Collection.remove</tt>, <tt>removeAll</tt>,
+ * <tt>retainAll</tt>, and <tt>clear</tt> operations. It does not support the <tt>add</tt>
+ * or <tt>addAll</tt> operations.
+ *
+ * @return a collection view of the values contained in this map.
+ */
+ public Collection<V> values()
+ {
+ Collection<V> vs = values;
+ return (vs != null ? vs : (values = new Values()));
+ }
+
+ private class Values extends AbstractCollection<V>
+ {
+ /**
+ * @see java.util.AbstractCollection#iterator()
+ */
+ @Override
+ public Iterator<V> iterator()
+ {
+ return newValueIterator();
+ }
+
+ /**
+ * @see java.util.AbstractCollection#size()
+ */
+ @Override
+ public int size()
+ {
+ return size;
+ }
+
+ /**
+ * @see java.util.AbstractCollection#contains(java.lang.Object)
+ */
+ @Override
+ public boolean contains(Object o)
+ {
+ return containsValue(o);
+ }
+
+ /**
+ * @see java.util.AbstractCollection#clear()
+ */
+ @Override
+ public void clear()
+ {
+ IntHashMap.this.clear();
+ }
+ }
+
+ /**
+ * Returns a collection view of the mappings contained in this map. Each element in the returned
+ * collection is a <tt>Map.Entry</tt>. The collection is backed by the map, so changes to the
+ * map are reflected in the collection, and vice-versa. The collection supports element removal,
+ * which removes the corresponding mapping from the map, via the <tt>Iterator.remove</tt>,
+ * <tt>Collection.remove</tt>, <tt>removeAll</tt>, <tt>retainAll</tt>, and
+ * <tt>clear</tt> operations. It does not support the <tt>add</tt> or <tt>addAll</tt>
+ * operations.
+ *
+ * @return a collection view of the mappings contained in this map.
+ * @see Map.Entry
+ */
+ public Set<Entry<V>> entrySet()
+ {
+ Set<Entry<V>> es = entrySet;
+ return (es != null ? es : (entrySet = new EntrySet()));
+ }
+
+ private class EntrySet extends AbstractSet<Entry<V>>
+ {
+ /**
+ * @see java.util.AbstractCollection#iterator()
+ */
+ @Override
+ public Iterator<Entry<V>> iterator()
+ {
+ return newEntryIterator();
+ }
+
+ /**
+ * @see java.util.AbstractCollection#contains(java.lang.Object)
+ */
+ @SuppressWarnings("unchecked")
+ @Override
+ public boolean contains(Object o)
+ {
+ if (!(o instanceof Entry))
+ {
+ return false;
+ }
+ Entry<V> e = (Entry<V>)o;
+ Entry<V> candidate = getEntry(e.getKey());
+ return candidate != null && candidate.equals(e);
+ }
+
+ /**
+ * @see java.util.AbstractCollection#remove(java.lang.Object)
+ */
+ @Override
+ public boolean remove(Object o)
+ {
+ return removeMapping(o) != null;
+ }
+
+ /**
+ * @see java.util.AbstractCollection#size()
+ */
+ @Override
+ public int size()
+ {
+ return size;
+ }
+
+ /**
+ * @see java.util.AbstractCollection#clear()
+ */
+ @Override
+ public void clear()
+ {
+ IntHashMap.this.clear();
+ }
+ }
+
+ /**
+ * Save the state of the <tt>HashMap</tt> instance to a stream (i.e., serialize it).
+ *
+ * @param s
+ * The ObjectOutputStream
+ * @throws IOException
+ *
+ * @serialData The <i>capacity</i> of the HashMap (the length of the bucket array) is emitted
+ * (int), followed by the <i>size</i> of the HashMap (the number of key-value
+ * mappings), followed by the key (int) and value (Object) for each key-value
+ * mapping represented by the HashMap. The key-value mappings are emitted in the
+ * order that they are returned by <tt>entrySet().iterator()</tt>.
+ *
+ */
+ private void writeObject(java.io.ObjectOutputStream s) throws IOException
+ {
+ // Write out the threshold, loadfactor, and any hidden stuff
+ s.defaultWriteObject();
+
+ // Write out number of buckets
+ s.writeInt(table.length);
+
+ // Write out size (number of Mappings)
+ s.writeInt(size);
+
+ // Write out keys and values (alternating)
+ for (Iterator<Entry<V>> i = entrySet().iterator(); i.hasNext();)
+ {
+ Entry<V> e = i.next();
+ s.writeInt(e.getKey());
+ s.writeObject(e.getValue());
+ }
+ }
+
+ private static final long serialVersionUID = 362498820763181265L;
+
+ /**
+ * Reconstitute the <tt>HashMap</tt> instance from a stream (i.e., deserialize it).
+ *
+ * @param s
+ * @throws IOException
+ * @throws ClassNotFoundException
+ */
+ @SuppressWarnings("unchecked")
+ private void readObject(java.io.ObjectInputStream s) throws IOException, ClassNotFoundException
+ {
+ // Read in the threshold, loadfactor, and any hidden stuff
+ s.defaultReadObject();
+
+ // Read in number of buckets and allocate the bucket array;
+ int numBuckets = s.readInt();
+ table = new Entry[numBuckets];
+
+ init(); // Give subclass a chance to do its thing.
+
+ // Read in size (number of Mappings)
+ int size = s.readInt();
+
+ // Read the keys and values, and put the mappings in the HashMap
+ for (int i = 0; i < size; i++)
+ {
+ int key = s.readInt();
+ V value = (V)s.readObject();
+ putForCreate(key, value);
+ }
+ }
+
+ // These methods are used when serializing HashSets
+ int capacity()
+ {
+ return table.length;
+ }
+
+ float loadFactor()
+ {
+ return loadFactor;
+ }
+}
\ No newline at end of file
Propchange: wicket/sandbox/knopp/experimental/wicket-ng/src/main/java/org/apache/wicket/util/collections/IntHashMap.java
------------------------------------------------------------------------------
svn:mime-type = text/plain
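
A minimal usage sketch of IntHashMap follows; the values are made up, and the API mirrors java.util.HashMap but takes primitive int keys.

    import org.apache.wicket.util.collections.IntHashMap;

    public class IntHashMapExample
    {
        public static void main(String[] args)
        {
            // keys are primitive ints, so no key objects are allocated and no hash is computed
            IntHashMap<String> map = new IntHashMap<String>();
            map.put(1, "one");
            map.put(2, "two");

            System.out.println(map.get(1));         // "one"
            System.out.println(map.containsKey(3)); // false
            map.remove(2);

            // the entrySet/keySet/values views behave like those of java.util.HashMap
            for (IntHashMap.Entry<String> entry : map.entrySet())
            {
                System.out.println(entry.getKey() + " -> " + entry.getValue());
            }
        }
    }
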
Added: wicket/sandbox/knopp/experimental/wicket-ng/src/test/java/org/apache/wicket/page/persistent/disk/PageWindowManagerTest.java
URL: http://svn.apache.org/viewvc/wicket/sandbox/knopp/experimental/wicket-ng/src/test/java/org/apache/wicket/page/persistent/disk/PageWindowManagerTest.java?rev=760488&view=auto
==============================================================================
--- wicket/sandbox/knopp/experimental/wicket-ng/src/test/java/org/apache/wicket/page/persistent/disk/PageWindowManagerTest.java (added)
+++ wicket/sandbox/knopp/experimental/wicket-ng/src/test/java/org/apache/wicket/page/persistent/disk/PageWindowManagerTest.java Tue Mar 31 15:42:32 2009
@@ -0,0 +1,146 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.wicket.page.persistent.disk;
+
+import junit.framework.TestCase;
+
+import org.apache.wicket.page.persistent.disk.PageWindowManager.PageWindow;
+
+/**
+ * @author Matej Knopp
+ */
+public class PageWindowManagerTest extends TestCase
+{
+ /**
+ *
+ */
+ public void testAddRemove()
+ {
+ PageWindowManager manager = new PageWindowManager(300);
+ PageWindow window;
+
+ window = manager.createPageWindow(1, 50);
+ assertWindow(window, 1, 0, 50);
+
+ window = manager.createPageWindow(2, 40);
+ assertWindow(window, 2, 50, 40);
+
+ assertEquals(manager.getTotalSize(), 90);
+
+ window = manager.createPageWindow(2, 30);
+ assertWindow(window, 2, 50, 30);
+ assertEquals(manager.getTotalSize(), 80);
+
+ manager.removePage(2);
+ assertEquals(manager.getTotalSize(), 50);
+
+ window = manager.createPageWindow(3, 30);
+ assertWindow(window, 3, 50, 30);
+ assertEquals(manager.getTotalSize(), 80);
+ }
+
+ /**
+ *
+ */
+ public void testPageWindowCycle()
+ {
+ PageWindowManager manager = new PageWindowManager(100);
+ PageWindow window;
+
+ window = manager.createPageWindow(1, 30);
+
+ window = manager.createPageWindow(2, 30);
+
+ window = manager.createPageWindow(3, 30);
+
+ assertWindow(window, 3, 60, 30);
+
+ window = manager.createPageWindow(4, 30);
+
+ assertWindow(window, 4, 90, 30);
+
+ // should start at the beginning
+
+ window = manager.createPageWindow(5, 20);
+
+ assertWindow(window, 5, 0, 20);
+
+ assertNull(manager.getPageWindow(1));
+
+ window = manager.getPageWindow(2);
+ assertWindow(window, 2, 30, 30);
+
+ window = manager.createPageWindow(6, 10);
+
+ assertWindow(window, 6, 20, 10);
+
+ window = manager.getPageWindow(2);
+ assertWindow(window, 2, 30, 30);
+
+ window = manager.createPageWindow(6, 30);
+ assertWindow(window, 6, 20, 30);
+
+ assertNull(manager.getPageWindow(2));
+ assertNotNull(manager.getPageWindow(3));
+
+ window = manager.createPageWindow(6, 60);
+ assertWindow(window, 6, 20, 60);
+
+ assertNull(manager.getPageWindow(3));
+
+ window = manager.createPageWindow(7, 20);
+ assertWindow(window, 7, 80, 20);
+
+ assertNotNull(manager.getPageWindow(7));
+
+ // should start at the beginning again
+
+ window = manager.createPageWindow(8, 10);
+ assertWindow(window, 8, 0, 10);
+
+ assertNull(manager.getPageWindow(5));
+ assertNotNull(manager.getPageWindow(6));
+
+ window = manager.createPageWindow(9, 20);
+ assertWindow(window, 9, 10, 20);
+
+ assertNull(manager.getPageWindow(6));
+ assertNotNull(manager.getPageWindow(7));
+
+ window = manager.createPageWindow(10, 20);
+ assertWindow(window, 10, 30, 20);
+
+ assertNull(manager.getPageWindow(6));
+ assertNotNull(manager.getPageWindow(7));
+
+ // make sure that when replacing a page that's not last, the old "instance" is
+ // not valid anymore
+
+ manager.createPageWindow(8, 10);
+
+ window = manager.getPageWindow(8);
+ assertWindow(window, 8, 50, 10);
+ }
+
+
+ private void assertWindow(PageWindow window, int pageId, int filePartOffset, int filePartSize)
+ {
+ assertTrue(window.getPageId() == pageId &&
+ window.getFilePartOffset() == filePartOffset &&
+ window.getFilePartSize() == filePartSize);
+ }
+}
Propchange: wicket/sandbox/knopp/experimental/wicket-ng/src/test/java/org/apache/wicket/page/persistent/disk/PageWindowManagerTest.java
------------------------------------------------------------------------------
svn:mime-type = text/plain