Author: arekm                        Date: Mon Oct 24 19:03:59 2011 GMT
Module: packages                      Tag: HEAD
---- Log message:
- update for 3.1

---- Files affected:
packages/kernel:
   kernel-aufs2-unionfs.patch (1.5 -> 1.6), kernel-small_fixes.patch (1.43 -> 1.44)

---- Diffs:

================================================================
Index: packages/kernel/kernel-aufs2-unionfs.patch
diff -u packages/kernel/kernel-aufs2-unionfs.patch:1.5 packages/kernel/kernel-aufs2-unionfs.patch:1.6
--- packages/kernel/kernel-aufs2-unionfs.patch:1.5      Mon Jul 11 22:01:50 2011
+++ packages/kernel/kernel-aufs2-unionfs.patch  Mon Oct 24 21:03:53 2011
@@ -46,7 +46,7 @@
 @@ -82,11 +82,11 @@ extern int vfs_path_lookup(struct dentry *, struct vfsmount *,
  extern int kern_path_parent(const char *, struct nameidata *);
  extern int vfs_path_lookup(struct dentry *, struct vfsmount *,
-                          const char *, unsigned int, struct nameidata *);
+                          const char *, unsigned int, struct path *);
 +extern struct dentry *lookup_hash(struct nameidata *nd);
  
  extern struct file *lookup_instantiate_filp(struct nameidata *nd, struct dentry *dentry,
@@ -60,16 +60,16 @@
 index 997c3b4..be9a153 100644
 --- a/include/linux/splice.h
 +++ b/include/linux/splice.h
 -@@ -89,10 +89,4 @@ extern int splice_grow_spd(struct pipe_inode_info *, struct splice_pipe_desc *);
- extern void splice_shrink_spd(struct pipe_inode_info *,
-                               struct splice_pipe_desc *);
+@@ -91,10 +91,4 @@ extern void splice_shrink_spd(struct pip
+ extern void spd_release_page(struct splice_pipe_desc *, unsigned int);
  
+ extern const struct pipe_buf_operations page_cache_pipe_buf_ops;
+-
 -extern long do_splice_from(struct pipe_inode_info *pipe, struct file *out,
 -                         loff_t *ppos, size_t len, unsigned int flags);
 -extern long do_splice_to(struct file *in, loff_t *ppos,
 -                       struct pipe_inode_info *pipe, size_t len,
 -                       unsigned int flags);
--
  #endif
 aufs2 standalone patch for linux-2.6.35
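
For context, the hunk above tracks a 3.1 VFS API change: vfs_path_lookup()
now returns its result in a struct path instead of filling a struct
nameidata, and the private do_splice_from()/do_splice_to() declarations are
gone from splice.h. A minimal sketch of a caller against the new prototype
(example_resolve is hypothetical, for illustration only; LOOKUP_FOLLOW and
path_put() are the stock VFS API):

	/* Hypothetical 3.1-era caller of the new vfs_path_lookup() prototype. */
	static int example_resolve(struct dentry *base, struct vfsmount *mnt,
				   const char *name)
	{
		struct path path;
		int err;

		err = vfs_path_lookup(base, mnt, name, LOOKUP_FOLLOW, &path);
		if (err)
			return err;
		/* ... use path.dentry / path.mnt ... */
		path_put(&path);	/* drop the references the lookup took */
		return 0;
	}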
 

================================================================
Index: packages/kernel/kernel-small_fixes.patch
diff -u packages/kernel/kernel-small_fixes.patch:1.43 packages/kernel/kernel-small_fixes.patch:1.44
--- packages/kernel/kernel-small_fixes.patch:1.43       Wed Oct 19 22:20:43 2011
+++ packages/kernel/kernel-small_fixes.patch    Mon Oct 24 21:03:53 2011
@@ -136,203 +136,6 @@
 the body of a message to majord...@vger.kernel.org
 More majordomo info at  http://vger.kernel.org/majordomo-info.html
 Please read the FAQ at  http://www.tux.org/lkml/
-commit 3326c784c9f492e988617d93f647ae0cfd4c8d09
-Author: Jiri Pirko <jpi...@redhat.com>
-Date:   Wed Jul 20 04:54:38 2011 +0000
-
-    forcedeth: do vlan cleanup
-    
-    - unify vlan and nonvlan rx path
-    - kill np->vlangrp and nv_vlan_rx_register
-    - allow rx vlan accel to be turned on/off via ethtool (set_features)
-    
-    Signed-off-by: Jiri Pirko <jpi...@redhat.com>
-    Signed-off-by: David S. Miller <da...@davemloft.net>
-
-diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
-index 537b695..e64cd9c 100644
---- a/drivers/net/forcedeth.c
-+++ b/drivers/net/forcedeth.c
-@@ -820,9 +820,6 @@ struct fe_priv {
-       struct nv_skb_map *tx_end_flip;
-       int tx_stop;
- 
--      /* vlan fields */
--      struct vlan_group *vlangrp;
--
-       /* msi/msi-x fields */
-       u32 msi_flags;
-       struct msix_entry msi_x_entry[NV_MSI_X_MAX_VECTORS];
-@@ -2766,17 +2763,13 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit)
-                       skb->protocol = eth_type_trans(skb, dev);
-                       prefetch(skb->data);
- 
--                      if (likely(!np->vlangrp)) {
--                              napi_gro_receive(&np->napi, skb);
--                      } else {
--                              vlanflags = le32_to_cpu(np->get_rx.ex->buflow);
--                              if (vlanflags & NV_RX3_VLAN_TAG_PRESENT) {
--                                      vlan_gro_receive(&np->napi, np->vlangrp,
--                                                       vlanflags & NV_RX3_VLAN_TAG_MASK, skb);
--                              } else {
--                                      napi_gro_receive(&np->napi, skb);
--                              }
-+                      vlanflags = le32_to_cpu(np->get_rx.ex->buflow);
-+                      if (vlanflags & NV_RX3_VLAN_TAG_PRESENT) {
-+                              u16 vid = vlanflags & NV_RX3_VLAN_TAG_MASK;
-+
-+                              __vlan_hwaccel_put_tag(skb, vid);
-                       }
-+                      napi_gro_receive(&np->napi, skb);
- 
-                       dev->stats.rx_packets++;
-                       dev->stats.rx_bytes += len;
-@@ -4484,6 +4477,27 @@ static u32 nv_fix_features(struct net_device *dev, u32 features)
-       return features;
- }
- 
-+static void nv_vlan_mode(struct net_device *dev, u32 features)
-+{
-+      struct fe_priv *np = get_nvpriv(dev);
-+
-+      spin_lock_irq(&np->lock);
-+
-+      if (features & NETIF_F_HW_VLAN_RX)
-+              np->txrxctl_bits |= NVREG_TXRXCTL_VLANSTRIP;
-+      else
-+              np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANSTRIP;
-+
-+      if (features & NETIF_F_HW_VLAN_TX)
-+              np->txrxctl_bits |= NVREG_TXRXCTL_VLANINS;
-+      else
-+              np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANINS;
-+
-+      writel(np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
-+
-+      spin_unlock_irq(&np->lock);
-+}
-+
- static int nv_set_features(struct net_device *dev, u32 features)
- {
-       struct fe_priv *np = netdev_priv(dev);
-@@ -4504,6 +4518,9 @@ static int nv_set_features(struct net_device *dev, u32 features)
-               spin_unlock_irq(&np->lock);
-       }
- 
-+      if (changed & (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX))
-+              nv_vlan_mode(dev, features);
-+
-       return 0;
- }
- 
-@@ -4879,29 +4896,6 @@ static const struct ethtool_ops ops = {
-       .self_test = nv_self_test,
- };
- 
--static void nv_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
--{
--      struct fe_priv *np = get_nvpriv(dev);
--
--      spin_lock_irq(&np->lock);
--
--      /* save vlan group */
--      np->vlangrp = grp;
--
--      if (grp) {
--              /* enable vlan on MAC */
--              np->txrxctl_bits |= NVREG_TXRXCTL_VLANSTRIP | NVREG_TXRXCTL_VLANINS;
--      } else {
--              /* disable vlan on MAC */
--              np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANSTRIP;
--              np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANINS;
--      }
--
--      writel(np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
--
--      spin_unlock_irq(&np->lock);
--}
--
- /* The mgmt unit and driver use a semaphore to access the phy during init */
- static int nv_mgmt_acquire_sema(struct net_device *dev)
- {
-@@ -5208,7 +5202,6 @@ static const struct net_device_ops nv_netdev_ops = {
-       .ndo_validate_addr      = eth_validate_addr,
-       .ndo_set_mac_address    = nv_set_mac_address,
-       .ndo_set_multicast_list = nv_set_multicast,
--      .ndo_vlan_rx_register   = nv_vlan_rx_register,
- #ifdef CONFIG_NET_POLL_CONTROLLER
-       .ndo_poll_controller    = nv_poll_controller,
- #endif
-@@ -5226,7 +5219,6 @@ static const struct net_device_ops nv_netdev_ops_optimized = {
-       .ndo_validate_addr      = eth_validate_addr,
-       .ndo_set_mac_address    = nv_set_mac_address,
-       .ndo_set_multicast_list = nv_set_multicast,
--      .ndo_vlan_rx_register   = nv_vlan_rx_register,
- #ifdef CONFIG_NET_POLL_CONTROLLER
-       .ndo_poll_controller    = nv_poll_controller,
- #endif
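
Pieced together, the post-patch receive path reduces to the shape below
(a sketch reconstructed from the hunks above, not a verbatim copy; the
3.1-era __vlan_hwaccel_put_tag() takes just (skb, vid)):

	vlanflags = le32_to_cpu(np->get_rx.ex->buflow);
	if (vlanflags & NV_RX3_VLAN_TAG_PRESENT) {
		u16 vid = vlanflags & NV_RX3_VLAN_TAG_MASK;

		/* tag the skb; the stack handles it, no vlan_group needed */
		__vlan_hwaccel_put_tag(skb, vid);
	}
	napi_gro_receive(&np->napi, skb);	/* one delivery path either way */

With acceleration now a feature bit, userspace toggles it through
ndo_set_features() (e.g. ethtool -K <dev> rxvlan off), which is what
nv_vlan_mode() services.
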
-commit 0891b0e08937aaec2c4734acb94c5ff8042313bb
-Author: Jiri Pirko <jpi...@redhat.com>
-Date:   Tue Jul 26 10:19:28 2011 +0000
-
-    forcedeth: fix vlans
-    
-    For some reason, when rxaccel is disabled, NV_RX3_VLAN_TAG_PRESENT is
-    still set and some pseudorandom vids appear. So check for
-    NETIF_F_HW_VLAN_RX as well. Also set correctly hw_features and set vlan
-    mode on probe.
-    
-    Signed-off-by: Jiri Pirko <jpi...@redhat.com>
-    Signed-off-by: David S. Miller <da...@davemloft.net>
-
-diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
-index e64cd9c..e55df30 100644
---- a/drivers/net/forcedeth.c
-+++ b/drivers/net/forcedeth.c
-@@ -2764,7 +2764,14 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit)
-                       prefetch(skb->data);
- 
-                       vlanflags = le32_to_cpu(np->get_rx.ex->buflow);
--                      if (vlanflags & NV_RX3_VLAN_TAG_PRESENT) {
-+
-+                      /*
-+                       * There's a need to check for NETIF_F_HW_VLAN_RX here.
-+                       * Even if vlan rx accel is disabled,
-+                       * NV_RX3_VLAN_TAG_PRESENT is pseudo randomly set.
-+                       */
-+                      if (dev->features & NETIF_F_HW_VLAN_RX &&
-+                          vlanflags & NV_RX3_VLAN_TAG_PRESENT) {
-                               u16 vid = vlanflags & NV_RX3_VLAN_TAG_MASK;
- 
-                               __vlan_hwaccel_put_tag(skb, vid);
-@@ -5331,15 +5338,16 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
-               np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
-               dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_SG |
-                       NETIF_F_TSO | NETIF_F_RXCSUM;
--              dev->features |= dev->hw_features;
-       }
- 
-       np->vlanctl_bits = 0;
-       if (id->driver_data & DEV_HAS_VLAN) {
-               np->vlanctl_bits = NVREG_VLANCONTROL_ENABLE;
--              dev->features |= NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX;
-+              dev->hw_features |= NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX;
-       }
- 
-+      dev->features |= dev->hw_features;
-+
-       np->pause_flags = NV_PAUSEFRAME_RX_CAPABLE | NV_PAUSEFRAME_RX_REQ | NV_PAUSEFRAME_AUTONEG;
-       if ((id->driver_data & DEV_HAS_PAUSEFRAME_TX_V1) ||
-           (id->driver_data & DEV_HAS_PAUSEFRAME_TX_V2) ||
-@@ -5607,6 +5615,8 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
-               goto out_error;
-       }
- 
-+      nv_vlan_mode(dev, dev->features);
-+
-       netif_carrier_off(dev);
- 
-       dev_info(&pci_dev->dev, "ifname %s, PHY OUI 0x%x @ %d, addr %pM\n",
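
Condensed, the probe-time fix above is the standard pattern for toggleable
offloads (a sketch reconstructed from the hunks, not verbatim):

	if (id->driver_data & DEV_HAS_VLAN) {
		np->vlanctl_bits = NVREG_VLANCONTROL_ENABLE;
		/* advertise as toggleable rather than forcing it on */
		dev->hw_features |= NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX;
	}
	dev->features |= dev->hw_features;	/* default: offloads enabled */
	/* ... rest of probe ... */
	nv_vlan_mode(dev, dev->features);	/* program the MAC to match */
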
 --- linux-3.0/scripts/kconfig/lxdialog/check-lxdialog.sh~      2011-07-22 04:17:23.000000000 +0200
 +++ linux-3.0/scripts/kconfig/lxdialog/check-lxdialog.sh       2011-08-25 21:26:04.799150642 +0200
 @@ -9,6 +9,12 @@
@@ -348,887 +151,6 @@
                                exit
                        fi
                done
-commit 37b652ec6445be99d0193047d1eda129a1a315d3
-Author: Dave Chinner <dchin...@redhat.com>
-Date:   Thu Aug 25 07:17:01 2011 +0000
-
-    xfs: don't serialise direct IO reads on page cache checks
-    
-    There is no need to grab the i_mutex of the IO lock in exclusive
-    mode if we don't need to invalidate the page cache. Taking these
-    locks on every direct IO effectively serialises them, as taking the IO
-    lock in exclusive mode has to wait for all shared holders to drop
-    the lock. That only happens when IO is complete, so effectively it
-    prevents dispatch of concurrent direct IO reads to the same inode.
-    
-    Fix this by taking the IO lock shared to check the page cache state,
-    and only then drop it and take the IO lock exclusively if there is
-    work to be done. Hence for the normal direct IO case, no exclusive
-    locking will occur.
-    
-    Signed-off-by: Dave Chinner <dchin...@redhat.com>
-    Tested-by: Joern Engel <jo...@logfs.org>
-    Reviewed-by: Christoph Hellwig <h...@lst.de>
-    Signed-off-by: Alex Elder <ael...@sgi.com>
-
-diff --git a/fs/xfs/linux-2.6/xfs_file.c b/fs/xfs/linux-2.6/xfs_file.c
-index 7f7b424..8fd4a07 100644
---- a/fs/xfs/linux-2.6/xfs_file.c
-+++ b/fs/xfs/linux-2.6/xfs_file.c
-@@ -317,7 +317,19 @@ xfs_file_aio_read(
-       if (XFS_FORCED_SHUTDOWN(mp))
-               return -EIO;
- 
--      if (unlikely(ioflags & IO_ISDIRECT)) {
-+      /*
-+       * Locking is a bit tricky here. If we take an exclusive lock
-+       * for direct IO, we effectively serialise all new concurrent
-+       * read IO to this file and block it behind IO that is currently in
-+       * progress because IO in progress holds the IO lock shared. We only
-+       * need to hold the lock exclusive to blow away the page cache, so
-+       * only take lock exclusively if the page cache needs invalidation.
-+       * This allows the normal direct IO case of no page cache pages to
-+       * proceed concurrently without serialisation.
-+       */
-+      xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);
-+      if ((ioflags & IO_ISDIRECT) && inode->i_mapping->nrpages) {
-+              xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
-               xfs_rw_ilock(ip, XFS_IOLOCK_EXCL);
- 
-               if (inode->i_mapping->nrpages) {
-@@ -330,8 +342,7 @@ xfs_file_aio_read(
-                       }
-               }
-               xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL);
--      } else
--              xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);
-+      }
- 
-       trace_xfs_file_read(ip, size, iocb->ki_pos, ioflags);
- 
-
-
-
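
Stripped of the XFS specifics, the locking dance above is the generic
check-then-upgrade pattern on a rwsem. A minimal sketch (read_path(),
need_invalidation() and do_invalidation() are hypothetical placeholders;
down_read()/downgrade_write() are the stock rwsem API):

	static void read_path(struct rw_semaphore *lock)
	{
		down_read(lock);
		if (need_invalidation()) {		/* rare case */
			up_read(lock);			/* rwsems can't upgrade in place... */
			down_write(lock);		/* ...so drop and retake exclusive */
			if (need_invalidation())	/* re-check: racers may have
							 * run in the unlocked window */
				do_invalidation();
			downgrade_write(lock);		/* back to shared for the IO */
		}
		/* ... do the actual read under the shared lock ... */
		up_read(lock);
	}
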
-Start the periodic sync workers only after we have finished xfs_mountfs
-and thus fully set up the filesystem structures.  Without this we can
-call into xfs_qm_sync before the quotainfo structure is set up if the
-mount takes unusually long, and probably hit other incomplete states
-as well.
-    
-Also clean up the xfs_fs_fill_super error path by using consistent
-label names, and removing an impossible to reach case.
-
-Signed-off-by: Christoph Hellwig <h...@lst.de>
-Reported-by: Arkadiusz Miskiewicz <ar...@maven.pl>
-Reviewed-by: Alex Elder <ael...@sgi.com>
-
-diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c
-index a1a881e..3ebb458 100644
---- a/fs/xfs/linux-2.6/xfs_super.c
-+++ b/fs/xfs/linux-2.6/xfs_super.c
-@@ -1412,37 +1412,35 @@ xfs_fs_fill_super(
-       sb->s_time_gran = 1;
-       set_posix_acl_flag(sb);
- 
--      error = xfs_syncd_init(mp);
--      if (error)
--              goto out_filestream_unmount;
--
-       xfs_inode_shrinker_register(mp);
- 
-       error = xfs_mountfs(mp);
-       if (error)
--              goto out_syncd_stop;
-+              goto out_filestream_unmount;
-+
-+      error = xfs_syncd_init(mp);
-+      if (error)
-+              goto out_unmount;
- 
-       root = igrab(VFS_I(mp->m_rootip));
-       if (!root) {
-               error = ENOENT;
--              goto fail_unmount;
-+              goto out_syncd_stop;
-       }
-       if (is_bad_inode(root)) {
-               error = EINVAL;
--              goto fail_vnrele;
-+              goto out_syncd_stop;
-       }
-       sb->s_root = d_alloc_root(root);
-       if (!sb->s_root) {
-               error = ENOMEM;
--              goto fail_vnrele;
-+              goto out_iput;
-       }
- 
-       return 0;
- 
-- out_syncd_stop:
--      xfs_inode_shrinker_unregister(mp);
--      xfs_syncd_stop(mp);
-  out_filestream_unmount:
-+      xfs_inode_shrinker_unregister(mp);
-       xfs_filestream_unmount(mp);
-  out_free_sb:
-       xfs_freesb(mp);
-@@ -1456,17 +1454,12 @@ xfs_fs_fill_super(
-  out:
-       return -error;
- 
-- fail_vnrele:
--      if (sb->s_root) {
--              dput(sb->s_root);
--              sb->s_root = NULL;
--      } else {
--              iput(root);
--      }
--
-- fail_unmount:
--      xfs_inode_shrinker_unregister(mp);
-+ out_iput:
-+      iput(root);
-+ out_syncd_stop:
-       xfs_syncd_stop(mp);
-+ out_unmount:
-+      xfs_inode_shrinker_unregister(mp);
- 
-       /*
-        * Blow away any referenced inode in the filestreams cache.
-
-_______________________________________________
-xfs mailing list
-x...@oss.sgi.com
-http://oss.sgi.com/mailman/listinfo/xfs
-
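
The relabelled error path above is the usual kernel unwind idiom: one label
per teardown step, stacked in reverse setup order, so every failure point
jumps to exactly the cleanup it needs and nothing is torn down twice. A
generic sketch (setup_*/teardown_* are hypothetical helpers):

	static int example_fill_super(void)
	{
		int error;

		error = setup_a();
		if (error)
			goto out;
		error = setup_b();
		if (error)
			goto out_teardown_a;
		error = setup_c();
		if (error)
			goto out_teardown_b;
		return 0;

	 out_teardown_b:
		teardown_b();
	 out_teardown_a:
		teardown_a();
	 out:
		return error;
	}
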
-
-From: Dave Chinner <dchin...@redhat.com>
-
-commit 1d8c95a363bf8cd4d4182dd19c01693b635311c2 upstream
-
-
-xfs: use a cursor for bulk AIL insertion
-
-Delayed logging can insert tens of thousands of log items into the
-AIL at the same LSN. When the committing of log commit records
-occurs, we can get insertions occurring at an LSN that is not at the
-end of the AIL. If there are thousands of items in the AIL on the
-tail LSN, each insertion has to walk the AIL to find the correct
-place to insert the new item into the AIL. This can consume large
-amounts of CPU time and block other operations from occurring while
-the traversals are in progress.
-
-To avoid this repeated walk, use an AIL cursor to record
-where we should be inserting the new items into the AIL without
-having to repeat the walk. The cursor infrastructure already
-provides this functionality for push walks, so this is a simple extension
-of existing code. While this will not avoid the initial walk, it
-will avoid repeating it tens of thousands of times during a single
-checkpoint commit.
-
-This version includes logic improvements from Christoph Hellwig.
-
-Signed-off-by: Dave Chinner <dchin...@redhat.com>
-Reviewed-by: Christoph Hellwig <h...@lst.de>
-Signed-off-by: Alex Elder <ael...@sgi.com>
-
-diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c
-index c83f63b..efc147f 100644
---- a/fs/xfs/xfs_trans.c
-+++ b/fs/xfs/xfs_trans.c
-@@ -1426,6 +1426,7 @@ xfs_trans_committed(
- static inline void
- xfs_log_item_batch_insert(
-       struct xfs_ail          *ailp,
-+      struct xfs_ail_cursor   *cur,
-       struct xfs_log_item     **log_items,
-       int                     nr_items,
-       xfs_lsn_t               commit_lsn)
-@@ -1434,7 +1435,7 @@ xfs_log_item_batch_insert(
- 
-       spin_lock(&ailp->xa_lock);
-       /* xfs_trans_ail_update_bulk drops ailp->xa_lock */
--      xfs_trans_ail_update_bulk(ailp, log_items, nr_items, commit_lsn);
-+      xfs_trans_ail_update_bulk(ailp, cur, log_items, nr_items, commit_lsn);
- 
-       for (i = 0; i < nr_items; i++)
-               IOP_UNPIN(log_items[i], 0);
-@@ -1452,6 +1453,13 @@ xfs_log_item_batch_insert(
-  * as an iclog write error even though we haven't started any IO yet. Hence in
-  * this case all we need to do is IOP_COMMITTED processing, followed by an
-  * IOP_UNPIN(aborted) call.
-+ *
-+ * The AIL cursor is used to optimise the insert process. If commit_lsn is not
-+ * at the end of the AIL, the insert cursor avoids the need to walk
-+ * the AIL to find the insertion point on every xfs_log_item_batch_insert()
-+ * call. This saves a lot of needless list walking and is a net win, even
-+ * though it slightly increases the amount of AIL lock traffic to set it up
-+ * and tear it down.
-  */
- void
- xfs_trans_committed_bulk(
-@@ -1463,8 +1471,13 @@ xfs_trans_committed_bulk(
- #define LOG_ITEM_BATCH_SIZE   32
-       struct xfs_log_item     *log_items[LOG_ITEM_BATCH_SIZE];
-       struct xfs_log_vec      *lv;
-+      struct xfs_ail_cursor   cur;
-       int                     i = 0;
- 
-+      spin_lock(&ailp->xa_lock);
-+      xfs_trans_ail_cursor_last(ailp, &cur, commit_lsn);
-+      spin_unlock(&ailp->xa_lock);
-+
-       /* unpin all the log items */
-       for (lv = log_vector; lv; lv = lv->lv_next ) {
-               struct xfs_log_item     *lip = lv->lv_item;
-@@ -1493,7 +1506,9 @@ xfs_trans_committed_bulk(
-                       /*
-                        * Not a bulk update option due to unusual item_lsn.
-                        * Push into AIL immediately, rechecking the lsn once
--                       * we have the ail lock. Then unpin the item.
-+                       * we have the ail lock. Then unpin the item. This does
-+                       * not affect the AIL cursor the bulk insert path is
-+                       * using.
-                        */
-                       spin_lock(&ailp->xa_lock);
-                       if (XFS_LSN_CMP(item_lsn, lip->li_lsn) > 0)
-@@ -1507,7 +1522,7 @@ xfs_trans_committed_bulk(
-               /* Item is a candidate for bulk AIL insert.  */
-               log_items[i++] = lv->lv_item;
-               if (i >= LOG_ITEM_BATCH_SIZE) {
--                      xfs_log_item_batch_insert(ailp, log_items,
-+                      xfs_log_item_batch_insert(ailp, &cur, log_items,
-                                       LOG_ITEM_BATCH_SIZE, commit_lsn);
-                       i = 0;
-               }
-@@ -1515,7 +1530,11 @@ xfs_trans_committed_bulk(
- 
-       /* make sure we insert the remainder! */
-       if (i)
--              xfs_log_item_batch_insert(ailp, log_items, i, commit_lsn);
-+              xfs_log_item_batch_insert(ailp, &cur, log_items, i, commit_lsn);
-+
-+      spin_lock(&ailp->xa_lock);
-+      xfs_trans_ail_cursor_done(ailp, &cur);
-+      spin_unlock(&ailp->xa_lock);
- }
- 
- /*
-diff --git a/fs/xfs/xfs_trans_ail.c b/fs/xfs/xfs_trans_ail.c
-index 5fc2380..9a69dc0 100644
---- a/fs/xfs/xfs_trans_ail.c
-+++ b/fs/xfs/xfs_trans_ail.c
-@@ -272,9 +272,9 @@ xfs_trans_ail_cursor_clear(
- }
- 
- /*
-- * Return the item in the AIL with the current lsn.
-- * Return the current tree generation number for use
-- * in calls to xfs_trans_next_ail().
-+ * Initialise the cursor to the first item in the AIL with the given @lsn.
-+ * This searches the list from lowest LSN to highest. Pass a @lsn of zero
-+ * to initialise the cursor to the first item in the AIL.
-  */
- xfs_log_item_t *
- xfs_trans_ail_cursor_first(
-@@ -300,31 +300,97 @@ out:
- }
- 
- /*
-- * splice the log item list into the AIL at the given LSN.
-+ * Initialise the cursor to the last item in the AIL with the given @lsn.
-+ * This searches the list from highest LSN to lowest. If there is no item with
-+ * the value of @lsn, then it sets the cursor to the last item with an LSN lower
-+ * than @lsn.
-+ */
-+static struct xfs_log_item *
-+__xfs_trans_ail_cursor_last(
-+      struct xfs_ail          *ailp,
-+      xfs_lsn_t               lsn)
-+{
-+      xfs_log_item_t          *lip;
-+
-+      list_for_each_entry_reverse(lip, &ailp->xa_ail, li_ail) {
-+              if (XFS_LSN_CMP(lip->li_lsn, lsn) <= 0)
-+                      return lip;
-+      }
-+      return NULL;
-+}
-+
-+/*
-+ * Initialise the cursor to the last item in the AIL with the given @lsn.
-+ * This searches the list from highest LSN to lowest.
-+ */
-+struct xfs_log_item *
-+xfs_trans_ail_cursor_last(
-+      struct xfs_ail          *ailp,
-+      struct xfs_ail_cursor   *cur,
-+      xfs_lsn_t               lsn)
-+{
-+      xfs_trans_ail_cursor_init(ailp, cur);
-+      cur->item = __xfs_trans_ail_cursor_last(ailp, lsn);
-+      return cur->item;
-+}
-+
-+/*
-+ * splice the log item list into the AIL at the given LSN. We splice to the
-+ * tail of the given LSN to maintain insert order for push traversals. The
-+ * cursor is optional, allowing repeated updates to the same LSN to avoid
-+ * repeated traversals.
-  */
- static void
- xfs_ail_splice(
--      struct xfs_ail  *ailp,
--      struct list_head *list,
--      xfs_lsn_t       lsn)
-+      struct xfs_ail          *ailp,
-+      struct xfs_ail_cursor   *cur,
-+      struct list_head        *list,
-+      xfs_lsn_t               lsn)
- {
--      xfs_log_item_t  *next_lip;
-+      struct xfs_log_item     *lip = cur ? cur->item : NULL;
-+      struct xfs_log_item     *next_lip;
- 
--      /* If the list is empty, just insert the item.  */
--      if (list_empty(&ailp->xa_ail)) {
--              list_splice(list, &ailp->xa_ail);
--              return;
-+      /*
-+       * Get a new cursor if we don't have a placeholder or the existing one
<<Diff was trimmed, longer than 597 lines>>
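
The cursor change boils down to remembering the previous insertion point in
an LSN-sorted list, so a burst of insertions at one commit_lsn pays for the
list walk once instead of once per batch. A userspace toy of the idea
(struct item and cursor_insert() are hypothetical; the real code uses
list_head plus struct xfs_ail_cursor under ailp->xa_lock):

	struct item { long lsn; struct item *next; };

	static void cursor_insert(struct item **head, struct item **cursor,
				  struct item *new)
	{
		/* resume from the cursor while it is still at or before our lsn */
		struct item **link = (*cursor && (*cursor)->lsn <= new->lsn)
					? &(*cursor)->next : head;

		/* insert after every item with lsn <= new->lsn, so items that
		 * share an lsn keep insertion order, as the AIL requires */
		while (*link && (*link)->lsn <= new->lsn)
			link = &(*link)->next;

		new->next = *link;
		*link = new;
		*cursor = new;	/* the next equal-lsn insert starts here: O(1) */
	}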

---- CVS-web:
    
http://cvs.pld-linux.org/cgi-bin/cvsweb.cgi/packages/kernel/kernel-aufs2-unionfs.patch?r1=1.5&r2=1.6&f=u
    
http://cvs.pld-linux.org/cgi-bin/cvsweb.cgi/packages/kernel/kernel-small_fixes.patch?r1=1.43&r2=1.44&f=u
