-stable review patch.  If anyone has any objections, please let us know.
------------------

From: YOSHIFUJI Hideaki <[EMAIL PROTECTED]>

Join the all-nodes multicast group after the assignment of dev->ip6_ptr,
because dev->ip6_ptr must already be assigned when ipv6_dev_mc_inc() is
called.
This fixes Bug #7817, reported by <[EMAIL PROTECTED]>.
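
The ordering matters because ipv6_dev_mc_inc() has to resolve the device's
inet6_dev through dev->ip6_ptr before it can join the group.  A simplified
sketch of that path (illustrative only, not verbatim kernel code; the
in6_dev_get() lookup and the -EINVAL return are assumptions based on the
2.6 sources):

	/* Illustrative sketch, not the exact kernel implementation */
	int ipv6_dev_mc_inc(struct net_device *dev, struct in6_addr *addr)
	{
		struct inet6_dev *idev;

		/* resolves dev->ip6_ptr; NULL until ipv6_add_dev() assigns it */
		idev = in6_dev_get(dev);
		if (idev == NULL)
			return -EINVAL;	/* the join fails if called too early */

		/* ... allocate an ifmcaddr6 and link it on idev->mc_list ... */
		return 0;
	}

Previously the join was done from ipv6_mc_init_dev(), which ipv6_add_dev()
calls before assigning dev->ip6_ptr, so the all-nodes join could not
succeed; the patch moves the join to just after rcu_assign_pointer().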

Closes: 7817
Signed-off-by: YOSHIFUJI Hideaki <[EMAIL PROTECTED]>
Signed-off-by: Chris Wright <[EMAIL PROTECTED]>
---
 net/ipv6/addrconf.c |    6 ++++++
 net/ipv6/mcast.c    |    6 ------
 2 files changed, 6 insertions(+), 6 deletions(-)

--- linux-2.6.19.2.orig/net/ipv6/addrconf.c
+++ linux-2.6.19.2/net/ipv6/addrconf.c
@@ -341,6 +341,7 @@ void in6_dev_finish_destroy(struct inet6
 static struct inet6_dev * ipv6_add_dev(struct net_device *dev)
 {
        struct inet6_dev *ndev;
+       struct in6_addr maddr;
 
        ASSERT_RTNL();
 
@@ -425,6 +426,11 @@ static struct inet6_dev * ipv6_add_dev(s
 #endif
        /* protected by rtnl_lock */
        rcu_assign_pointer(dev->ip6_ptr, ndev);
+
+       /* Join all-node multicast group */
+       ipv6_addr_all_nodes(&maddr);
+       ipv6_dev_mc_inc(dev, &maddr);
+
        return ndev;
 }
 
--- linux-2.6.19.2.orig/net/ipv6/mcast.c
+++ linux-2.6.19.2/net/ipv6/mcast.c
@@ -2252,8 +2252,6 @@ void ipv6_mc_up(struct inet6_dev *idev)
 
 void ipv6_mc_init_dev(struct inet6_dev *idev)
 {
-       struct in6_addr maddr;
-
        write_lock_bh(&idev->lock);
        rwlock_init(&idev->mc_lock);
        idev->mc_gq_running = 0;
@@ -2269,10 +2267,6 @@ void ipv6_mc_init_dev(struct inet6_dev *
        idev->mc_maxdelay = IGMP6_UNSOLICITED_IVAL;
        idev->mc_v1_seen = 0;
        write_unlock_bh(&idev->lock);
-
-       /* Add all-nodes address. */
-       ipv6_addr_all_nodes(&maddr);
-       ipv6_dev_mc_inc(idev->dev, &maddr);
 }
 
 /*
