Re: [PATCH v2 net-next 2/4] net: dev: add batching to net_device notifiers

2016-02-07 Thread Julian Anastasov

Hello,

On Sat, 6 Feb 2016, Salam Noureddine wrote:

> On Sat, Feb 6, 2016 at 10:58 AM, Julian Anastasov  wrote:

> >> + /* call batch notifiers which act on net namespaces */
> >> + list_for_each_entry(dev, head, unreg_list) {
> >> + net_add_event_list(&net_head, dev_net(dev));
> >
> > Looks like we can move the above net_add_event_list with
> > the comment into the previous loop after NETDEV_UNREGISTER,
> > we will save some cycles.
> 
> I didn't move it into the previous loop because the NETDEV_UNREGISTER
> notifier can
> end up calling rollback_registered_many (for example the vlan driver
> unregistering
> all vlans on top of a device) in which case we would be using the
> event_list in the net
> namespace.

ok. May be another safe option is to call it just before
NETDEV_UNREGISTER, so that if rollback_registered_many is called
recursively we will have single NETDEV_UNREGISTER_BATCH call.
Same should work for NETDEV_DOWN_BATCH in dev_close_many. But
these are small optimizations, so it is up to you to decide.

Regards

--
Julian Anastasov 


Re: [PATCH v2 net-next 2/4] net: dev: add batching to net_device notifiers

2016-02-06 Thread Julian Anastasov

Hello,

On Thu, 4 Feb 2016, Salam Noureddine wrote:

> @@ -1572,12 +1583,17 @@ rollback:
>   call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
>   dev);
>   call_netdevice_notifier(nb, NETDEV_DOWN, dev);
> + call_netdevice_notifier(nb, NETDEV_DOWN_BATCH,
> + dev);

I now see that we should split the loop
here, so that NETDEV_DOWN_BATCH is called only once per net:

bool down = false;

for_each_netdev(net, dev) {
if (dev == last)
break;

if (dev->flags & IFF_UP) {
call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
dev);
call_netdevice_notifier(nb, NETDEV_DOWN, dev);
down = true;
}
}

rt_cache_flush and arp_ifdown_all will be called
on NETDEV_UNREGISTER_BATCH, so use 'down' flag:

if (down)
call_netdevice_notifier(nb, NETDEV_DOWN_BATCH,
net->loopback_dev);
for_each_netdev(net, dev) {
if (dev == last)
goto outroll;
call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
}
call_netdevice_notifier(nb, NETDEV_UNREGISTER_BATCH,
net->loopback_dev);


>   }
>   call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
>   }
> + call_netdevice_notifier(nb, NETDEV_UNREGISTER_BATCH,
> + net->loopback_dev);
>   }
>  
>  outroll:
> + call_netdevice_notifier(nb, NETDEV_UNREGISTER_BATCH, last);
> raw_notifier_chain_unregister(&netdev_chain, nb);
>   goto unlock;
>  }
> @@ -1614,9 +1630,13 @@ int unregister_netdevice_notifier(struct 
> notifier_block *nb)
>   call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
>   dev);
>   call_netdevice_notifier(nb, NETDEV_DOWN, dev);
> + call_netdevice_notifier(nb, NETDEV_DOWN_BATCH,
> + dev);

Same here, split loop for NETDEV_DOWN_BATCH because
arp_ifdown_all is slow.

>   }
>   call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
>   }
> + call_netdevice_notifier(nb, NETDEV_UNREGISTER_BATCH,
> + net->loopback_dev);
>   }
>  unlock:
>   rtnl_unlock();

>  static void rollback_registered_many(struct list_head *head)
>  {
>   struct net_device *dev, *tmp;
> + struct net *net, *net_tmp;
>   LIST_HEAD(close_head);
> + LIST_HEAD(net_head);
>  
>   BUG_ON(dev_boot_phase);
>   ASSERT_RTNL();
> @@ -6465,8 +6489,6 @@ static void rollback_registered_many(struct list_head 
> *head)
>   synchronize_net();
>  
>   list_for_each_entry(dev, head, unreg_list) {
> - struct sk_buff *skb = NULL;
> -
>   /* Shutdown queueing discipline. */
>   dev_shutdown(dev);
>  
> @@ -6475,6 +6497,20 @@ static void rollback_registered_many(struct list_head 
> *head)
>  this device. They should clean all the things.
>   */
>   call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
> + }
> +
> + /* call batch notifiers which act on net namespaces */
> + list_for_each_entry(dev, head, unreg_list) {
> + net_add_event_list(&net_head, dev_net(dev));

Looks like we can move the above net_add_event_list with
the comment into the previous loop after NETDEV_UNREGISTER,
we will save some cycles.

> + }
> + list_for_each_entry_safe(net, net_tmp, &net_head, event_list) {
> + call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH,
> +  net->loopback_dev);
> + net_del_event_list(net);
> + }
> +
> + list_for_each_entry(dev, head, unreg_list) {
> + struct sk_buff *skb = NULL;
>  
>   if (!dev->rtnl_link_ops ||
>   dev->rtnl_link_state == RTNL_LINK_INITIALIZED)

Regards

--
Julian Anastasov 


Re: [PATCH v2 net-next 2/4] net: dev: add batching to net_device notifiers

2016-02-06 Thread Salam Noureddine
On Sat, Feb 6, 2016 at 10:58 AM, Julian Anastasov  wrote:

>
> I now see that we should split the loop
> here, so that NETDEV_DOWN_BATCH is called only once per net:
>
> bool down = false;
>
> for_each_netdev(net, dev) {
> if (dev == last)
> break;
>
> if (dev->flags & IFF_UP) {
> call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
> dev);
> call_netdevice_notifier(nb, NETDEV_DOWN, dev);
> down = true;
> }
> }
>
> rt_cache_flush and arp_ifdown_all will be called
> on NETDEV_UNREGISTER_BATCH, so use 'down' flag:
>
> if (down)
> call_netdevice_notifier(nb, NETDEV_DOWN_BATCH,
> net->loopback_dev);
> for_each_netdev(net, dev) {
> if (dev == last)
> goto outroll;
> call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
> }
> call_netdevice_notifier(nb, NETDEV_UNREGISTER_BATCH,
> net->loopback_dev);
>
>
>>   }
>>   call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
>>   }
>> + call_netdevice_notifier(nb, NETDEV_UNREGISTER_BATCH,
>> + net->loopback_dev);
>>   }
>>
>>  outroll:
>> + call_netdevice_notifier(nb, NETDEV_UNREGISTER_BATCH, last);
>> raw_notifier_chain_unregister(&netdev_chain, nb);
>>   goto unlock;
>>  }

I am not sure we need to worry too much about optimizing the failure
code path to justify splitting into two loops.


>>  static void rollback_registered_many(struct list_head *head)
>>  {

>>   list_for_each_entry(dev, head, unreg_list) {
>> - struct sk_buff *skb = NULL;
>> -
>>   /* Shutdown queueing discipline. */
>>   dev_shutdown(dev);
>>
>> @@ -6475,6 +6497,20 @@ static void rollback_registered_many(struct list_head 
>> *head)
>>  this device. They should clean all the things.
>>   */
>>   call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
>> + }
>> +
>> + /* call batch notifiers which act on net namespaces */
>> + list_for_each_entry(dev, head, unreg_list) {
>> + net_add_event_list(&net_head, dev_net(dev));
>
> Looks like we can move the above net_add_event_list with
> the comment into the previous loop after NETDEV_UNREGISTER,
> we will save some cycles.
>

I didn't move it into the previous loop because the NETDEV_UNREGISTER
notifier can
end up calling rollback_registered_many (for example the vlan driver
unregistering
all vlans on top of a device) in which case we would be using the
event_list in the net
namespace.

> Julian Anastasov 

Thanks,

Salam


[PATCH v2 net-next 2/4] net: dev: add batching to net_device notifiers

2016-02-04 Thread Salam Noureddine
This can be used to optimize bringing down and unregistering
net_devices by running certain cleanup operations only on the
net namespace instead of on each net_device.

Signed-off-by: Salam Noureddine 
---
 include/linux/netdevice.h |  2 ++
 net/core/dev.c| 48 ++-
 2 files changed, 45 insertions(+), 5 deletions(-)

diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index c20b814..1b12269 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -2183,6 +2183,8 @@ struct netdev_lag_lower_state_info {
 #define NETDEV_BONDING_INFO0x0019
 #define NETDEV_PRECHANGEUPPER  0x001A
 #define NETDEV_CHANGELOWERSTATE0x001B
+#define NETDEV_UNREGISTER_BATCH0x001C
+#define NETDEV_DOWN_BATCH  0x001D
 
 int register_netdevice_notifier(struct notifier_block *nb);
 int unregister_netdevice_notifier(struct notifier_block *nb);
diff --git a/net/core/dev.c b/net/core/dev.c
index 914b4a2..dbd8995 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1439,6 +1439,8 @@ static int __dev_close(struct net_device *dev)
 int dev_close_many(struct list_head *head, bool unlink)
 {
struct net_device *dev, *tmp;
+   struct net *net, *net_tmp;
+   LIST_HEAD(net_head);
 
/* Remove the devices that don't need to be closed */
list_for_each_entry_safe(dev, tmp, head, close_list)
@@ -1447,13 +1449,22 @@ int dev_close_many(struct list_head *head, bool unlink)
 
__dev_close_many(head);
 
-   list_for_each_entry_safe(dev, tmp, head, close_list) {
+   list_for_each_entry(dev, head, close_list) {
rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
call_netdevice_notifiers(NETDEV_DOWN, dev);
+   }
+
+   list_for_each_entry_safe(dev, tmp, head, close_list) {
+   net_add_event_list(&net_head, dev_net(dev));
if (unlink)
list_del_init(>close_list);
}
 
+   list_for_each_entry_safe(net, net_tmp, &net_head, event_list) {
+   call_netdevice_notifiers(NETDEV_DOWN_BATCH, net->loopback_dev);
+   net_del_event_list(net);
+   }
+
return 0;
 }
 EXPORT_SYMBOL(dev_close_many);
@@ -1572,12 +1583,17 @@ rollback:
call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
dev);
call_netdevice_notifier(nb, NETDEV_DOWN, dev);
+   call_netdevice_notifier(nb, NETDEV_DOWN_BATCH,
+   dev);
}
call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
}
+   call_netdevice_notifier(nb, NETDEV_UNREGISTER_BATCH,
+   net->loopback_dev);
}
 
 outroll:
+   call_netdevice_notifier(nb, NETDEV_UNREGISTER_BATCH, last);
raw_notifier_chain_unregister(&netdev_chain, nb);
goto unlock;
 }
@@ -1614,9 +1630,13 @@ int unregister_netdevice_notifier(struct notifier_block 
*nb)
call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
dev);
call_netdevice_notifier(nb, NETDEV_DOWN, dev);
+   call_netdevice_notifier(nb, NETDEV_DOWN_BATCH,
+   dev);
}
call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
}
+   call_netdevice_notifier(nb, NETDEV_UNREGISTER_BATCH,
+   net->loopback_dev);
}
 unlock:
rtnl_unlock();
@@ -6187,10 +6207,12 @@ void __dev_notify_flags(struct net_device *dev, 
unsigned int old_flags,
rtmsg_ifinfo(RTM_NEWLINK, dev, gchanges, GFP_ATOMIC);
 
if (changes & IFF_UP) {
-   if (dev->flags & IFF_UP)
+   if (dev->flags & IFF_UP) {
call_netdevice_notifiers(NETDEV_UP, dev);
-   else
+   } else {
call_netdevice_notifiers(NETDEV_DOWN, dev);
+   call_netdevice_notifiers(NETDEV_DOWN_BATCH, dev);
+   }
}
 
if (dev->flags & IFF_UP &&
@@ -6427,7 +6449,9 @@ static void net_set_todo(struct net_device *dev)
 static void rollback_registered_many(struct list_head *head)
 {
struct net_device *dev, *tmp;
+   struct net *net, *net_tmp;
LIST_HEAD(close_head);
+   LIST_HEAD(net_head);
 
BUG_ON(dev_boot_phase);
ASSERT_RTNL();
@@ -6465,8 +6489,6 @@ static void rollback_registered_many(struct list_head 
*head)
synchronize_net();
 
list_for_each_entry(dev, head, unreg_list) {
-   struct sk_buff *skb = NULL;
-
/* Shutdown