Re: [PATCH 24/41] net: Replace get_cpu_var through this_cpu_ptr

2014-01-17 Thread David Miller
From: Christoph Lameter <c...@linux.com>
Date: Fri, 17 Jan 2014 09:18:36 -0600

> [Patch depends on another patch in this series that introduces raw_cpu_ops]
> 
> Replace uses of get_cpu_var for address calculation through this_cpu_ptr.
> 
> Cc: "David S. Miller" <da...@davemloft.net>
> Cc: net...@vger.kernel.org
> Cc: Eric Dumazet <eduma...@google.com>
> Signed-off-by: Christoph Lameter <c...@linux.com>

Acked-by: David S. Miller <da...@davemloft.net>
--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/


[PATCH 24/41] net: Replace get_cpu_var through this_cpu_ptr

2014-01-17 Thread Christoph Lameter
[Patch depends on another patch in this series that introduces raw_cpu_ops]

Replace uses of get_cpu_var for address calculation through this_cpu_ptr.

Cc: "David S. Miller" <da...@davemloft.net>
Cc: net...@vger.kernel.org
Cc: Eric Dumazet <eduma...@google.com>
Signed-off-by: Christoph Lameter <c...@linux.com>
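
Every hunk below applies the same mechanical transformation: taking the address of the
per-cpu lvalue with &__get_cpu_var(var) becomes a direct per-cpu address calculation
with this_cpu_ptr(&var). The following is a minimal sketch of the pattern only; the
per-cpu variable my_pcpu_stats and the function around it are invented for illustration
and are not part of this patch:

#include <linux/irqflags.h>
#include <linux/percpu.h>

/* Invented per-cpu structure, analogous to softnet_data in the hunks below. */
struct my_pcpu_stats {
	unsigned long packets;
};

static DEFINE_PER_CPU(struct my_pcpu_stats, my_pcpu_stats);

static void my_count_packet(void)
{
	struct my_pcpu_stats *stats;
	unsigned long flags;

	local_irq_save(flags);

	/* Old form: dereference the per-cpu lvalue, then take its address. */
	/* stats = &__get_cpu_var(my_pcpu_stats); */

	/* New form: compute the per-cpu address directly. */
	stats = this_cpu_ptr(&my_pcpu_stats);

	stats->packets++;
	local_irq_restore(flags);
}

Both forms yield the same pointer; the new spelling simply makes the address
calculation explicit.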

Index: linux/net/core/dev.c
===
--- linux.orig/net/core/dev.c   2013-12-02 16:07:45.264759422 -0600
+++ linux/net/core/dev.c    2013-12-02 16:07:45.254759699 -0600
@@ -2130,7 +2130,7 @@ static inline void __netif_reschedule(st
unsigned long flags;
 
local_irq_save(flags);
-   sd = &__get_cpu_var(softnet_data);
+   sd = this_cpu_ptr(&softnet_data);
q->next_sched = NULL;
*sd->output_queue_tailp = q;
sd->output_queue_tailp = &q->next_sched;
@@ -2152,7 +2152,7 @@ void dev_kfree_skb_irq(struct sk_buff *s
unsigned long flags;
 
local_irq_save(flags);
-   sd = &__get_cpu_var(softnet_data);
+   sd = this_cpu_ptr(&softnet_data);
skb->next = sd->completion_queue;
sd->completion_queue = skb;
raise_softirq_irqoff(NET_TX_SOFTIRQ);
@@ -3122,7 +3122,7 @@ static void rps_trigger_softirq(void *da
 static int rps_ipi_queued(struct softnet_data *sd)
 {
 #ifdef CONFIG_RPS
-   struct softnet_data *mysd = &__get_cpu_var(softnet_data);
+   struct softnet_data *mysd = this_cpu_ptr(&softnet_data);
 
if (sd != mysd) {
sd->rps_ipi_next = mysd->rps_ipi_list;
@@ -3149,7 +3149,7 @@ static bool skb_flow_limit(struct sk_buf
if (qlen < (netdev_max_backlog >> 1))
return false;
 
-   sd = &__get_cpu_var(softnet_data);
+   sd = this_cpu_ptr(&softnet_data);
 
rcu_read_lock();
fl = rcu_dereference(sd->flow_limit);
@@ -3291,7 +3291,7 @@ EXPORT_SYMBOL(netif_rx_ni);
 
 static void net_tx_action(struct softirq_action *h)
 {
-   struct softnet_data *sd = &__get_cpu_var(softnet_data);
+   struct softnet_data *sd = this_cpu_ptr(&softnet_data);
 
if (sd->completion_queue) {
struct sk_buff *clist;
@@ -3711,7 +3711,7 @@ EXPORT_SYMBOL(netif_receive_skb);
 static void flush_backlog(void *arg)
 {
struct net_device *dev = arg;
-   struct softnet_data *sd = &__get_cpu_var(softnet_data);
+   struct softnet_data *sd = this_cpu_ptr(&softnet_data);
struct sk_buff *skb, *tmp;
 
rps_lock(sd);
@@ -4157,7 +4157,7 @@ void __napi_schedule(struct napi_struct
unsigned long flags;
 
local_irq_save(flags);
-   ____napi_schedule(&__get_cpu_var(softnet_data), n);
+   ____napi_schedule(this_cpu_ptr(&softnet_data), n);
local_irq_restore(flags);
 }
 EXPORT_SYMBOL(__napi_schedule);
@@ -4285,7 +4285,7 @@ EXPORT_SYMBOL(netif_napi_del);
 
 static void net_rx_action(struct softirq_action *h)
 {
-   struct softnet_data *sd = &__get_cpu_var(softnet_data);
+   struct softnet_data *sd = this_cpu_ptr(&softnet_data);
unsigned long time_limit = jiffies + 2;
int budget = netdev_budget;
void *have;
Index: linux/net/core/drop_monitor.c
===
--- linux.orig/net/core/drop_monitor.c  2013-12-02 16:07:45.264759422 -0600
+++ linux/net/core/drop_monitor.c   2013-12-02 16:07:45.254759699 -0600
@@ -147,7 +147,7 @@ static void trace_drop_common(struct sk_
unsigned long flags;
 
local_irq_save(flags);
-   data = &__get_cpu_var(dm_cpu_data);
+   data = this_cpu_ptr(&dm_cpu_data);
spin_lock(&data->lock);
dskb = data->skb;
 
Index: linux/net/core/skbuff.c
===
--- linux.orig/net/core/skbuff.c    2013-12-02 16:07:45.264759422 -0600
+++ linux/net/core/skbuff.c 2013-12-02 16:07:45.254759699 -0600
@@ -371,7 +371,7 @@ static void *__netdev_alloc_frag(unsigne
unsigned long flags;
 
local_irq_save(flags);
-   nc = &__get_cpu_var(netdev_alloc_cache);
+   nc = this_cpu_ptr(&netdev_alloc_cache);
if (unlikely(!nc->frag.page)) {
 refill:
for (order = NETDEV_FRAG_PAGE_MAX_ORDER; ;) {
Index: linux/net/ipv4/tcp_output.c
===
--- linux.orig/net/ipv4/tcp_output.c    2013-12-02 16:07:45.264759422 -0600
+++ linux/net/ipv4/tcp_output.c 2013-12-02 16:07:45.254759699 -0600
@@ -815,7 +815,7 @@ void tcp_wfree(struct sk_buff *skb)
 
/* queue this socket to tasklet queue */
local_irq_save(flags);
-   tsq = &__get_cpu_var(tsq_tasklet);
+   tsq = this_cpu_ptr(&tsq_tasklet);
list_add(&tp->tsq_node, &tsq->head);
tasklet_schedule(&tsq->tasklet);
local_irq_restore(flags);
Index: linux/net/ipv6/syncookies.c
===
--- linux.orig/net/ipv6/syncookies.c    2013-12-02 16:07:45.264759422 -0600
+++ linux/net/ipv6/syncookies.c 


[PATCH 24/41] net: Replace get_cpu_var through this_cpu_ptr

2013-12-03 Thread Christoph Lameter
Replace uses of get_cpu_var for address calculation through this_cpu_ptr.

Cc: "David S. Miller" <da...@davemloft.net>
Cc: net...@vger.kernel.org
Cc: Eric Dumazet <eduma...@google.com>
Signed-off-by: Christoph Lameter <c...@linux.com>
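
The conversion is safe here because every converted site already used __get_cpu_var(),
i.e. it already runs in a context where the current CPU cannot change underneath it
(interrupts disabled or softirq). this_cpu_ptr() only performs the address calculation,
whereas get_cpu_var()/put_cpu_var() would additionally disable and re-enable preemption.
A sketch contrasting the two idioms, using an invented per-cpu list (my_work_list is
not part of this patch):

#include <linux/irqflags.h>
#include <linux/list.h>
#include <linux/percpu.h>

/*
 * Invented per-cpu list, purely for illustration.  Each CPU's head would
 * still need INIT_LIST_HEAD() at init time; that setup is omitted here.
 */
static DEFINE_PER_CPU(struct list_head, my_work_list);

/*
 * get_cpu_var()/put_cpu_var() bracket the access with
 * preempt_disable()/preempt_enable() on their own:
 */
static void queue_item_get_cpu_var(struct list_head *item)
{
	list_add_tail(item, &get_cpu_var(my_work_list));
	put_cpu_var(my_work_list);
}

/*
 * this_cpu_ptr() only calculates the address; the caller is expected to
 * already be pinned to one CPU (interrupts are disabled here), which
 * matches the call sites touched by this patch.
 */
static void queue_item_this_cpu_ptr(struct list_head *item)
{
	unsigned long flags;

	local_irq_save(flags);
	list_add_tail(item, this_cpu_ptr(&my_work_list));
	local_irq_restore(flags);
}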

Index: linux/net/core/dev.c
===
--- linux.orig/net/core/dev.c   2013-12-02 16:07:45.264759422 -0600
+++ linux/net/core/dev.c    2013-12-02 16:07:45.254759699 -0600
@@ -2130,7 +2130,7 @@ static inline void __netif_reschedule(st
unsigned long flags;
 
local_irq_save(flags);
-   sd = &__get_cpu_var(softnet_data);
+   sd = this_cpu_ptr(&softnet_data);
q->next_sched = NULL;
*sd->output_queue_tailp = q;
sd->output_queue_tailp = &q->next_sched;
@@ -2152,7 +2152,7 @@ void dev_kfree_skb_irq(struct sk_buff *s
unsigned long flags;
 
local_irq_save(flags);
-   sd = &__get_cpu_var(softnet_data);
+   sd = this_cpu_ptr(&softnet_data);
skb->next = sd->completion_queue;
sd->completion_queue = skb;
raise_softirq_irqoff(NET_TX_SOFTIRQ);
@@ -3122,7 +3122,7 @@ static void rps_trigger_softirq(void *da
 static int rps_ipi_queued(struct softnet_data *sd)
 {
 #ifdef CONFIG_RPS
-   struct softnet_data *mysd = &__get_cpu_var(softnet_data);
+   struct softnet_data *mysd = this_cpu_ptr(&softnet_data);
 
if (sd != mysd) {
sd->rps_ipi_next = mysd->rps_ipi_list;
@@ -3149,7 +3149,7 @@ static bool skb_flow_limit(struct sk_buf
if (qlen < (netdev_max_backlog >> 1))
return false;
 
-   sd = &__get_cpu_var(softnet_data);
+   sd = this_cpu_ptr(&softnet_data);
 
rcu_read_lock();
fl = rcu_dereference(sd->flow_limit);
@@ -3291,7 +3291,7 @@ EXPORT_SYMBOL(netif_rx_ni);
 
 static void net_tx_action(struct softirq_action *h)
 {
-   struct softnet_data *sd = &__get_cpu_var(softnet_data);
+   struct softnet_data *sd = this_cpu_ptr(&softnet_data);
 
if (sd->completion_queue) {
struct sk_buff *clist;
@@ -3711,7 +3711,7 @@ EXPORT_SYMBOL(netif_receive_skb);
 static void flush_backlog(void *arg)
 {
struct net_device *dev = arg;
-   struct softnet_data *sd = &__get_cpu_var(softnet_data);
+   struct softnet_data *sd = this_cpu_ptr(&softnet_data);
struct sk_buff *skb, *tmp;
 
rps_lock(sd);
@@ -4157,7 +4157,7 @@ void __napi_schedule(struct napi_struct
unsigned long flags;
 
local_irq_save(flags);
-   ____napi_schedule(&__get_cpu_var(softnet_data), n);
+   ____napi_schedule(this_cpu_ptr(&softnet_data), n);
local_irq_restore(flags);
 }
 EXPORT_SYMBOL(__napi_schedule);
@@ -4285,7 +4285,7 @@ EXPORT_SYMBOL(netif_napi_del);
 
 static void net_rx_action(struct softirq_action *h)
 {
-   struct softnet_data *sd = &__get_cpu_var(softnet_data);
+   struct softnet_data *sd = this_cpu_ptr(&softnet_data);
unsigned long time_limit = jiffies + 2;
int budget = netdev_budget;
void *have;
Index: linux/net/core/drop_monitor.c
===
--- linux.orig/net/core/drop_monitor.c  2013-12-02 16:07:45.264759422 -0600
+++ linux/net/core/drop_monitor.c   2013-12-02 16:07:45.254759699 -0600
@@ -147,7 +147,7 @@ static void trace_drop_common(struct sk_
unsigned long flags;
 
local_irq_save(flags);
-   data = &__get_cpu_var(dm_cpu_data);
+   data = this_cpu_ptr(&dm_cpu_data);
spin_lock(&data->lock);
dskb = data->skb;
 
Index: linux/net/core/skbuff.c
===
--- linux.orig/net/core/skbuff.c    2013-12-02 16:07:45.264759422 -0600
+++ linux/net/core/skbuff.c 2013-12-02 16:07:45.254759699 -0600
@@ -371,7 +371,7 @@ static void *__netdev_alloc_frag(unsigne
unsigned long flags;
 
local_irq_save(flags);
-   nc = &__get_cpu_var(netdev_alloc_cache);
+   nc = this_cpu_ptr(&netdev_alloc_cache);
if (unlikely(!nc->frag.page)) {
 refill:
for (order = NETDEV_FRAG_PAGE_MAX_ORDER; ;) {
Index: linux/net/ipv4/tcp_output.c
===
--- linux.orig/net/ipv4/tcp_output.c    2013-12-02 16:07:45.264759422 -0600
+++ linux/net/ipv4/tcp_output.c 2013-12-02 16:07:45.254759699 -0600
@@ -815,7 +815,7 @@ void tcp_wfree(struct sk_buff *skb)
 
/* queue this socket to tasklet queue */
local_irq_save(flags);
-   tsq = &__get_cpu_var(tsq_tasklet);
+   tsq = this_cpu_ptr(&tsq_tasklet);
list_add(&tp->tsq_node, &tsq->head);
tasklet_schedule(&tsq->tasklet);
local_irq_restore(flags);
Index: linux/net/ipv6/syncookies.c
===
--- linux.orig/net/ipv6/syncookies.c    2013-12-02 16:07:45.264759422 -0600
+++ linux/net/ipv6/syncookies.c 2013-12-02 16:07:45.254759699 -0600
@@ -67,7 +67,7 @@ static u32 
