Re: [PATCH net-next] liquidio: allocate RX buffers in OOM conditions in PF and VF

2017-03-23 Thread Burla, Satananda
The 03/22/2017 19:37, David Miller wrote:
> From: Felix Manlunas 
> Date: Wed, 22 Mar 2017 11:31:13 -0700
> 
> > From: Satanand Burla 
> >
> > Add a workqueue that runs periodically to try to allocate RX buffers in OOM
> > conditions in the PF and VF.
> >
> > Signed-off-by: Satanand Burla 
> > Signed-off-by: Felix Manlunas 
> 
> Applied, but I'm really not so sure you want to poll these queue states
> 4 times a second all the time.
> 
> Why don't you trigger the workqueue when you actually get an allocation
> failure?
That is certainly a better option. We will incorporate that in the
coming series.
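Roughly what we have in mind (a sketch only, untested; the exact rearm
logic will be worked out in that series):

	/* Sketch: arm the work from the RX refill path only when
	 * descriptors are still missing after a refill attempt,
	 * instead of rearming it unconditionally every 250 ms.
	 */
	if (droq->refill_count)		/* still short: likely OOM */
		queue_delayed_work(lio->rxq_status_wq.wq,
				   &lio->rxq_status_wq.wk.work,
				   msecs_to_jiffies(LIO_OOM_POLL_INTERVAL_MS));

octnet_poll_check_rxq_oom_status() would then rearm itself only while
some droq still has refill_count != 0, so the polling stops once buffer
allocations succeed again.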
-- 
Thanks
Satanand


Re: [PATCH net-next] liquidio: allocate RX buffers in OOM conditions in PF and VF

2017-03-22 Thread David Miller
From: Felix Manlunas 
Date: Wed, 22 Mar 2017 11:31:13 -0700

> From: Satanand Burla 
> 
> Add a workqueue that runs periodically to try to allocate RX buffers in OOM
> conditions in the PF and VF.
> 
> Signed-off-by: Satanand Burla 
> Signed-off-by: Felix Manlunas 

Applied, but I'm really not so sure you want to poll these queue states
4 times a second all the time.

Why don't you trigger the workqueue when you actually get an allocation
failure?


[PATCH net-next] liquidio: allocate RX buffers in OOM conditions in PF and VF

2017-03-22 Thread Felix Manlunas
From: Satanand Burla 

Add a workqueue that runs periodically to try to allocate RX buffers in OOM
conditions in the PF and VF.

Signed-off-by: Satanand Burla 
Signed-off-by: Felix Manlunas 
---
 .../ethernet/cavium/liquidio/cn23xx_pf_device.h|  2 +
 drivers/net/ethernet/cavium/liquidio/lio_core.c| 56 ++
 drivers/net/ethernet/cavium/liquidio/lio_main.c|  5 ++
 drivers/net/ethernet/cavium/liquidio/lio_vf_main.c |  5 ++
 drivers/net/ethernet/cavium/liquidio/octeon_droq.c | 26 ++
 drivers/net/ethernet/cavium/liquidio/octeon_droq.h |  2 +
 .../net/ethernet/cavium/liquidio/octeon_network.h  |  7 +++
 7 files changed, 103 insertions(+)
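
(Note for reviewers: the octeon_droq.c hunks are where the actual refill
happens. The new helper there is essentially a credit-threshold refill;
a simplified sketch, assuming it follows the existing liquidio droq
refill pattern -- see the hunks for the exact code:

	void octeon_droq_check_oom(struct octeon_droq *droq)
	{
		int desc_refilled;
		struct octeon_device *oct = droq->oct_dev;

		/* Refill only when the posted credit count has dropped
		 * to the CN23XX_SLI_DEF_BP backpressure threshold.
		 */
		if (readl(droq->pkts_credit_reg) <= CN23XX_SLI_DEF_BP) {
			spin_lock_bh(&droq->lock);
			desc_refilled = octeon_droq_refill(oct, droq);
			if (desc_refilled) {
				/* Flush descriptor writes to memory
				 * before posting the new credits.
				 */
				wmb();
				writel(desc_refilled,
				       droq->pkts_credit_reg);
			}
			spin_unlock_bh(&droq->lock);
		}
	}
)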

diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h
index 2fedd91..dee6046 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h
+++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h
@@ -43,6 +43,8 @@ struct octeon_cn23xx_pf {
struct octeon_config *conf;
 };
 
+#define CN23XX_SLI_DEF_BP  0x40
+
 int setup_cn23xx_octeon_pf_device(struct octeon_device *oct);
 
 int validate_cn23xx_pf_config_info(struct octeon_device *oct,
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_core.c b/drivers/net/ethernet/cavium/liquidio/lio_core.c
index 65a1a9e..08676df 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_core.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_core.c
@@ -26,6 +26,9 @@
 #include "octeon_main.h"
 #include "octeon_network.h"
 
+/* OOM task polling interval */
+#define LIO_OOM_POLL_INTERVAL_MS 250
+
 int liquidio_set_feature(struct net_device *netdev, int cmd, u16 param1)
 {
struct lio *lio = GET_LIO(netdev);
@@ -293,3 +296,56 @@ void octeon_pf_changed_vf_macaddr(struct octeon_device *oct, u8 *mac)
 * the PF did that already
 */
 }
+
+static void octnet_poll_check_rxq_oom_status(struct work_struct *work)
+{
+   struct cavium_wk *wk = (struct cavium_wk *)work;
+   struct lio *lio = (struct lio *)wk->ctxptr;
+   struct octeon_device *oct = lio->oct_dev;
+   struct octeon_droq *droq;
+   int q, q_no = 0;
+
+   if (ifstate_check(lio, LIO_IFSTATE_RUNNING)) {
+   for (q = 0; q < lio->linfo.num_rxpciq; q++) {
+   q_no = lio->linfo.rxpciq[q].s.q_no;
+   droq = oct->droq[q_no];
+   if (!droq)
+   continue;
+   octeon_droq_check_oom(droq);
+   }
+   }
+   queue_delayed_work(lio->rxq_status_wq.wq,
+  &lio->rxq_status_wq.wk.work,
+  msecs_to_jiffies(LIO_OOM_POLL_INTERVAL_MS));
+}
+
+int setup_rx_oom_poll_fn(struct net_device *netdev)
+{
+   struct lio *lio = GET_LIO(netdev);
+   struct octeon_device *oct = lio->oct_dev;
+
+   lio->rxq_status_wq.wq = alloc_workqueue("rxq-oom-status",
+   WQ_MEM_RECLAIM, 0);
+   if (!lio->rxq_status_wq.wq) {
+   dev_err(&oct->pci_dev->dev, "unable to create cavium rxq oom status wq\n");
+   return -ENOMEM;
+   }
+   INIT_DELAYED_WORK(&lio->rxq_status_wq.wk.work,
+ octnet_poll_check_rxq_oom_status);
+   lio->rxq_status_wq.wk.ctxptr = lio;
+   queue_delayed_work(lio->rxq_status_wq.wq,
+  &lio->rxq_status_wq.wk.work,
+  msecs_to_jiffies(LIO_OOM_POLL_INTERVAL_MS));
+   return 0;
+}
+
+void cleanup_rx_oom_poll_fn(struct net_device *netdev)
+{
+   struct lio *lio = GET_LIO(netdev);
+
+   if (lio->rxq_status_wq.wq) {
+   cancel_delayed_work_sync(&lio->rxq_status_wq.wk.work);
+   flush_workqueue(lio->rxq_status_wq.wq);
+   destroy_workqueue(lio->rxq_status_wq.wq);
+   }
+}
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index b23485c..5298a79 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -1672,6 +1672,8 @@ static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx)
 
cleanup_link_status_change_wq(netdev);
 
+   cleanup_rx_oom_poll_fn(netdev);
+
delete_glists(lio);
 
free_netdev(netdev);
@@ -4146,6 +4148,9 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
if (setup_link_status_change_wq(netdev))
goto setup_nic_dev_fail;
 
+   if (setup_rx_oom_poll_fn(netdev))
+   goto setup_nic_dev_fail;
+
/* Register the network device with the OS */
if (register_netdev(netdev)) {
dev_err(&octeon_dev->pci_dev->dev, "Device registration failed\n");
diff --git