RE: [PATCH 2/3] PCI: hv: serialize the present/eject work items

2018-03-04 Thread Dexuan Cui
> From: Michael Kelley (EOSG)
> Sent: Saturday, March 3, 2018 08:10
> > From: linux-kernel-ow...@vger.kernel.org On Behalf Of Dexuan Cui
> > Sent: Friday, March 2, 2018 4:21 PM
> > When we hot-remove the device, we first receive a PCI_EJECT message and
> > then receive a PCI_BUS_RELATIONS message with bus_rel->device_count == 0.
> >
> > The first message is offloaded to hv_eject_device_work(), and the second
> > is offloaded to pci_devices_present_work(). Both paths can run
> > list_del(&hpdev->list_entry), causing a general protection fault, because
> > system_wq can run them concurrently.
> >
> > The patch eliminates the race condition.
> 
> With this patch, the enum_sem field in struct hv_pcibus_device
> is no longer needed.  The semaphore serializes execution in
> hv_pci_devices_present_work(), and that serialization is now done
> with the ordered workqueue.  Also, the last paragraph of the top level
> comment for hv_pci_devices_present_work() should be updated to
> reflect the new ordering assumptions.

Thanks! I'll make an extra patch for this.
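
Roughly like this (an untested sketch; hunk offsets omitted, and the exact
context lines may differ). The real patch would also update the last
paragraph of the comment above pci_devices_present_work() to say the
serialization now comes from the ordered workqueue rather than enum_sem:

--- a/drivers/pci/host/pci-hyperv.c
+++ b/drivers/pci/host/pci-hyperv.c
@@ struct hv_pcibus_device {
-	struct semaphore enum_sem;
@@ static void pci_devices_present_work(struct work_struct *work)
-	down(&hbus->enum_sem);
@@ static void pci_devices_present_work(struct work_struct *work)
-	up(&hbus->enum_sem);
@@ static int hv_pci_probe(struct hv_device *hdev,
-	sema_init(&hbus->enum_sem, 1);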

> Separately, an unrelated bug:  At the top of hv_eject_device_work(),
> the first test may do a put_pcichild() and return.  This exit path also
needs to do put_hvpcibus() to balance the ref counts, or do a goto to
the last two lines at the bottom of the function.

When we're in hv_eject_device_work(), IMO hpdev->state must be 
hv_pcichild_ejecting, so I'm going to make a patch like this:

@@ -1867,10 +1867,7 @@ static void hv_eject_device_work(struct work_struct *work)

hpdev = container_of(work, struct hv_pci_dev, wrk);

-   if (hpdev->state != hv_pcichild_ejecting) {
-   put_pcichild(hpdev, hv_pcidev_ref_pnp);
-   return;
-   }
+   WARN_ON(hpdev->state != hv_pcichild_ejecting);

/*
 * Ejection can come before or after the PCI bus has been set up, so
 
> > @@ -1770,7 +1772,7 @@ static void hv_pci_devices_present(struct hv_pcibus_device *hbus,
> > spin_unlock_irqrestore(&hbus->device_list_lock, flags);
> >
> > get_hvpcibus(hbus);
> > -   schedule_work(&dr_wrk->wrk);
> > +   queue_work(hbus->wq, &dr_wrk->wrk);
> 
> This invocation of get_hvpcibus() and queue_work() could be made
> conditional on whether the preceding list_add_tail() transitioned
> the list from empty to non-empty.  If the list was already non-empty,
> a previously queued invocation of hv_pci_devices_present_work()
> will find the new entry and process it.   But this is an
> optimization in a non-perf sensitive code path, so may not be
> worth it.

Exactly. I'll add the optimization.
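
Something like this (an untested sketch; I'm assuming the hv_dr_state
variable in hv_pci_devices_present() is "dr", as in the current code, and
that the dr_wrk wrapper can simply be freed when a work item is already
pending, since that pending work item will see the new entry):

 	spin_lock_irqsave(&hbus->device_list_lock, flags);
+	pending_dr = !list_empty(&hbus->dr_list);
 	list_add_tail(&dr->list_entry, &hbus->dr_list);
 	spin_unlock_irqrestore(&hbus->device_list_lock, flags);
 
-	get_hvpcibus(hbus);
-	queue_work(hbus->wq, &dr_wrk->wrk);
+	if (pending_dr) {
+		kfree(dr_wrk);
+	} else {
+		get_hvpcibus(hbus);
+		queue_work(hbus->wq, &dr_wrk->wrk);
+	}

(with a "bool pending_dr;" added to the local variable declarations)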

> > @@ -1848,7 +1850,7 @@ static void hv_pci_eject_device(struct hv_pci_dev *hpdev)
> > get_pcichild(hpdev, hv_pcidev_ref_pnp);
> > INIT_WORK(&hpdev->wrk, hv_eject_device_work);
> > get_hvpcibus(hpdev->hbus);
> > -   schedule_work(&hpdev->wrk);
> > +   queue_work(hpdev->hbus->wq, &hpdev->wrk);
> >  }
> >
> >  /**
> > @@ -2463,11 +2465,17 @@ static int hv_pci_probe(struct hv_device *hdev,
> > spin_lock_init(&hbus->retarget_msi_interrupt_lock);
> > sema_init(&hbus->enum_sem, 1);
> > init_completion(&hbus->remove_event);
> > +   hbus->wq = alloc_ordered_workqueue("hv_pci_%x", 0,
> > +  hbus->sysdata.domain);
> > +   if (!hbus->wq) {
> > +   ret = -ENOMEM;
> > +   goto free_bus;
> > +   }
> >
> > ret = vmbus_open(hdev->channel, pci_ring_size, pci_ring_size, NULL, 0,
> >  hv_pci_onchannelcallback, hbus);
> > if (ret)
> > -   goto free_bus;
> > +   goto destroy_wq;
> >
> > hv_set_drvdata(hdev, hbus);
> >
> > @@ -2536,6 +2544,9 @@ static int hv_pci_probe(struct hv_device *hdev,
> > hv_free_config_window(hbus);
> >  close:
> > vmbus_close(hdev->channel);
> > +destroy_wq:
> > +   drain_workqueue(hbus->wq);
> 
> The drain_workqueue() call isn't necessary.  destroy_workqueue() calls
> drain_workqueue() and there better not be anything in the workqueue
> anyway since all the ref counts are zero.

OK. Will remove it.
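
The probe error path would then end up as (sketch, assuming nothing else
needs to be undone between vmbus_close() and destroy_workqueue()):

close:
	vmbus_close(hdev->channel);
destroy_wq:
	destroy_workqueue(hbus->wq);
free_bus:
	free_page((unsigned long)hbus);
	return ret;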

> > +   destroy_workqueue(hbus->wq);
> >  free_bus:
> > free_page((unsigned long)hbus);
> > return ret;
> > @@ -2615,6 +2626,8 @@ static int hv_pci_remove(struct hv_device *hdev)
> > irq_domain_free_fwnode(hbus->sysdata.fwnode);
> > put_hvpcibus(hbus);
> > wait_for_completion(&hbus->remove_event);
> > +   drain_workqueue(hbus->wq);
> 
> Same here -- drain_workqueue() isn't needed.  The workqueue
> must be empty anyway since the remove_event has completed
> and the ref counts will all be zero.

Will remove it.
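
And the tail of hv_pci_remove() would be just (sketch):

	put_hvpcibus(hbus);
	wait_for_completion(&hbus->remove_event);
	destroy_workqueue(hbus->wq);
	free_page((unsigned long)hbus);
	return 0;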

> > +   destroy_workqueue(hbus->wq);

I'm going to post a v2 patchset tomorrow.

Thanks,
-- Dexuan


RE: [PATCH 2/3] PCI: hv: serialize the present/eject work items

2018-03-03 Thread Michael Kelley (EOSG)
> -----Original Message-----
> From: linux-kernel-ow...@vger.kernel.org On Behalf Of Dexuan Cui
> Sent: Friday, March 2, 2018 4:21 PM
> To: bhelg...@google.com; linux-...@vger.kernel.org; KY Srinivasan;
> Stephen Hemminger
> Cc: linux-kernel@vger.kernel.org; driverdev-de...@linuxdriverproject.org;
> Haiyang Zhang; o...@aepfle.de; a...@canonical.com; jasow...@redhat.com;
> vkuzn...@redhat.com; marcelo.ce...@canonical.com; Dexuan Cui;
> Jack Morgenstein; sta...@vger.kernel.org
> Subject: [PATCH 2/3] PCI: hv: serialize the present/eject work items
> 
> When we hot-remove the device, we first receive a PCI_EJECT message and
> then receive a PCI_BUS_RELATIONS message with bus_rel->device_count == 0.
> 
> The first message is offloaded to hv_eject_device_work(), and the second
> is offloaded to pci_devices_present_work(). Both paths can run
> list_del(&hpdev->list_entry), causing a general protection fault, because
> system_wq can run them concurrently.
> 
> The patch eliminates the race condition.

With this patch, the enum_sem field in struct hv_pcibus_device
is no longer needed.  The semaphore serializes execution in
hv_pci_devices_present_work(), and that serialization is now done
with the ordered workqueue.  Also, the last paragraph of the top level
comment for hv_pci_devices_present_work() should be updated to
reflect the new ordering assumptions.

Separately, an unrelated bug:  At the top of hv_eject_device_work(),
the first test may do a put_pcichild() and return.  This exit path also
needs to do put_hvpcibus() to balance the ref counts, or do a goto to
the last two lines at the bottom of the function.

> 
> Signed-off-by: Dexuan Cui 
> Tested-by: Adrian Suhov 
> Tested-by: Chris Valean 
> Cc: Vitaly Kuznetsov 
> Cc: Jack Morgenstein 
> Cc: sta...@vger.kernel.org
> Cc: Stephen Hemminger 
> Cc: K. Y. Srinivasan 
> ---
>  drivers/pci/host/pci-hyperv.c | 19 ++++++++++++++++---
>  1 file changed, 16 insertions(+), 3 deletions(-)
> 
> diff --git a/drivers/pci/host/pci-hyperv.c b/drivers/pci/host/pci-hyperv.c
> index 1233300f41c6..57b1fb3ebdb9 100644
> --- a/drivers/pci/host/pci-hyperv.c
> +++ b/drivers/pci/host/pci-hyperv.c
> @@ -461,6 +461,8 @@ struct hv_pcibus_device {
>   struct retarget_msi_interrupt retarget_msi_interrupt_params;
> 
>   spinlock_t retarget_msi_interrupt_lock;
> +
> + struct workqueue_struct *wq;
>  };
> 
>  /*
> @@ -1770,7 +1772,7 @@ static void hv_pci_devices_present(struct hv_pcibus_device *hbus,
>   spin_unlock_irqrestore(&hbus->device_list_lock, flags);
> 
>   get_hvpcibus(hbus);
> - schedule_work(&dr_wrk->wrk);
> + queue_work(hbus->wq, &dr_wrk->wrk);

This invocation of get_hvpcibus() and queue_work() could be made
conditional on whether the preceding list_add_tail() transitioned
the list from empty to non-empty.  If the list was already non-empty,
a previously queued invocation of hv_pci_devices_present_work()
will find the new entry and process it.   But this is an
optimization in a non-perf sensitive code path, so may not be
worth it.

>  }
> 
>  /**
> @@ -1848,7 +1850,7 @@ static void hv_pci_eject_device(struct hv_pci_dev *hpdev)
>   get_pcichild(hpdev, hv_pcidev_ref_pnp);
>   INIT_WORK(&hpdev->wrk, hv_eject_device_work);
>   get_hvpcibus(hpdev->hbus);
> - schedule_work(&hpdev->wrk);
> + queue_work(hpdev->hbus->wq, &hpdev->wrk);
>  }
> 
>  /**
> @@ -2463,11 +2465,17 @@ static int hv_pci_probe(struct hv_device *hdev,
>   spin_lock_init(&hbus->retarget_msi_interrupt_lock);
>   sema_init(&hbus->enum_sem, 1);
>   init_completion(&hbus->remove_event);
> + hbus->wq = alloc_ordered_workqueue("hv_pci_%x", 0,
> +hbus->sysdata.domain);
> + if (!hbus->wq) {
> + ret = -ENOMEM;
> + goto free_bus;
> + }
> 
>   ret = vmbus_open(hdev->channel, pci_ring_size, pci_ring_size, NULL, 0,
>hv_pci_onchannelcallback, hbus);
>   if (ret)
> - goto free_bus;
> + goto destroy_wq;
> 
>   hv_set_drvdata(hdev, hbus);
> 
> @@ -2536,6 +2544,9 @@ static int hv_pci_probe(struct hv_device *hdev,
>   hv_free_config_window(hbus);
>  close:
>   vmbus_close(hdev->channel);
> +destroy_wq:
> + drain_workqueue(hbus->wq);

The drain_workqueue() call isn't necessary.  destroy_workqueue() calls
drain_workqueue() and there better not be anything in the workqueue
anyway since all the ref counts are zero.

> + destroy_workqueue(hbus->wq);
>  free_bus:
>   free_page((unsigned long)hbus);

[PATCH 2/3] PCI: hv: serialize the present/eject work items

2018-03-02 Thread Dexuan Cui
When we hot-remove the device, we first receive a PCI_EJECT message and
then receive a PCI_BUS_RELATIONS message with bus_rel->device_count == 0.

The first message is offloaded to hv_eject_device_work(), and the second
is offloaded to pci_devices_present_work(). Both paths can run
list_del(&hpdev->list_entry), causing a general protection fault, because
system_wq can run them concurrently.

The patch eliminates the race condition.

Signed-off-by: Dexuan Cui 
Tested-by: Adrian Suhov 
Tested-by: Chris Valean 
Cc: Vitaly Kuznetsov 
Cc: Jack Morgenstein 
Cc: sta...@vger.kernel.org
Cc: Stephen Hemminger 
Cc: K. Y. Srinivasan 
---
 drivers/pci/host/pci-hyperv.c | 19 ++++++++++++++++---
 1 file changed, 16 insertions(+), 3 deletions(-)

diff --git a/drivers/pci/host/pci-hyperv.c b/drivers/pci/host/pci-hyperv.c
index 1233300f41c6..57b1fb3ebdb9 100644
--- a/drivers/pci/host/pci-hyperv.c
+++ b/drivers/pci/host/pci-hyperv.c
@@ -461,6 +461,8 @@ struct hv_pcibus_device {
struct retarget_msi_interrupt retarget_msi_interrupt_params;
 
spinlock_t retarget_msi_interrupt_lock;
+
+   struct workqueue_struct *wq;
 };
 
 /*
@@ -1770,7 +1772,7 @@ static void hv_pci_devices_present(struct hv_pcibus_device *hbus,
spin_unlock_irqrestore(&hbus->device_list_lock, flags);
 
get_hvpcibus(hbus);
-   schedule_work(&dr_wrk->wrk);
+   queue_work(hbus->wq, &dr_wrk->wrk);
 }
 
 /**
@@ -1848,7 +1850,7 @@ static void hv_pci_eject_device(struct hv_pci_dev *hpdev)
get_pcichild(hpdev, hv_pcidev_ref_pnp);
INIT_WORK(&hpdev->wrk, hv_eject_device_work);
get_hvpcibus(hpdev->hbus);
-   schedule_work(&hpdev->wrk);
+   queue_work(hpdev->hbus->wq, &hpdev->wrk);
 }
 
 /**
@@ -2463,11 +2465,17 @@ static int hv_pci_probe(struct hv_device *hdev,
spin_lock_init(&hbus->retarget_msi_interrupt_lock);
sema_init(&hbus->enum_sem, 1);
init_completion(&hbus->remove_event);
+   hbus->wq = alloc_ordered_workqueue("hv_pci_%x", 0,
+  hbus->sysdata.domain);
+   if (!hbus->wq) {
+   ret = -ENOMEM;
+   goto free_bus;
+   }
 
ret = vmbus_open(hdev->channel, pci_ring_size, pci_ring_size, NULL, 0,
 hv_pci_onchannelcallback, hbus);
if (ret)
-   goto free_bus;
+   goto destroy_wq;
 
hv_set_drvdata(hdev, hbus);
 
@@ -2536,6 +2544,9 @@ static int hv_pci_probe(struct hv_device *hdev,
hv_free_config_window(hbus);
 close:
vmbus_close(hdev->channel);
+destroy_wq:
+   drain_workqueue(hbus->wq);
+   destroy_workqueue(hbus->wq);
 free_bus:
free_page((unsigned long)hbus);
return ret;
@@ -2615,6 +2626,8 @@ static int hv_pci_remove(struct hv_device *hdev)
irq_domain_free_fwnode(hbus->sysdata.fwnode);
put_hvpcibus(hbus);
wait_for_completion(&hbus->remove_event);
+   drain_workqueue(hbus->wq);
+   destroy_workqueue(hbus->wq);
free_page((unsigned long)hbus);
return 0;
 }
-- 
2.7.4