Re: [PATCH v3 1/7] libsas: Use static sas event pool to appease sas event lost

2017-07-14 Thread Hannes Reinecke
On 07/10/2017 09:06 AM, Yijing Wang wrote:
> Now libsas hotplug work is static, every sas event type has its own
> static work, LLDD driver queue the hotplug work into shost->work_q.
> If LLDD driver burst post lots hotplug events to libsas, the hotplug
> events may pending in the workqueue like
> 
> shost->work_q
> new work[PORTE_BYTES_DMAED] --> |[PHYE_LOSS_OF_SIGNAL][PORTE_BYTES_DMAED] -> 
> processing
> |<---wait worker to process>|
> In this case, a new PORTE_BYTES_DMAED event coming, libsas try to queue it
> to shost->work_q, but this work is already pending, so it would be lost.
> Finally, libsas delete the related sas port and sas devices, but LLDD driver
> expect libsas add the sas port and devices(last sas event).
> 
> This patch uses a static sas event work pool to mitigate this issue; since
> it's a static work pool, it won't exhaust memory.
> 
> Signed-off-by: Yijing Wang 
> CC: John Garry 
> CC: Johannes Thumshirn 
> CC: Ewan Milne 
> CC: Christoph Hellwig 
> CC: Tomas Henzl 
> CC: Dan Williams 
> ---
>  drivers/scsi/libsas/sas_event.c| 208 
> -
>  drivers/scsi/libsas/sas_init.c |   6 --
>  drivers/scsi/libsas/sas_internal.h |   3 +
>  drivers/scsi/libsas/sas_phy.c  |  48 +++--
>  drivers/scsi/libsas/sas_port.c |  18 ++--
>  include/scsi/libsas.h  |  16 +--
>  6 files changed, 216 insertions(+), 83 deletions(-)
> 
> diff --git a/drivers/scsi/libsas/sas_event.c b/drivers/scsi/libsas/sas_event.c
> index c0d0d97..a1370bd 100644
> --- a/drivers/scsi/libsas/sas_event.c
> +++ b/drivers/scsi/libsas/sas_event.c
> @@ -27,13 +27,20 @@
>  #include "sas_internal.h"
>  #include "sas_dump.h"
>  
> +static DEFINE_SPINLOCK(sas_event_lock);
> +
> +static const work_func_t sas_ha_event_fns[HA_NUM_EVENTS] = {
> +[HAE_RESET] = sas_hae_reset,
> +};
> +
>  int sas_queue_work(struct sas_ha_struct *ha, struct sas_work *sw)
>  {
>   int rc = 0;
>  
>   if (!test_bit(SAS_HA_REGISTERED, >state))
> - return 0;
> + return rc;
>  
> + rc = 1;
>   if (test_bit(SAS_HA_DRAINING, >state)) {
>   /* add it to the defer list, if not already pending */
>   if (list_empty(>drain_node))
> @@ -44,19 +51,15 @@ int sas_queue_work(struct sas_ha_struct *ha, struct 
> sas_work *sw)
>   return rc;
>  }
>  
> -static int sas_queue_event(int event, unsigned long *pending,
> - struct sas_work *work,
> +static int sas_queue_event(int event, struct sas_work *work,
>   struct sas_ha_struct *ha)
>  {
>   int rc = 0;
> + unsigned long flags;
>  
> - if (!test_and_set_bit(event, pending)) {
> - unsigned long flags;
> -
> - spin_lock_irqsave(>lock, flags);
> - rc = sas_queue_work(ha, work);
> - spin_unlock_irqrestore(>lock, flags);
> - }
> + spin_lock_irqsave(>lock, flags);
> + rc = sas_queue_work(ha, work);
> + spin_unlock_irqrestore(>lock, flags);
>  
>   return rc;
>  }
> @@ -64,6 +67,8 @@ static int sas_queue_event(int event, unsigned long 
> *pending,
>  
>  void __sas_drain_work(struct sas_ha_struct *ha)
>  {
> + int ret;
> + unsigned long flags;
>   struct workqueue_struct *wq = ha->core.shost->work_q;
>   struct sas_work *sw, *_sw;
>  
> @@ -78,7 +83,12 @@ void __sas_drain_work(struct sas_ha_struct *ha)
>   clear_bit(SAS_HA_DRAINING, >state);
>   list_for_each_entry_safe(sw, _sw, >defer_q, drain_node) {
>   list_del_init(>drain_node);
> - sas_queue_work(ha, sw);
> + ret = sas_queue_work(ha, sw);
> + if (ret != 1) {
> + spin_lock_irqsave(_event_lock, flags);
> + sw->used = false;
> + spin_unlock_irqrestore(_event_lock, flags);
> + }
>   }
>   spin_unlock_irq(>lock);
>  }
> @@ -119,51 +129,197 @@ void sas_enable_revalidation(struct sas_ha_struct *ha)
>   if (!test_and_clear_bit(ev, >pending))
>   continue;
>  
> - sas_queue_event(ev, >pending, >disc_work[ev].work, ha);
> + sas_queue_event(ev, >disc_work[ev].work, ha);
>   }
>   mutex_unlock(>disco_mutex);
>  }
>  
> +static void sas_free_ha_event(struct sas_ha_event *event)
> +{
> + unsigned long flags;
> + spin_lock_irqsave(_event_lock, flags);
> + event->work.used = false;
> + spin_unlock_irqrestore(_event_lock, flags);
> +}
> +
> +static void sas_free_port_event(struct asd_sas_event *event)
> +{
> + unsigned long flags;
> + spin_lock_irqsave(_event_lock, flags);
> + event->work.used = false;
> + spin_unlock_irqrestore(_event_lock, flags);
> +}
> +
> +static void sas_free_phy_event(struct 

Re: [PATCH v3 1/7] libsas: Use static sas event pool to appease sas event lost

2017-07-12 Thread wangyijing
 There is no special meaning for the pool size, if flutter of > 25 events, 
 notify sas events will return error, and the further step work is 
 depending on LLDD drivers.
 I hope libsas could do more work in this case, but now it seems a little 
 difficult, this patch may be an interim fix, until we find a perfect 
 solution.
>>>
>>> The principle of having a fixed-size pool is ok, even though the pool size 
>>> needs more consideration.
>>>
>>> However my issue is how to handle pool exhaustion. For a start, relaying 
>>> info to the LLDD that the event notification failed is probably not the way 
>>> to go. I only now noticed "scsi: sas: scsi_queue_work can fail, so make 
>>> callers aware" made it into the kernel; as I mentioned in response to this 
>>> patch, the LLDD does not know how to handle this (and no LLDDs do actually 
>>> handle this).
>>>
>>> I would say it is better to shut down the PHY from libsas (As Dan mentioned 
>>> in the v1 series) when the pool exhausts, under the assumption that the PHY 
>>> has gone into some erroneous state. The user can later re-enable the PHY 
>>> from sysfs, if required.
>>
>> I considered this suggestion, and what I am worried about are, first if we 
>> disable phy once the sas event pool exhausts, it may hurt the pending sas 
>> event process which has been queued,
> 
> I don't see how it affects currently queued events - they should just be 
> processed normally. As for LLDD reporting events when the pool is exhausted, 
> they are just lost.

So if we disable a phy, does it have no effect on the already queued sas event 
processing, which includes accessing the phy to find the target device?

> 
>> second, if phy was disabled, and no one trigger the reenable by sysfs, the 
>> LLDD has no way to post new sas phy events.
> 
> For the extreme scenario of pool becoming exhausted and PHY being disabled, 
> it should remain disabled until user takes some action to fix originating 
> problem.

So we should print an explicit message to tell the user what happened and how 
to fix it.

Thanks!
Yijing.

> 
>>
>> Thanks!
>> Yijing.
>>
>>>
>>> Much appreciated,
>>> John
>>>

 Thanks!
 Yijing.

>
> Thanks,
> John
>
>
> .
>


 .

>>>
>>>
>>>
>>> .
>>>
>>
>>
>> .
>>
> 
> 
> 
> .
> 



Re: [PATCH v3 1/7] libsas: Use static sas event pool to appease sas event lost

2017-07-12 Thread John Garry

On 12/07/2017 09:47, wangyijing wrote:



在 2017/7/12 16:17, John Garry 写道:

On 12/07/2017 03:06, wangyijing wrote:

-unsigned long port_events_pending;
-unsigned long phy_events_pending;
+struct asd_sas_event   port_events[PORT_POOL_SIZE];
+struct asd_sas_event   phy_events[PHY_POOL_SIZE];

 int error;


Hi Yijing,

So now we are creating a static pool of events per PHY/port, instead of having 
1 static work struct per event per PHY/port. So, for sure, this avoids the 
dynamic event issue of system memory exhaustion which we discussed in v1+v2 
series. And it seems to possibly remove issue of losing SAS events.

But how did you determine the pool size for a PHY/port? It would seem to be 5 * 
#phy events or #port events (which is also 5, I figure by coincidence). How does 
this deal with flutter of >25 events?


There is no special meaning for the pool size, if flutter of > 25 events, 
notify sas events will return error, and the further step work is depending on 
LLDD drivers.
I hope libsas could do more work in this case, but now it seems a little 
difficult, this patch may be an interim fix, until we find a perfect solution.


The principle of having a fixed-size pool is ok, even though the pool size 
needs more consideration.

However my issue is how to handle pool exhaustion. For a start, relaying info to the LLDD 
that the event notification failed is probably not the way to go. I only now noticed 
"scsi: sas: scsi_queue_work can fail, so make callers aware" made it into the 
kernel; as I mentioned in response to this patch, the LLDD does not know how to handle 
this (and no LLDDs do actually handle this).

I would say it is better to shut down the PHY from libsas (As Dan mentioned in 
the v1 series) when the pool exhausts, under the assumption that the PHY has 
gone into some erroneous state. The user can later re-enable the PHY from 
sysfs, if required.


I considered this suggestion, and what I am worried about are, first if we 
disable phy once the sas event pool exhausts, it may hurt the pending sas event 
process which has been queued,


I don't see how it affects currently queued events - they should just be 
processed normally. As for LLDD reporting events when the pool is 
exhausted, they are just lost.



second, if phy was disabled, and no one trigger the reenable by sysfs, the LLDD 
has no way to post new sas phy events.


For the extreme scenario of pool becoming exhausted and PHY being 
disabled, it should remain disabled until user takes some action to fix 
originating problem.




Thanks!
Yijing.



Much appreciated,
John



Thanks!
Yijing.



Thanks,
John


.




.





.




.






Re: [PATCH v3 1/7] libsas: Use static sas event pool to appease sas event lost

2017-07-12 Thread wangyijing


在 2017/7/12 16:17, John Garry 写道:
> On 12/07/2017 03:06, wangyijing wrote:
 -unsigned long port_events_pending;
 -unsigned long phy_events_pending;
 +struct asd_sas_event   port_events[PORT_POOL_SIZE];
 +struct asd_sas_event   phy_events[PHY_POOL_SIZE];

  int error;
>>>
>>> Hi Yijing,
>>>
>>> So now we are creating a static pool of events per PHY/port, instead of 
>>> having 1 static work struct per event per PHY/port. So, for sure, this 
>>> avoids the dynamic event issue of system memory exhaustion which we 
>>> discussed in v1+v2 series. And it seems to possibly remove issue of losing 
>>> SAS events.
>>>
>>> But how did you determine the pool size for a PHY/port? It would seem to be 
>>> 5 * #phy events or #port events (which is also 5, I figure by coincidence). 
>>> How does this deal with flutter of >25 events?
>>
>> There is no special meaning for the pool size, if flutter of > 25 events, 
>> notify sas events will return error, and the further step work is depending 
>> on LLDD drivers.
>> I hope libsas could do more work in this case, but now it seems a little 
>> difficult, this patch may be an interim fix, until we find a perfect solution.
> 
> The principle of having a fixed-size pool is ok, even though the pool size 
> needs more consideration.
> 
> However my issue is how to handle pool exhaustion. For a start, relaying info 
> to the LLDD that the event notification failed is probably not the way to go. 
> I only now noticed "scsi: sas: scsi_queue_work can fail, so make callers 
> aware" made it into the kernel; as I mentioned in response to this patch, the 
> LLDD does not know how to handle this (and no LLDDs do actually handle this).
> 
> I would say it is better to shut down the PHY from libsas (As Dan mentioned 
> in the v1 series) when the pool exhausts, under the assumption that the PHY 
> has gone into some erroneous state. The user can later re-enable the PHY from 
> sysfs, if required.

I considered this suggestion, and what I am worried about are, first if we 
disable phy once the sas event pool exhausts, it may hurt the pending sas event 
process which has been queued,
second, if phy was disabled, and no one trigger the reenable by sysfs, the LLDD 
has no way to post new sas phy events.

Thanks!
Yijing.

> 
> Much appreciated,
> John
> 
>>
>> Thanks!
>> Yijing.
>>
>>>
>>> Thanks,
>>> John
>>>
>>>
>>> .
>>>
>>
>>
>> .
>>
> 
> 
> 
> .
> 



Re: [PATCH v3 1/7] libsas: Use static sas event pool to appease sas event lost

2017-07-12 Thread John Garry

On 12/07/2017 03:06, wangyijing wrote:

-unsigned long port_events_pending;
-unsigned long phy_events_pending;
+struct asd_sas_event   port_events[PORT_POOL_SIZE];
+struct asd_sas_event   phy_events[PHY_POOL_SIZE];

 int error;


Hi Yijing,

So now we are creating a static pool of events per PHY/port, instead of having 
1 static work struct per event per PHY/port. So, for sure, this avoids the 
dynamic event issue of system memory exhaustion which we discussed in v1+v2 
series. And it seems to possibly remove issue of losing SAS events.

But how did you determine the pool size for a PHY/port? It would seem to be 5 * 
#phy events or #port events (which is also 5, I figure by coincidence). How does 
this deal with flutter of >25 events?


There is no special meaning for the pool size, if flutter of > 25 events, 
notify sas events will return error, and the further step work is depending on 
LLDD drivers.
I hope libsas could do more work in this case, but now it seems a little 
difficult, this patch may be an interim fix, until we find a perfect solution.


The principle of having a fixed-size pool is ok, even though the pool 
size needs more consideration.


However my issue is how to handle pool exhaustion. For a start, relaying 
info to the LLDD that the event notification failed is probably not the 
way to go. I only now noticed "scsi: sas: scsi_queue_work can fail, so 
make callers aware" made it into the kernel; as I mentioned in response 
to this patch, the LLDD does not know how to handle this (and no LLDDs 
do actually handle this).


I would say it is better to shut down the PHY from libsas (As Dan 
mentioned in the v1 series) when the pool exhausts, under the assumption 
that the PHY has gone into some erroneous state. The user can later 
re-enable the PHY from sysfs, if required.


Much appreciated,
John



Thanks!
Yijing.



Thanks,
John


.




.






Re: [PATCH v3 1/7] libsas: Use static sas event pool to appease sas event lost

2017-07-11 Thread wangyijing
>> -unsigned long port_events_pending;
>> -unsigned long phy_events_pending;
>> +struct asd_sas_event   port_events[PORT_POOL_SIZE];
>> +struct asd_sas_event   phy_events[PHY_POOL_SIZE];
>>
>>  int error;
> 
> Hi Yijing,
> 
> So now we are creating a static pool of events per PHY/port, instead of 
> having 1 static work struct per event per PHY/port. So, for sure, this avoids 
> the dynamic event issue of system memory exhaustion which we discussed in 
> v1+v2 series. And it seems to possibly remove issue of losing SAS events.
> 
> But how did you determine the pool size for a PHY/port? It would seem to be 5 
> * #phy events or #port events (which is also 5, I figure by coincidence). How 
> does this deal with flutter of >25 events?

There is no special meaning for the pool size, if flutter of > 25 events, 
notify sas events will return error, and the further step work is depending on 
LLDD drivers.
I hope libsas could do more work in this case, but now it seems a little 
difficult, this patch may be an interim fix, until we find a perfect solution.

Thanks!
Yijing.

> 
> Thanks,
> John
> 
> 
> .
> 



Re: [PATCH v3 1/7] libsas: Use static sas event pool to appease sas event lost

2017-07-11 Thread John Garry

On 10/07/2017 08:06, Yijing Wang wrote:

Now libsas hotplug work is static, every sas event type has its own
static work, LLDD driver queue the hotplug work into shost->work_q.
If LLDD driver burst post lots hotplug events to libsas, the hotplug
events may pending in the workqueue like

shost->work_q
new work[PORTE_BYTES_DMAED] --> |[PHYE_LOSS_OF_SIGNAL][PORTE_BYTES_DMAED] -> 
processing
|<---wait worker to process>|
In this case, a new PORTE_BYTES_DMAED event coming, libsas try to queue it
to shost->work_q, but this work is already pending, so it would be lost.
Finally, libsas delete the related sas port and sas devices, but LLDD driver
expect libsas add the sas port and devices(last sas event).

This patch uses a static sas event work pool to mitigate this issue; since
it's a static work pool, it won't exhaust memory.



[ ... ]



+#definePORT_POOL_SIZE  (PORT_NUM_EVENTS * 5)
+#definePHY_POOL_SIZE   (PHY_NUM_EVENTS * 5)
+
 /* The phy pretty much is controlled by the LLDD.
  * The class only reads those fields.
  */
 struct asd_sas_phy {
 /* private: */
-   struct asd_sas_event   port_events[PORT_NUM_EVENTS];
-   struct asd_sas_event   phy_events[PHY_NUM_EVENTS];
-
-   unsigned long port_events_pending;
-   unsigned long phy_events_pending;
+   struct asd_sas_event   port_events[PORT_POOL_SIZE];
+   struct asd_sas_event   phy_events[PHY_POOL_SIZE];

int error;


Hi Yijing,

So now we are creating a static pool of events per PHY/port, instead of 
having 1 static work struct per event per PHY/port. So, for sure, this 
avoids the dynamic event issue of system memory exhaustion which we 
discussed in v1+v2 series. And it seems to possibly remove issue of 
losing SAS events.


But how did you determine the pool size for a PHY/port? It would seem to 
be 5 * #phy events or #port events (which is also 5, I figure by 
coincidence). How does this deal with flutter of >25 events?


Thanks,
John



[PATCH v3 1/7] libsas: Use static sas event pool to appease sas event lost

2017-07-10 Thread Yijing Wang
Now libsas hotplug work is static, every sas event type has its own
static work, LLDD driver queue the hotplug work into shost->work_q.
If LLDD driver burst post lots hotplug events to libsas, the hotplug
events may pending in the workqueue like

shost->work_q
new work[PORTE_BYTES_DMAED] --> |[PHYE_LOSS_OF_SIGNAL][PORTE_BYTES_DMAED] -> 
processing
|<---wait worker to process>|
In this case, a new PORTE_BYTES_DMAED event coming, libsas try to queue it
to shost->work_q, but this work is already pending, so it would be lost.
Finally, libsas delete the related sas port and sas devices, but LLDD driver
expect libsas add the sas port and devices(last sas event).

This patch uses a static sas event work pool to mitigate this issue; since
it's a static work pool, it won't exhaust memory.

Signed-off-by: Yijing Wang 
CC: John Garry 
CC: Johannes Thumshirn 
CC: Ewan Milne 
CC: Christoph Hellwig 
CC: Tomas Henzl 
CC: Dan Williams 
---
 drivers/scsi/libsas/sas_event.c| 208 -
 drivers/scsi/libsas/sas_init.c |   6 --
 drivers/scsi/libsas/sas_internal.h |   3 +
 drivers/scsi/libsas/sas_phy.c  |  48 +++--
 drivers/scsi/libsas/sas_port.c |  18 ++--
 include/scsi/libsas.h  |  16 +--
 6 files changed, 216 insertions(+), 83 deletions(-)

diff --git a/drivers/scsi/libsas/sas_event.c b/drivers/scsi/libsas/sas_event.c
index c0d0d97..a1370bd 100644
--- a/drivers/scsi/libsas/sas_event.c
+++ b/drivers/scsi/libsas/sas_event.c
@@ -27,13 +27,20 @@
 #include "sas_internal.h"
 #include "sas_dump.h"
 
+static DEFINE_SPINLOCK(sas_event_lock);
+
+static const work_func_t sas_ha_event_fns[HA_NUM_EVENTS] = {
+  [HAE_RESET] = sas_hae_reset,
+};
+
 int sas_queue_work(struct sas_ha_struct *ha, struct sas_work *sw)
 {
int rc = 0;
 
if (!test_bit(SAS_HA_REGISTERED, >state))
-   return 0;
+   return rc;
 
+   rc = 1;
if (test_bit(SAS_HA_DRAINING, >state)) {
/* add it to the defer list, if not already pending */
if (list_empty(>drain_node))
@@ -44,19 +51,15 @@ int sas_queue_work(struct sas_ha_struct *ha, struct 
sas_work *sw)
return rc;
 }
 
-static int sas_queue_event(int event, unsigned long *pending,
-   struct sas_work *work,
+static int sas_queue_event(int event, struct sas_work *work,
struct sas_ha_struct *ha)
 {
int rc = 0;
+   unsigned long flags;
 
-   if (!test_and_set_bit(event, pending)) {
-   unsigned long flags;
-
-   spin_lock_irqsave(>lock, flags);
-   rc = sas_queue_work(ha, work);
-   spin_unlock_irqrestore(>lock, flags);
-   }
+   spin_lock_irqsave(>lock, flags);
+   rc = sas_queue_work(ha, work);
+   spin_unlock_irqrestore(>lock, flags);
 
return rc;
 }
@@ -64,6 +67,8 @@ static int sas_queue_event(int event, unsigned long *pending,
 
 void __sas_drain_work(struct sas_ha_struct *ha)
 {
+   int ret;
+   unsigned long flags;
struct workqueue_struct *wq = ha->core.shost->work_q;
struct sas_work *sw, *_sw;
 
@@ -78,7 +83,12 @@ void __sas_drain_work(struct sas_ha_struct *ha)
clear_bit(SAS_HA_DRAINING, >state);
list_for_each_entry_safe(sw, _sw, >defer_q, drain_node) {
list_del_init(>drain_node);
-   sas_queue_work(ha, sw);
+   ret = sas_queue_work(ha, sw);
+   if (ret != 1) {
+   spin_lock_irqsave(_event_lock, flags);
+   sw->used = false;
+   spin_unlock_irqrestore(_event_lock, flags);
+   }
}
spin_unlock_irq(>lock);
 }
@@ -119,51 +129,197 @@ void sas_enable_revalidation(struct sas_ha_struct *ha)
if (!test_and_clear_bit(ev, >pending))
continue;
 
-   sas_queue_event(ev, >pending, >disc_work[ev].work, ha);
+   sas_queue_event(ev, >disc_work[ev].work, ha);
}
mutex_unlock(>disco_mutex);
 }
 
+static void sas_free_ha_event(struct sas_ha_event *event)
+{
+   unsigned long flags;
+   spin_lock_irqsave(_event_lock, flags);
+   event->work.used = false;
+   spin_unlock_irqrestore(_event_lock, flags);
+}
+
+static void sas_free_port_event(struct asd_sas_event *event)
+{
+   unsigned long flags;
+   spin_lock_irqsave(_event_lock, flags);
+   event->work.used = false;
+   spin_unlock_irqrestore(_event_lock, flags);
+}
+
+static void sas_free_phy_event(struct asd_sas_event *event)
+{
+   unsigned long flags;
+   spin_lock_irqsave(_event_lock, flags);
+   event->work.used = false;
+   spin_unlock_irqrestore(_event_lock, flags);
+}
+
+static void