Re: [Xen-devel] [PATCH v4 12/18] xen/pvcalls: implement poll command

2017-06-21 Thread Stefano Stabellini
On Wed, 21 Jun 2017, Boris Ostrovsky wrote:
> >>> +
> >>> + mappass->reqcopy = *req;
> >>> + icsk = inet_csk(mappass->sock->sk);
> >>> + queue = &icsk->icsk_accept_queue;
> >>> + spin_lock(&queue->rskq_lock);
> >>> + data = queue->rskq_accept_head != NULL;
> >>> + spin_unlock(&queue->rskq_lock);
> >> What is the purpose of the queue lock here?
> > It is only there to protect accesses to rskq_accept_head. Functions that
> > change rskq_accept_head take this lock, see for example
> > net/ipv4/inet_connection_sock.c:inet_csk_reqsk_queue_add. I'll add an
> > in-code comment.
> 
> I am not sure I follow. You are not changing rskq_accept_head, you are
> simply reading it under the lock. It may be set by others to NULL as
> soon as you drop the lock, at which point 'data' test below will be
> obsolete.
> 
> In inet_csk_reqsk_queue_add() it is read and then, based on read result,
> is written with a value so a lock is indeed needed there.

I think you are right. The only thing is that without the lock we might
read a transitory value as the rskq_accept_head reads/writes are not
guaranteed to be atomic. However, I don't think we care about it, since
this is just a != NULL test and, as you wrote, the result could be
obsolete immediately after. I'll drop the lock.



> >
> >
> >>> + if (data) {
> >>> + mappass->reqcopy.cmd = 0;
> >>> + ret = 0;
> >>> + goto out;
> >>> + }
> >>> + spin_unlock_irqrestore(&mappass->copy_lock, flags);
> >>> +
> >>> + /* Tell the caller we don't need to send back a notification yet */
> >>> + return -1;
> 

___
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel


Re: [Xen-devel] [PATCH v4 12/18] xen/pvcalls: implement poll command

2017-06-21 Thread Boris Ostrovsky

>>> +
>>> +   mappass->reqcopy = *req;
>>> +   icsk = inet_csk(mappass->sock->sk);
>>> +   queue = &icsk->icsk_accept_queue;
>>> +   spin_lock(&queue->rskq_lock);
>>> +   data = queue->rskq_accept_head != NULL;
>>> +   spin_unlock(&queue->rskq_lock);
>> What is the purpose of the queue lock here?
> It is only there to protect accesses to rskq_accept_head. Functions that
> change rskq_accept_head take this lock, see for example
> net/ipv4/inet_connection_sock.c:inet_csk_reqsk_queue_add. I'll add an
> in-code comment.

I am not sure I follow. You are not changing rskq_accept_head, you are
simply reading it under the lock. It may be set by others to NULL as
soon as you drop the lock, at which point 'data' test below will be
obsolete.

In inet_csk_reqsk_queue_add() it is read and then, based on read result,
is written with a value so a lock is indeed needed there.

-boris

>
>
>>> +   if (data) {
>>> +   mappass->reqcopy.cmd = 0;
>>> +   ret = 0;
>>> +   goto out;
>>> +   }
>>> +   spin_unlock_irqrestore(&mappass->copy_lock, flags);
>>> +
>>> +   /* Tell the caller we don't need to send back a notification yet */
>>> +   return -1;


___
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel


Re: [Xen-devel] [PATCH v4 12/18] xen/pvcalls: implement poll command

2017-06-21 Thread Stefano Stabellini
On Tue, 20 Jun 2017, Boris Ostrovsky wrote:
> > @@ -499,6 +521,55 @@ static int pvcalls_back_accept(struct xenbus_device 
> > *dev,
> >  static int pvcalls_back_poll(struct xenbus_device *dev,
> >  struct xen_pvcalls_request *req)
> >  {
> > +   struct pvcalls_fedata *fedata;
> > +   struct sockpass_mapping *mappass;
> > +   struct xen_pvcalls_response *rsp;
> > +   struct inet_connection_sock *icsk;
> > +   struct request_sock_queue *queue;
> > +   unsigned long flags;
> > +   int ret;
> > +   bool data;
> > +
> > +   fedata = dev_get_drvdata(&dev->dev);
> > +
> > +   mappass = radix_tree_lookup(&fedata->socketpass_mappings, 
> > req->u.poll.id);
> > +   if (mappass == NULL)
> > +   return -EINVAL;
> > +
> > +   /*
> > +* Limitation of the current implementation: only support one
> > +* concurrent accept or poll call on one socket.
> > +*/
> > +   spin_lock_irqsave(&mappass->copy_lock, flags);
> > +   if (mappass->reqcopy.cmd != 0) {
> > +   ret = -EINTR;
> > +   goto out;
> > +   }
> > +
> > +   mappass->reqcopy = *req;
> > +   icsk = inet_csk(mappass->sock->sk);
> > +   queue = &icsk->icsk_accept_queue;
> > +   spin_lock(&queue->rskq_lock);
> > +   data = queue->rskq_accept_head != NULL;
> > +   spin_unlock(&queue->rskq_lock);
> 
> What is the purpose of the queue lock here?

It is only there to protect accesses to rskq_accept_head. Functions that
change rskq_accept_head take this lock, see for example
net/ipv4/inet_connection_sock.c:inet_csk_reqsk_queue_add. I'll add an
in-code comment.


> > +   if (data) {
> > +   mappass->reqcopy.cmd = 0;
> > +   ret = 0;
> > +   goto out;
> > +   }
> > +   spin_unlock_irqrestore(&mappass->copy_lock, flags);
> > +
> > +   /* Tell the caller we don't need to send back a notification yet */
> > +   return -1;

___
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel


Re: [Xen-devel] [PATCH v4 12/18] xen/pvcalls: implement poll command

2017-06-20 Thread Boris Ostrovsky

> @@ -499,6 +521,55 @@ static int pvcalls_back_accept(struct xenbus_device *dev,
>  static int pvcalls_back_poll(struct xenbus_device *dev,
>struct xen_pvcalls_request *req)
>  {
> + struct pvcalls_fedata *fedata;
> + struct sockpass_mapping *mappass;
> + struct xen_pvcalls_response *rsp;
> + struct inet_connection_sock *icsk;
> + struct request_sock_queue *queue;
> + unsigned long flags;
> + int ret;
> + bool data;
> +
> + fedata = dev_get_drvdata(&dev->dev);
> +
> + mappass = radix_tree_lookup(&fedata->socketpass_mappings, 
> req->u.poll.id);
> + if (mappass == NULL)
> + return -EINVAL;
> +
> + /*
> +  * Limitation of the current implementation: only support one
> +  * concurrent accept or poll call on one socket.
> +  */
> + spin_lock_irqsave(&mappass->copy_lock, flags);
> + if (mappass->reqcopy.cmd != 0) {
> + ret = -EINTR;
> + goto out;
> + }
> +
> + mappass->reqcopy = *req;
> + icsk = inet_csk(mappass->sock->sk);
> + queue = &icsk->icsk_accept_queue;
> + spin_lock(&queue->rskq_lock);
> + data = queue->rskq_accept_head != NULL;
> + spin_unlock(&queue->rskq_lock);

What is the purpose of the queue lock here?

-boris

> + if (data) {
> + mappass->reqcopy.cmd = 0;
> + ret = 0;
> + goto out;
> + }
> + spin_unlock_irqrestore(&mappass->copy_lock, flags);
> +
> + /* Tell the caller we don't need to send back a notification yet */
> + return -1;
>


___
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel


[Xen-devel] [PATCH v4 12/18] xen/pvcalls: implement poll command

2017-06-15 Thread Stefano Stabellini
Implement poll on passive sockets by requesting a delayed response with
mappass->reqcopy, and reply back when there is data on the passive
socket.

Poll on active socket is unimplemented as by the spec, as the frontend
should just wait for events and check the indexes on the indexes page.

Only support one outstanding poll (or accept) request for every passive
socket at any given time.

Signed-off-by: Stefano Stabellini 
CC: boris.ostrov...@oracle.com
CC: jgr...@suse.com
---
 drivers/xen/pvcalls-back.c | 73 +-
 1 file changed, 72 insertions(+), 1 deletion(-)

diff --git a/drivers/xen/pvcalls-back.c b/drivers/xen/pvcalls-back.c
index 701f1fc..be85977 100644
--- a/drivers/xen/pvcalls-back.c
+++ b/drivers/xen/pvcalls-back.c
@@ -348,11 +348,33 @@ static void __pvcalls_back_accept(struct work_struct 
*work)
 static void pvcalls_pass_sk_data_ready(struct sock *sock)
 {
struct sockpass_mapping *mappass = sock->sk_user_data;
+   struct pvcalls_fedata *fedata;
+   struct xen_pvcalls_response *rsp;
+   unsigned long flags;
+   int notify;
 
if (mappass == NULL)
return;
 
-   queue_work(mappass->wq, &mappass->register_work);
+   fedata = mappass->fedata;
+   spin_lock_irqsave(&mappass->copy_lock, flags);
+   if (mappass->reqcopy.cmd == PVCALLS_POLL) {
+   rsp = RING_GET_RESPONSE(&fedata->ring, 
fedata->ring.rsp_prod_pvt++);
+   rsp->req_id = mappass->reqcopy.req_id;
+   rsp->u.poll.id = mappass->reqcopy.u.poll.id;
+   rsp->cmd = mappass->reqcopy.cmd;
+   rsp->ret = 0;
+
+   mappass->reqcopy.cmd = 0;
+   spin_unlock_irqrestore(&mappass->copy_lock, flags);
+
+   RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&fedata->ring, notify);
+   if (notify)
+   notify_remote_via_irq(mappass->fedata->irq);
+   } else {
+   spin_unlock_irqrestore(&mappass->copy_lock, flags);
+   queue_work(mappass->wq, &mappass->register_work);
+   }
 }
 
 static int pvcalls_back_bind(struct xenbus_device *dev,
@@ -499,6 +521,55 @@ static int pvcalls_back_accept(struct xenbus_device *dev,
 static int pvcalls_back_poll(struct xenbus_device *dev,
 struct xen_pvcalls_request *req)
 {
+   struct pvcalls_fedata *fedata;
+   struct sockpass_mapping *mappass;
+   struct xen_pvcalls_response *rsp;
+   struct inet_connection_sock *icsk;
+   struct request_sock_queue *queue;
+   unsigned long flags;
+   int ret;
+   bool data;
+
+   fedata = dev_get_drvdata(&dev->dev);
+
+   mappass = radix_tree_lookup(&fedata->socketpass_mappings, 
req->u.poll.id);
+   if (mappass == NULL)
+   return -EINVAL;
+
+   /*
+* Limitation of the current implementation: only support one
+* concurrent accept or poll call on one socket.
+*/
+   spin_lock_irqsave(&mappass->copy_lock, flags);
+   if (mappass->reqcopy.cmd != 0) {
+   ret = -EINTR;
+   goto out;
+   }
+
+   mappass->reqcopy = *req;
+   icsk = inet_csk(mappass->sock->sk);
+   queue = &icsk->icsk_accept_queue;
+   spin_lock(&queue->rskq_lock);
+   data = queue->rskq_accept_head != NULL;
+   spin_unlock(&queue->rskq_lock);
+   if (data) {
+   mappass->reqcopy.cmd = 0;
+   ret = 0;
+   goto out;
+   }
+   spin_unlock_irqrestore(&mappass->copy_lock, flags);
+
+   /* Tell the caller we don't need to send back a notification yet */
+   return -1;
+
+out:
+   spin_unlock_irqrestore(&mappass->copy_lock, flags);
+
+   rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);
+   rsp->req_id = req->req_id;
+   rsp->cmd = req->cmd;
+   rsp->u.poll.id = req->u.poll.id;
+   rsp->ret = ret;
return 0;
 }
 
-- 
1.9.1


___
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel