Re: [PATCH v2 11/18] xen/pvcalls: implement accept command
On Fri, 26 May 2017, Boris Ostrovsky wrote: > > static void __pvcalls_back_accept(struct work_struct *work) > > { > > + struct sockpass_mapping *mappass = container_of( > > + work, struct sockpass_mapping, register_work); > > + struct sock_mapping *map; > > + struct pvcalls_ioworker *iow; > > + struct pvcalls_back_priv *priv; > > + struct xen_pvcalls_response *rsp; > > + struct xen_pvcalls_request *req; > > + void *page = NULL; > > + int notify; > > + int ret = -EINVAL; > > + unsigned long flags; > > + > > + priv = mappass->priv; > > + /* We only need to check the value of "cmd" atomically on read. */ > > + spin_lock_irqsave(&mappass->copy_lock, flags); > > + req = &mappass->reqcopy; > > + if (req->cmd != PVCALLS_ACCEPT) { > > + spin_unlock_irqrestore(&mappass->copy_lock, flags); > > + return; > > + } > > + spin_unlock_irqrestore(&mappass->copy_lock, flags); > > + > > + map = kzalloc(sizeof(*map), GFP_KERNEL); > > From here on, the code looks almost identical to connect. Can this be > factored out? Yes, good idea, I'll do that
Re: [PATCH v2 11/18] xen/pvcalls: implement accept command
On Fri, 26 May 2017, Boris Ostrovsky wrote: > > static void __pvcalls_back_accept(struct work_struct *work) > > { > > + struct sockpass_mapping *mappass = container_of( > > + work, struct sockpass_mapping, register_work); > > + struct sock_mapping *map; > > + struct pvcalls_ioworker *iow; > > + struct pvcalls_back_priv *priv; > > + struct xen_pvcalls_response *rsp; > > + struct xen_pvcalls_request *req; > > + void *page = NULL; > > + int notify; > > + int ret = -EINVAL; > > + unsigned long flags; > > + > > + priv = mappass->priv; > > + /* We only need to check the value of "cmd" atomically on read. */ > > + spin_lock_irqsave(&mappass->copy_lock, flags); > > + req = &mappass->reqcopy; > > + if (req->cmd != PVCALLS_ACCEPT) { > > + spin_unlock_irqrestore(&mappass->copy_lock, flags); > > + return; > > + } > > + spin_unlock_irqrestore(&mappass->copy_lock, flags); > > + > > + map = kzalloc(sizeof(*map), GFP_KERNEL); > > From here on, the code looks almost identical to connect. Can this be > factored out? Yes, good idea, I'll do that
Re: [PATCH v2 11/18] xen/pvcalls: implement accept command
> static void __pvcalls_back_accept(struct work_struct *work) > { > + struct sockpass_mapping *mappass = container_of( > + work, struct sockpass_mapping, register_work); > + struct sock_mapping *map; > + struct pvcalls_ioworker *iow; > + struct pvcalls_back_priv *priv; > + struct xen_pvcalls_response *rsp; > + struct xen_pvcalls_request *req; > + void *page = NULL; > + int notify; > + int ret = -EINVAL; > + unsigned long flags; > + > + priv = mappass->priv; > + /* We only need to check the value of "cmd" atomically on read. */ > + spin_lock_irqsave(&mappass->copy_lock, flags); > + req = &mappass->reqcopy; > + if (req->cmd != PVCALLS_ACCEPT) { > + spin_unlock_irqrestore(&mappass->copy_lock, flags); > + return; > + } > + spin_unlock_irqrestore(&mappass->copy_lock, flags); > + > + map = kzalloc(sizeof(*map), GFP_KERNEL); From here on, the code looks almost identical to connect. Can this be factored out? -boris
Re: [PATCH v2 11/18] xen/pvcalls: implement accept command
> static void __pvcalls_back_accept(struct work_struct *work) > { > + struct sockpass_mapping *mappass = container_of( > + work, struct sockpass_mapping, register_work); > + struct sock_mapping *map; > + struct pvcalls_ioworker *iow; > + struct pvcalls_back_priv *priv; > + struct xen_pvcalls_response *rsp; > + struct xen_pvcalls_request *req; > + void *page = NULL; > + int notify; > + int ret = -EINVAL; > + unsigned long flags; > + > + priv = mappass->priv; > + /* We only need to check the value of "cmd" atomically on read. */ > + spin_lock_irqsave(&mappass->copy_lock, flags); > + req = &mappass->reqcopy; > + if (req->cmd != PVCALLS_ACCEPT) { > + spin_unlock_irqrestore(&mappass->copy_lock, flags); > + return; > + } > + spin_unlock_irqrestore(&mappass->copy_lock, flags); > + > + map = kzalloc(sizeof(*map), GFP_KERNEL); From here on, the code looks almost identical to connect. Can this be factored out? -boris
[PATCH v2 11/18] xen/pvcalls: implement accept command
Implement the accept command by calling inet_accept. To avoid blocking in the kernel, call inet_accept(O_NONBLOCK) from a workqueue, which get scheduled on sk_data_ready (for a passive socket, it means that there are connections to accept). Use the reqcopy field to store the request. Accept the new socket from the delayed work function, create a new sock_mapping for it, map the indexes page and data ring, and reply to the other end. Allocate an ioworker for the socket. Only support one outstanding blocking accept request for every socket at any time. Add a field to sock_mapping to remember the passive socket from which an active socket was created. Signed-off-by: Stefano Stabellini <sstabellini@kernel.org> CC: boris.ostrovsky@oracle.com CC: jgross@suse.com --- drivers/xen/pvcalls-back.c | 161 +++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 160 insertions(+), 1 deletion(-) diff --git a/drivers/xen/pvcalls-back.c b/drivers/xen/pvcalls-back.c index de82bf5..bc641a8 100644 --- a/drivers/xen/pvcalls-back.c +++ b/drivers/xen/pvcalls-back.c @@ -66,6 +66,7 @@ struct pvcalls_ioworker { struct sock_mapping { struct list_head list; struct pvcalls_back_priv *priv; + struct sockpass_mapping *sockpass; struct socket *sock; uint64_t id; grant_ref_t ref; @@ -267,10 +268,131 @@ static int pvcalls_back_release(struct xenbus_device *dev, static void __pvcalls_back_accept(struct work_struct *work) { + struct sockpass_mapping *mappass = container_of( + work, struct sockpass_mapping, register_work); + struct sock_mapping *map; + struct pvcalls_ioworker *iow; + struct pvcalls_back_priv *priv; + struct xen_pvcalls_response *rsp; + struct xen_pvcalls_request *req; + void *page = NULL; + int notify; + int ret = -EINVAL; + unsigned long flags; + + priv = mappass->priv; + /* We only need to check the value of "cmd" atomically on read. */ + spin_lock_irqsave(&mappass->copy_lock, flags); + req = &mappass->reqcopy; + if (req->cmd != PVCALLS_ACCEPT) { + spin_unlock_irqrestore(&mappass->copy_lock, flags); + return; + } + spin_unlock_irqrestore(&mappass->copy_lock, flags); + + map = kzalloc(sizeof(*map), GFP_KERNEL); + if (map == NULL) { + ret = -ENOMEM; + goto out_error; + } + + map->sock = sock_alloc(); + if (!map->sock) + goto out_error; + + map->ref = req->u.accept.ref; + + map->priv = priv; + map->sockpass = mappass; + map->sock->type = mappass->sock->type; + map->sock->ops = mappass->sock->ops; + map->id = req->u.accept.id_new; + + ret = xenbus_map_ring_valloc(priv->dev, &req->u.accept.ref, 1, &page); + if (ret < 0) + goto out_error; + map->ring = page; + map->ring_order = map->ring->ring_order; + /* first read the order, then map the data ring */ + virt_rmb(); + if (map->ring_order > MAX_RING_ORDER) { + ret = -EFAULT; + goto out_error; + } + ret = xenbus_map_ring_valloc(priv->dev, map->ring->ref, +(1 << map->ring_order), &page); + if (ret < 0) + goto out_error; + map->bytes = page; + + ret = bind_interdomain_evtchn_to_irqhandler(priv->dev->otherend_id, + req->u.accept.evtchn, + pvcalls_back_conn_event, + 0, + "pvcalls-backend", + map); + if (ret < 0) + goto out_error; + map->irq = ret; + + map->data.in = map->bytes; + map->data.out = map->bytes + XEN_FLEX_RING_SIZE(map->ring_order); + + map->ioworker.wq = alloc_workqueue("pvcalls_io", WQ_UNBOUND, 1); + if (!map->ioworker.wq) { + ret = -ENOMEM; + goto out_error; + } + map->ioworker.cpu = get_random_int() % num_online_cpus(); + atomic_set(&map->io, 1); + INIT_WORK(&map->ioworker.register_work, pvcalls_back_ioworker); + + down(&priv->socket_lock); + list_add_tail(&map->list, &priv->socket_mappings); + up(&priv->socket_lock); + + ret = inet_accept(mappass->sock, map->sock, O_NONBLOCK, true); + if (ret == -EAGAIN) + goto out_error; + + write_lock_bh(&map->sock->sk->sk_callback_lock); + map->saved_data_ready = map->sock->sk->sk_data_ready; + map->sock->sk->sk_user_data = map; + map->sock->sk->sk_data_ready = pvcalls_sk_data_ready; + map->sock->sk->sk_state_change = pvcalls_sk_state_change; + write_unlock_bh(&map->sock->sk->sk_callback_lock); + + iow = &map->ioworker; + atomic_inc(&map->read); + atomic_inc(&map->io); + queue_work_on(iow->cpu, iow->wq, &iow->register_work); +
[PATCH v2 11/18] xen/pvcalls: implement accept command
Implement the accept command by calling inet_accept. To avoid blocking in the kernel, call inet_accept(O_NONBLOCK) from a workqueue, which get scheduled on sk_data_ready (for a passive socket, it means that there are connections to accept). Use the reqcopy field to store the request. Accept the new socket from the delayed work function, create a new sock_mapping for it, map the indexes page and data ring, and reply to the other end. Allocate an ioworker for the socket. Only support one outstanding blocking accept request for every socket at any time. Add a field to sock_mapping to remember the passive socket from which an active socket was created. Signed-off-by: Stefano Stabellini <sstabellini@kernel.org> CC: boris.ostrovsky@oracle.com CC: jgross@suse.com --- drivers/xen/pvcalls-back.c | 161 +++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 160 insertions(+), 1 deletion(-) diff --git a/drivers/xen/pvcalls-back.c b/drivers/xen/pvcalls-back.c index de82bf5..bc641a8 100644 --- a/drivers/xen/pvcalls-back.c +++ b/drivers/xen/pvcalls-back.c @@ -66,6 +66,7 @@ struct pvcalls_ioworker { struct sock_mapping { struct list_head list; struct pvcalls_back_priv *priv; + struct sockpass_mapping *sockpass; struct socket *sock; uint64_t id; grant_ref_t ref; @@ -267,10 +268,131 @@ static int pvcalls_back_release(struct xenbus_device *dev, static void __pvcalls_back_accept(struct work_struct *work) { + struct sockpass_mapping *mappass = container_of( + work, struct sockpass_mapping, register_work); + struct sock_mapping *map; + struct pvcalls_ioworker *iow; + struct pvcalls_back_priv *priv; + struct xen_pvcalls_response *rsp; + struct xen_pvcalls_request *req; + void *page = NULL; + int notify; + int ret = -EINVAL; + unsigned long flags; + + priv = mappass->priv; + /* We only need to check the value of "cmd" atomically on read. */ + spin_lock_irqsave(&mappass->copy_lock, flags); + req = &mappass->reqcopy; + if (req->cmd != PVCALLS_ACCEPT) { + spin_unlock_irqrestore(&mappass->copy_lock, flags); + return; + } + spin_unlock_irqrestore(&mappass->copy_lock, flags); + + map = kzalloc(sizeof(*map), GFP_KERNEL); + if (map == NULL) { + ret = -ENOMEM; + goto out_error; + } + + map->sock = sock_alloc(); + if (!map->sock) + goto out_error; + + map->ref = req->u.accept.ref; + + map->priv = priv; + map->sockpass = mappass; + map->sock->type = mappass->sock->type; + map->sock->ops = mappass->sock->ops; + map->id = req->u.accept.id_new; + + ret = xenbus_map_ring_valloc(priv->dev, &req->u.accept.ref, 1, &page); + if (ret < 0) + goto out_error; + map->ring = page; + map->ring_order = map->ring->ring_order; + /* first read the order, then map the data ring */ + virt_rmb(); + if (map->ring_order > MAX_RING_ORDER) { + ret = -EFAULT; + goto out_error; + } + ret = xenbus_map_ring_valloc(priv->dev, map->ring->ref, +(1 << map->ring_order), &page); + if (ret < 0) + goto out_error; + map->bytes = page; + + ret = bind_interdomain_evtchn_to_irqhandler(priv->dev->otherend_id, + req->u.accept.evtchn, + pvcalls_back_conn_event, + 0, + "pvcalls-backend", + map); + if (ret < 0) + goto out_error; + map->irq = ret; + + map->data.in = map->bytes; + map->data.out = map->bytes + XEN_FLEX_RING_SIZE(map->ring_order); + + map->ioworker.wq = alloc_workqueue("pvcalls_io", WQ_UNBOUND, 1); + if (!map->ioworker.wq) { + ret = -ENOMEM; + goto out_error; + } + map->ioworker.cpu = get_random_int() % num_online_cpus(); + atomic_set(&map->io, 1); + INIT_WORK(&map->ioworker.register_work, pvcalls_back_ioworker); + + down(&priv->socket_lock); + list_add_tail(&map->list, &priv->socket_mappings); + up(&priv->socket_lock); + + ret = inet_accept(mappass->sock, map->sock, O_NONBLOCK, true); + if (ret == -EAGAIN) + goto out_error; + + write_lock_bh(&map->sock->sk->sk_callback_lock); + map->saved_data_ready = map->sock->sk->sk_data_ready; + map->sock->sk->sk_user_data = map; + map->sock->sk->sk_data_ready = pvcalls_sk_data_ready; + map->sock->sk->sk_state_change = pvcalls_sk_state_change; + write_unlock_bh(&map->sock->sk->sk_callback_lock); + + iow = &map->ioworker; + atomic_inc(&map->read); + atomic_inc(&map->io); + queue_work_on(iow->cpu, iow->wq, &iow->register_work); + +out_error: + if (ret <