Re: [Qemu-devel] [PATCH 1/3] nbd: Drop BDS backpointer
On 02/02/2015 22:40, Max Reitz wrote: Before this patch, the opaque pointer in an NBD BDS points to a BDRVNBDState, which contains an NbdClientSession object, which in turn contains a pointer to the BDS. This pointer may become invalid due to bdrv_swap(), so drop it, and instead pass the BDS directly to the nbd-client.c functions which then retrieve the NbdClientSession object from there. Looks good, but please change function names from nbd_client_session_foo to nbd_client_foo or even just nbd_foo if they do not take an NbdClientSession* as the first parameter. Thanks, Paolo Signed-off-by: Max Reitz mre...@redhat.com --- block/nbd-client.c | 95 -- block/nbd-client.h | 20 ++-- block/nbd.c| 37 - 3 files changed, 73 insertions(+), 79 deletions(-) diff --git a/block/nbd-client.c b/block/nbd-client.c index 28bfb62..4ede714 100644 --- a/block/nbd-client.c +++ b/block/nbd-client.c @@ -43,20 +43,23 @@ static void nbd_recv_coroutines_enter_all(NbdClientSession *s) } } -static void nbd_teardown_connection(NbdClientSession *client) +static void nbd_teardown_connection(BlockDriverState *bs) { +NbdClientSession *client = nbd_get_client_session(bs); + /* finish any pending coroutines */ shutdown(client-sock, 2); nbd_recv_coroutines_enter_all(client); -nbd_client_session_detach_aio_context(client); +nbd_client_session_detach_aio_context(bs); closesocket(client-sock); client-sock = -1; } static void nbd_reply_ready(void *opaque) { -NbdClientSession *s = opaque; +BlockDriverState *bs = opaque; +NbdClientSession *s = nbd_get_client_session(bs); uint64_t i; int ret; @@ -89,28 +92,29 @@ static void nbd_reply_ready(void *opaque) } fail: -nbd_teardown_connection(s); +nbd_teardown_connection(bs); } static void nbd_restart_write(void *opaque) { -NbdClientSession *s = opaque; +BlockDriverState *bs = opaque; -qemu_coroutine_enter(s-send_coroutine, NULL); +qemu_coroutine_enter(nbd_get_client_session(bs)-send_coroutine, NULL); } -static int nbd_co_send_request(NbdClientSession *s, -struct 
nbd_request *request, -QEMUIOVector *qiov, int offset) +static int nbd_co_send_request(BlockDriverState *bs, + struct nbd_request *request, + QEMUIOVector *qiov, int offset) { +NbdClientSession *s = nbd_get_client_session(bs); AioContext *aio_context; int rc, ret; qemu_co_mutex_lock(s-send_mutex); s-send_coroutine = qemu_coroutine_self(); -aio_context = bdrv_get_aio_context(s-bs); +aio_context = bdrv_get_aio_context(bs); aio_set_fd_handler(aio_context, s-sock, - nbd_reply_ready, nbd_restart_write, s); + nbd_reply_ready, nbd_restart_write, bs); if (qiov) { if (!s-is_unix) { socket_set_cork(s-sock, 1); @@ -129,7 +133,7 @@ static int nbd_co_send_request(NbdClientSession *s, } else { rc = nbd_send_request(s-sock, request); } -aio_set_fd_handler(aio_context, s-sock, nbd_reply_ready, NULL, s); +aio_set_fd_handler(aio_context, s-sock, nbd_reply_ready, NULL, bs); s-send_coroutine = NULL; qemu_co_mutex_unlock(s-send_mutex); return rc; @@ -195,10 +199,11 @@ static void nbd_coroutine_end(NbdClientSession *s, } } -static int nbd_co_readv_1(NbdClientSession *client, int64_t sector_num, +static int nbd_co_readv_1(BlockDriverState *bs, int64_t sector_num, int nb_sectors, QEMUIOVector *qiov, int offset) { +NbdClientSession *client = nbd_get_client_session(bs); struct nbd_request request = { .type = NBD_CMD_READ }; struct nbd_reply reply; ssize_t ret; @@ -207,7 +212,7 @@ static int nbd_co_readv_1(NbdClientSession *client, int64_t sector_num, request.len = nb_sectors * 512; nbd_coroutine_start(client, request); -ret = nbd_co_send_request(client, request, NULL, 0); +ret = nbd_co_send_request(bs, request, NULL, 0); if (ret 0) { reply.error = -ret; } else { @@ -218,15 +223,16 @@ static int nbd_co_readv_1(NbdClientSession *client, int64_t sector_num, } -static int nbd_co_writev_1(NbdClientSession *client, int64_t sector_num, +static int nbd_co_writev_1(BlockDriverState *bs, int64_t sector_num, int nb_sectors, QEMUIOVector *qiov, int offset) { +NbdClientSession *client = 
nbd_get_client_session(bs); struct nbd_request request = { .type = NBD_CMD_WRITE }; struct nbd_reply reply; ssize_t ret; -if (!bdrv_enable_write_cache(client->bs) && +if (!bdrv_enable_write_cache(bs) && (client->nbdflags
Re: [Qemu-devel] [PATCH 1/3] nbd: Drop BDS backpointer
On 2015-02-03 at 03:37, Paolo Bonzini wrote: On 02/02/2015 22:40, Max Reitz wrote: Before this patch, the opaque pointer in an NBD BDS points to a BDRVNBDState, which contains an NbdClientSession object, which in turn contains a pointer to the BDS. This pointer may become invalid due to bdrv_swap(), so drop it, and instead pass the BDS directly to the nbd-client.c functions which then retrieve the NbdClientSession object from there. Looks good, but please change function names from nbd_client_session_foo to nbd_client_foo or even just nbd_foo if they do not take an NbdClientSession* as the first parameter. Ah, that makes a lot of sense, especially concerning the callback functions (albeit they were named nbd_foo already, but well...) which only take a void pointer. Will do, thanks, Max Thanks, Paolo
[Qemu-devel] [PATCH 1/3] nbd: Drop BDS backpointer
Before this patch, the opaque pointer in an NBD BDS points to a BDRVNBDState, which contains an NbdClientSession object, which in turn contains a pointer to the BDS. This pointer may become invalid due to bdrv_swap(), so drop it, and instead pass the BDS directly to the nbd-client.c functions which then retrieve the NbdClientSession object from there. Signed-off-by: Max Reitz mre...@redhat.com --- block/nbd-client.c | 95 -- block/nbd-client.h | 20 ++-- block/nbd.c| 37 - 3 files changed, 73 insertions(+), 79 deletions(-) diff --git a/block/nbd-client.c b/block/nbd-client.c index 28bfb62..4ede714 100644 --- a/block/nbd-client.c +++ b/block/nbd-client.c @@ -43,20 +43,23 @@ static void nbd_recv_coroutines_enter_all(NbdClientSession *s) } } -static void nbd_teardown_connection(NbdClientSession *client) +static void nbd_teardown_connection(BlockDriverState *bs) { +NbdClientSession *client = nbd_get_client_session(bs); + /* finish any pending coroutines */ shutdown(client-sock, 2); nbd_recv_coroutines_enter_all(client); -nbd_client_session_detach_aio_context(client); +nbd_client_session_detach_aio_context(bs); closesocket(client-sock); client-sock = -1; } static void nbd_reply_ready(void *opaque) { -NbdClientSession *s = opaque; +BlockDriverState *bs = opaque; +NbdClientSession *s = nbd_get_client_session(bs); uint64_t i; int ret; @@ -89,28 +92,29 @@ static void nbd_reply_ready(void *opaque) } fail: -nbd_teardown_connection(s); +nbd_teardown_connection(bs); } static void nbd_restart_write(void *opaque) { -NbdClientSession *s = opaque; +BlockDriverState *bs = opaque; -qemu_coroutine_enter(s-send_coroutine, NULL); +qemu_coroutine_enter(nbd_get_client_session(bs)-send_coroutine, NULL); } -static int nbd_co_send_request(NbdClientSession *s, -struct nbd_request *request, -QEMUIOVector *qiov, int offset) +static int nbd_co_send_request(BlockDriverState *bs, + struct nbd_request *request, + QEMUIOVector *qiov, int offset) { +NbdClientSession *s = nbd_get_client_session(bs); 
AioContext *aio_context; int rc, ret; qemu_co_mutex_lock(s-send_mutex); s-send_coroutine = qemu_coroutine_self(); -aio_context = bdrv_get_aio_context(s-bs); +aio_context = bdrv_get_aio_context(bs); aio_set_fd_handler(aio_context, s-sock, - nbd_reply_ready, nbd_restart_write, s); + nbd_reply_ready, nbd_restart_write, bs); if (qiov) { if (!s-is_unix) { socket_set_cork(s-sock, 1); @@ -129,7 +133,7 @@ static int nbd_co_send_request(NbdClientSession *s, } else { rc = nbd_send_request(s-sock, request); } -aio_set_fd_handler(aio_context, s-sock, nbd_reply_ready, NULL, s); +aio_set_fd_handler(aio_context, s-sock, nbd_reply_ready, NULL, bs); s-send_coroutine = NULL; qemu_co_mutex_unlock(s-send_mutex); return rc; @@ -195,10 +199,11 @@ static void nbd_coroutine_end(NbdClientSession *s, } } -static int nbd_co_readv_1(NbdClientSession *client, int64_t sector_num, +static int nbd_co_readv_1(BlockDriverState *bs, int64_t sector_num, int nb_sectors, QEMUIOVector *qiov, int offset) { +NbdClientSession *client = nbd_get_client_session(bs); struct nbd_request request = { .type = NBD_CMD_READ }; struct nbd_reply reply; ssize_t ret; @@ -207,7 +212,7 @@ static int nbd_co_readv_1(NbdClientSession *client, int64_t sector_num, request.len = nb_sectors * 512; nbd_coroutine_start(client, request); -ret = nbd_co_send_request(client, request, NULL, 0); +ret = nbd_co_send_request(bs, request, NULL, 0); if (ret 0) { reply.error = -ret; } else { @@ -218,15 +223,16 @@ static int nbd_co_readv_1(NbdClientSession *client, int64_t sector_num, } -static int nbd_co_writev_1(NbdClientSession *client, int64_t sector_num, +static int nbd_co_writev_1(BlockDriverState *bs, int64_t sector_num, int nb_sectors, QEMUIOVector *qiov, int offset) { +NbdClientSession *client = nbd_get_client_session(bs); struct nbd_request request = { .type = NBD_CMD_WRITE }; struct nbd_reply reply; ssize_t ret; -if (!bdrv_enable_write_cache(client-bs) +if (!bdrv_enable_write_cache(bs) (client-nbdflags NBD_FLAG_SEND_FUA)) { 
request.type |= NBD_CMD_FLAG_FUA; } @@ -235,7 +241,7 @@ static int nbd_co_writev_1(NbdClientSession *client, int64_t sector_num, request.len = nb_sectors * 512; nbd_coroutine_start(client, &request); -ret = nbd_co_send_request(client, &request, qiov, offset); +ret = nbd_co_send_request(bs, &request, qiov, offset);