Add support for NBD_CMD_CLOSE. On receipt of NBD_CMD_CLOSE the server
waits for all inflight commands to complete, then does a FLUSH and
sends a reply (reporting any error, though a reply is optional in the
spec), then closes the channel (also optional in the spec, and an easy
way to avoid processing further commands).

Signed-off-by: Alex Bligh <[email protected]>
---
 nbd-server.c | 52 ++++++++++++++++++++++++++++++++++++++++++++++------
 nbd.h        |  4 +++-
 2 files changed, 49 insertions(+), 7 deletions(-)

This is completely untested at my end, beyond noting that it compiles.
I have no client that sends NBD_CMD_CLOSE to test it against, but
Eric is working on one, and I thought he might like a reference
implementation.
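
In case it helps, below is an equally untested sketch of what the
client side might look like: build the 28-byte request by hand, send
NBD_CMD_CLOSE, and wait for the final 16-byte reply before expecting
the server to drop the connection. The magic numbers are the standard
NBD wire constants; the socket is assumed to be an already-negotiated
connection, and send_close() is just an illustrative name.

#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <arpa/inet.h>

#define NBD_REQUEST_MAGIC 0x25609513
#define NBD_REPLY_MAGIC   0x67446698
#define NBD_CMD_CLOSE     7

/* Send NBD_CMD_CLOSE and wait for the final reply; returns 0 on a
 * clean close, -1 on a short read/write, bad magic, or an error
 * reported by the server. */
static int send_close(int sock)
{
        unsigned char req[28], rep[16];
        uint32_t word;

        memset(req, 0, sizeof req);
        word = htonl(NBD_REQUEST_MAGIC);
        memcpy(req, &word, 4);          /* magic */
        word = htonl(NBD_CMD_CLOSE);
        memcpy(req + 4, &word, 4);      /* type; handle/from/len stay 0 */
        if (write(sock, req, sizeof req) != sizeof req)
                return -1;
        /* the server flushes, replies, then closes the channel */
        if (read(sock, rep, sizeof rep) != sizeof rep)
                return -1;
        memcpy(&word, rep, 4);
        if (ntohl(word) != NBD_REPLY_MAGIC)
                return -1;
        memcpy(&word, rep + 4, 4);      /* error field */
        return ntohl(word) ? -1 : 0;
}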

diff --git a/nbd-server.c b/nbd-server.c
index 4edb883..1b558d7 100644
--- a/nbd-server.c
+++ b/nbd-server.c
@@ -261,6 +261,8 @@ static inline const char * getcommandname(uint64_t command) {
                return "NBD_CMD_FLUSH";
        case NBD_CMD_TRIM:
                return "NBD_CMD_TRIM";
+       case NBD_CMD_CLOSE:
+               return "NBD_CMD_CLOSE";
        default:
                return "UNKNOWN";
        }
@@ -1396,6 +1398,7 @@ void send_export_info(CLIENT* client) {
                flags |= NBD_FLAG_ROTATIONAL;
        if (client->server->flags & F_TRIM)
                flags |= NBD_FLAG_SEND_TRIM;
+       flags |= NBD_FLAG_SEND_CLOSE;
        flags = htons(flags);
        if (write(client->net, &flags, sizeof(flags)) < 0)
                err("Negotiation failed/11: %m");
@@ -1517,6 +1520,19 @@ static void handle_trim(CLIENT* client, struct nbd_request* req) {
        pthread_mutex_unlock(&(client->lock));
 }
 
+static void handle_close(CLIENT* client, struct nbd_request* req) {
+       struct nbd_reply rep;
+       DEBUG("handling close request\n");
+       setup_reply(&rep, req);
+       if(expflush(client)) {
+               DEBUG("Flush on close failed: %m");
+               rep.error = nbd_errno(errno);
+       }
+       pthread_mutex_lock(&(client->lock));
+       writeit(client->net, &rep, sizeof rep);
+       pthread_mutex_unlock(&(client->lock));
+}
+
 static void handle_request(gpointer data, gpointer user_data) {
        struct work_package* package = (struct work_package*) data;
        uint32_t type = package->req->type & NBD_CMD_MASK_COMMAND;
@@ -1541,6 +1557,9 @@ static void handle_request(gpointer data, gpointer user_data) {
                case NBD_CMD_TRIM:
                        handle_trim(package->client, package->req);
                        break;
+               case NBD_CMD_CLOSE:
+                       handle_close(package->client, package->req);
+                       break;
                default:
                msg(LOG_ERR, "E: received unknown command %d of type, ignoring", package->req->type);
                        goto error;
@@ -1586,6 +1605,14 @@ static int mainloop_threaded(CLIENT* client) {
                        g_thread_pool_free(tpool, FALSE, TRUE);
                        return 0;
                }
+               if(req->type == NBD_CMD_CLOSE) {
+                       // we call with immediate FALSE which ensures all current
+                       // threads end their work, then manually handle the close
+                       // request
+                       g_thread_pool_free(tpool, FALSE, TRUE);
+                       handle_request(pkg, NULL);
+                       return 0;
+               }
                g_thread_pool_push(tpool, pkg, NULL);
        }
 }
@@ -1673,12 +1700,6 @@ int mainloop(CLIENT *client) {
 
                case NBD_CMD_DISC:
                        msg(LOG_INFO, "Disconnect request received.");
-                       if (client->server->flags & F_COPYONWRITE) { 
-                               if (client->difmap) g_free(client->difmap) ;
-                               close(client->difffile);
-                               unlink(client->difffilename);
-                               free(client->difffilename);
-                       }
                        go_on=FALSE;
                        continue;
 
@@ -1762,11 +1783,30 @@ int mainloop(CLIENT *client) {
                        SEND(client->net, reply);
                        continue;
 
+               case NBD_CMD_CLOSE:
+                       DEBUG("cl: ");
+                       if (expflush(client)) {
+                               DEBUG("Flush on close failed: %m");
+                               ERROR(client, reply, errno);
+                       } else {
+                               SEND(client->net, reply);
+                               DEBUG("OK!\n");
+                       }
+                       msg(LOG_INFO, "Close request received.");
+                       go_on=FALSE;
+                       continue;
+
                default:
                        DEBUG ("Ignoring unknown command\n");
                        continue;
                }
        }
+       if (client->server->flags & F_COPYONWRITE) {
+               if (client->difmap) g_free(client->difmap) ;
+               close(client->difffile);
+               unlink(client->difffilename);
+               free(client->difffilename);
+       }
        return 0;
 }
 
diff --git a/nbd.h b/nbd.h
index 732c605..1841746 100644
--- a/nbd.h
+++ b/nbd.h
@@ -34,7 +34,8 @@ enum {
        NBD_CMD_WRITE = 1,
        NBD_CMD_DISC = 2,
        NBD_CMD_FLUSH = 3,
-       NBD_CMD_TRIM = 4
+       NBD_CMD_TRIM = 4,
+       NBD_CMD_CLOSE = 7
 };
 
 #define NBD_CMD_MASK_COMMAND 0x0000ffff
@@ -48,6 +49,7 @@ enum {
 #define NBD_FLAG_SEND_FUA      (1 << 3)        /* Send FUA (Force Unit Access) */
 #define NBD_FLAG_ROTATIONAL    (1 << 4)        /* Use elevator algorithm - rotational media */
 #define NBD_FLAG_SEND_TRIM     (1 << 5)        /* Send TRIM (discard) */
+#define NBD_FLAG_SEND_CLOSE    (1 << 8)        /* Send CLOSE */
 
 #define nbd_cmd(req) ((req)->cmd[0])
 
-- 
1.9.1

