Modify data structures and add functions to support adding and
removing memory regions/slots at runtime:

- define VHOST_MEMORY_MAX_NREGIONS and adapt
  vhost_user_set_mem_table() accordingly;
- report the slot limit via vhost_user_get_max_mem_slots();
- dynamically add new memory slots via vhost_user_add_mem_reg();
- remove unused memory slots via vhost_user_rem_mem_reg();
- define the VhostUserSingleMemReg structure describing a single
  memory region;
- extend VhostUserRequest and VhostUserMsg accordingly.
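
With VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS negotiated, the message
flow assumed here (mirroring the vhost-user specification) is:

  VHOST_USER_GET_MAX_MEM_SLOTS  -> back-end replies with
                                   VHOST_MEMORY_MAX_NREGIONS
  VHOST_USER_ADD_MEM_REG (+ fd) -> back-end mmap()s the region and
                                   inserts it into dev->mem
  VHOST_USER_REM_MEM_REG        -> back-end munmap()s and clears the
                                   matching region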

Signed-off-by: Pravin M Bathija <pravin.bath...@dell.com>
---
 lib/vhost/vhost_user.c | 325 +++++++++++++++++++++++++++++++++++------
 lib/vhost/vhost_user.h |  10 ++
 2 files changed, 291 insertions(+), 44 deletions(-)
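
For reviewers, a minimal illustrative sketch of the single-region payload
that vhost_user_add_mem_reg()/vhost_user_rem_mem_reg() parse, and how a
front-end might fill it. This is not part of the patch: the struct layouts
match the definitions used in the diff, and fill_single_mem_reg() is a
hypothetical helper.

    /* Illustrative only -- not part of this patch. */
    #include <stdint.h>

    typedef struct VhostUserMemoryRegion {
            uint64_t guest_phys_addr;   /* guest physical address */
            uint64_t memory_size;       /* region length in bytes */
            uint64_t userspace_addr;    /* front-end virtual address */
            uint64_t mmap_offset;       /* offset into the backing fd */
    } VhostUserMemoryRegion;

    typedef struct VhostUserSingleMemReg {
            uint64_t padding;           /* keeps the QEMU wire layout */
            VhostUserMemoryRegion region;
    } VhostUserSingleMemReg;

    /* Fill one region descriptor; the backing fd is passed separately
     * as ancillary data (seen as ctx->fds[0] on the back-end side). */
    static inline void
    fill_single_mem_reg(VhostUserSingleMemReg *msg, uint64_t gpa,
                        uint64_t size, uint64_t uva, uint64_t mmap_offset)
    {
            msg->padding = 0;
            msg->region.guest_phys_addr = gpa;
            msg->region.memory_size = size;
            msg->region.userspace_addr = uva;
            msg->region.mmap_offset = mmap_offset;
    }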

diff --git a/lib/vhost/vhost_user.c b/lib/vhost/vhost_user.c
index b73dec6a22..dc72007584 100644
--- a/lib/vhost/vhost_user.c
+++ b/lib/vhost/vhost_user.c
@@ -74,6 +74,9 @@ VHOST_MESSAGE_HANDLER(VHOST_USER_SET_FEATURES, vhost_user_set_features, false, t
 VHOST_MESSAGE_HANDLER(VHOST_USER_SET_OWNER, vhost_user_set_owner, false, true) \
 VHOST_MESSAGE_HANDLER(VHOST_USER_RESET_OWNER, vhost_user_reset_owner, false, false) \
 VHOST_MESSAGE_HANDLER(VHOST_USER_SET_MEM_TABLE, vhost_user_set_mem_table, true, true) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_GET_MAX_MEM_SLOTS, vhost_user_get_max_mem_slots, false, false) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_ADD_MEM_REG, vhost_user_add_mem_reg, true, true) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_REM_MEM_REG, vhost_user_rem_mem_reg, true, true) \
 VHOST_MESSAGE_HANDLER(VHOST_USER_SET_LOG_BASE, vhost_user_set_log_base, true, true) \
 VHOST_MESSAGE_HANDLER(VHOST_USER_SET_LOG_FD, vhost_user_set_log_fd, true, true) \
 VHOST_MESSAGE_HANDLER(VHOST_USER_SET_VRING_NUM, vhost_user_set_vring_num, false, true) \
@@ -228,7 +231,17 @@ async_dma_map(struct virtio_net *dev, bool do_map)
 }
 
 static void
-free_mem_region(struct virtio_net *dev)
+free_mem_region(struct rte_vhost_mem_region *reg)
+{
+       if (reg != NULL && reg->host_user_addr) {
+               munmap(reg->mmap_addr, reg->mmap_size);
+               close(reg->fd);
+               memset(reg, 0, sizeof(struct rte_vhost_mem_region));
+       }
+}
+
+static void
+free_all_mem_regions(struct virtio_net *dev)
 {
        uint32_t i;
        struct rte_vhost_mem_region *reg;
@@ -239,12 +252,10 @@ free_mem_region(struct virtio_net *dev)
        if (dev->async_copy && rte_vfio_is_enabled("vfio"))
                async_dma_map(dev, false);
 
-       for (i = 0; i < dev->mem->nregions; i++) {
+       for (i = 0; i < VHOST_MEMORY_MAX_NREGIONS; i++) {
                reg = &dev->mem->regions[i];
-               if (reg->host_user_addr) {
-                       munmap(reg->mmap_addr, reg->mmap_size);
-                       close(reg->fd);
-               }
+               if (reg->mmap_addr)
+                       free_mem_region(reg);
        }
 }
 
@@ -258,7 +269,7 @@ vhost_backend_cleanup(struct virtio_net *dev)
                vdpa_dev->ops->dev_cleanup(dev->vid);
 
        if (dev->mem) {
-               free_mem_region(dev);
+               free_all_mem_regions(dev);
                rte_free(dev->mem);
                dev->mem = NULL;
        }
@@ -707,7 +718,7 @@ numa_realloc(struct virtio_net **pdev, struct vhost_virtqueue **pvq)
        vhost_devices[dev->vid] = dev;
 
        mem_size = sizeof(struct rte_vhost_memory) +
-               sizeof(struct rte_vhost_mem_region) * dev->mem->nregions;
+               sizeof(struct rte_vhost_mem_region) * VHOST_MEMORY_MAX_NREGIONS;
        mem = rte_realloc_socket(dev->mem, mem_size, 0, node);
        if (!mem) {
                VHOST_CONFIG_LOG(dev->ifname, ERR,
@@ -811,8 +822,10 @@ hua_to_alignment(struct rte_vhost_memory *mem, void *ptr)
        uint32_t i;
        uintptr_t hua = (uintptr_t)ptr;
 
-       for (i = 0; i < mem->nregions; i++) {
+       for (i = 0; i < VHOST_MEMORY_MAX_NREGIONS; i++) {
                r = &mem->regions[i];
+               if (r->host_user_addr == 0)
+                       continue;
                if (hua >= r->host_user_addr &&
                        hua < r->host_user_addr + r->size) {
                        return get_blk_size(r->fd);
@@ -1250,9 +1263,13 @@ vhost_user_postcopy_register(struct virtio_net *dev, int main_fd,
         * retrieve the region offset when handling userfaults.
         */
        memory = &ctx->msg.payload.memory;
-       for (i = 0; i < memory->nregions; i++) {
+       int reg_msg_index = 0;
+       for (i = 0; i < VHOST_MEMORY_MAX_NREGIONS; i++) {
                reg = &dev->mem->regions[i];
-               memory->regions[i].userspace_addr = reg->host_user_addr;
+               if (reg->host_user_addr == 0)
+                       continue;
+               memory->regions[reg_msg_index].userspace_addr = reg->host_user_addr;
+               reg_msg_index++;
        }
 
        /* Send the addresses back to qemu */
@@ -1279,8 +1296,10 @@ vhost_user_postcopy_register(struct virtio_net *dev, int main_fd,
        }
 
        /* Now userfault register and we can use the memory */
-       for (i = 0; i < memory->nregions; i++) {
+       for (i = 0; i < VHOST_MEMORY_MAX_NREGIONS; i++) {
                reg = &dev->mem->regions[i];
+               if (reg->host_user_addr == 0)
+                       continue;
                if (vhost_user_postcopy_region_register(dev, reg) < 0)
                        return -1;
        }
@@ -1385,6 +1404,46 @@ vhost_user_mmap_region(struct virtio_net *dev,
        return 0;
 }
 
+static int
+vhost_user_initialize_memory(struct virtio_net **pdev)
+{
+       struct virtio_net *dev = *pdev;
+       int numa_node = SOCKET_ID_ANY;
+
+       /*
+        * If VQ 0 has already been allocated, try to allocate on the same
+        * NUMA node. It can be reallocated later in numa_realloc().
+        */
+       if (dev->nr_vring > 0)
+               numa_node = dev->virtqueue[0]->numa_node;
+
+       dev->nr_guest_pages = 0;
+       if (dev->guest_pages == NULL) {
+               dev->max_guest_pages = 8;
+               dev->guest_pages = rte_zmalloc_socket(NULL,
+                                       dev->max_guest_pages *
+                                       sizeof(struct guest_page),
+                                       RTE_CACHE_LINE_SIZE,
+                                       numa_node);
+               if (dev->guest_pages == NULL) {
+                       VHOST_CONFIG_LOG(dev->ifname, ERR,
+                               "failed to allocate memory for dev->guest_pages");
+                       return -1;
+               }
+       }
+
+       dev->mem = rte_zmalloc_socket("vhost-mem-table", sizeof(struct rte_vhost_memory) +
+               sizeof(struct rte_vhost_mem_region) * VHOST_MEMORY_MAX_NREGIONS, 0, numa_node);
+       if (dev->mem == NULL) {
+               VHOST_CONFIG_LOG(dev->ifname, ERR, "failed to allocate memory for dev->mem");
+               rte_free(dev->guest_pages);
+               dev->guest_pages = NULL;
+               return -1;
+       }
+
+       return 0;
+}
+
 static int
 vhost_user_set_mem_table(struct virtio_net **pdev,
                        struct vhu_msg_context *ctx,
@@ -1393,7 +1452,6 @@ vhost_user_set_mem_table(struct virtio_net **pdev,
        struct virtio_net *dev = *pdev;
        struct VhostUserMemory *memory = &ctx->msg.payload.memory;
        struct rte_vhost_mem_region *reg;
-       int numa_node = SOCKET_ID_ANY;
        uint64_t mmap_offset;
        uint32_t i;
        bool async_notify = false;
@@ -1438,39 +1496,13 @@ vhost_user_set_mem_table(struct virtio_net **pdev,
                if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
                        vhost_user_iotlb_flush_all(dev);
 
-               free_mem_region(dev);
+               free_all_mem_regions(dev);
                rte_free(dev->mem);
                dev->mem = NULL;
        }
 
-       /*
-        * If VQ 0 has already been allocated, try to allocate on the same
-        * NUMA node. It can be reallocated later in numa_realloc().
-        */
-       if (dev->nr_vring > 0)
-               numa_node = dev->virtqueue[0]->numa_node;
-
-       dev->nr_guest_pages = 0;
-       if (dev->guest_pages == NULL) {
-               dev->max_guest_pages = 8;
-               dev->guest_pages = rte_zmalloc_socket(NULL,
-                                       dev->max_guest_pages *
-                                       sizeof(struct guest_page),
-                                       RTE_CACHE_LINE_SIZE,
-                                       numa_node);
-               if (dev->guest_pages == NULL) {
-                       VHOST_CONFIG_LOG(dev->ifname, ERR,
-                               "failed to allocate memory for dev->guest_pages");
-                       goto close_msg_fds;
-               }
-       }
-
-       dev->mem = rte_zmalloc_socket("vhost-mem-table", sizeof(struct rte_vhost_memory) +
-               sizeof(struct rte_vhost_mem_region) * memory->nregions, 0, numa_node);
-       if (dev->mem == NULL) {
-               VHOST_CONFIG_LOG(dev->ifname, ERR, "failed to allocate memory for dev->mem");
-               goto free_guest_pages;
-       }
+       if (vhost_user_initialize_memory(pdev) < 0)
+               goto close_msg_fds;
 
        for (i = 0; i < memory->nregions; i++) {
                reg = &dev->mem->regions[i];
@@ -1534,11 +1566,182 @@ vhost_user_set_mem_table(struct virtio_net **pdev,
        return RTE_VHOST_MSG_RESULT_OK;
 
 free_mem_table:
-       free_mem_region(dev);
+       free_all_mem_regions(dev);
        rte_free(dev->mem);
        dev->mem = NULL;
+       rte_free(dev->guest_pages);
+       dev->guest_pages = NULL;
+close_msg_fds:
+       close_msg_fds(ctx);
+       return RTE_VHOST_MSG_RESULT_ERR;
+}
+
+
+static int
+vhost_user_get_max_mem_slots(struct virtio_net **pdev __rte_unused,
+                       struct vhu_msg_context *ctx,
+                       int main_fd __rte_unused)
+{
+       uint32_t max_mem_slots = VHOST_MEMORY_MAX_NREGIONS;
+
+       ctx->msg.payload.u64 = (uint64_t)max_mem_slots;
+       ctx->msg.size = sizeof(ctx->msg.payload.u64);
+       ctx->fd_num = 0;
 
-free_guest_pages:
+       return RTE_VHOST_MSG_RESULT_REPLY;
+}
+
+static int
+vhost_user_add_mem_reg(struct virtio_net **pdev,
+                       struct vhu_msg_context *ctx,
+                       int main_fd)
+{
+       struct virtio_net *dev = *pdev;
+       struct VhostUserMemoryRegion *region = &ctx->msg.payload.memory_single.region;
+       uint32_t i;
+
+       /* make sure new region will fit */
+       if (dev->mem != NULL && dev->mem->nregions >= VHOST_MEMORY_MAX_NREGIONS) {
+               VHOST_CONFIG_LOG(dev->ifname, ERR,
+                       "too many memory regions already (%u)",
+                       dev->mem->nregions);
+               goto close_msg_fds;
+       }
+
+       /* make sure supplied memory fd present */
+       if (ctx->fd_num != 1) {
+               VHOST_CONFIG_LOG(dev->ifname, ERR,
+                       "fd count makes no sense (%u)",
+                       ctx->fd_num);
+               goto close_msg_fds;
+       }
+
+       /* Make sure no overlap in guest virtual address space */
+       if (dev->mem != NULL && dev->mem->nregions > 0) {
+               for (uint32_t i = 0; i < VHOST_MEMORY_MAX_NREGIONS; i++) {
+                       struct rte_vhost_mem_region *current_region = &dev->mem->regions[i];
+
+                       if (current_region->mmap_size == 0)
+                               continue;
+
+                       uint64_t current_region_guest_start = current_region->guest_user_addr;
+                       uint64_t current_region_guest_end = current_region_guest_start
+                                                               + current_region->mmap_size - 1;
+                       uint64_t proposed_region_guest_start = region->userspace_addr;
+                       uint64_t proposed_region_guest_end = proposed_region_guest_start
+                                                               + region->memory_size - 1;
+                       bool overlap = false;
+
+                       bool current_region_guest_start_overlap =
+                               current_region_guest_start >= proposed_region_guest_start
+                               && current_region_guest_start <= proposed_region_guest_end;
+                       bool current_region_guest_end_overlap =
+                               current_region_guest_end >= proposed_region_guest_start
+                               && current_region_guest_end <= proposed_region_guest_end;
+                       bool proposed_region_guest_start_overlap =
+                               proposed_region_guest_start >= current_region_guest_start
+                               && proposed_region_guest_start <= current_region_guest_end;
+                       bool proposed_region_guest_end_overlap =
+                               proposed_region_guest_end >= current_region_guest_start
+                               && proposed_region_guest_end <= current_region_guest_end;
+
+                       overlap = current_region_guest_start_overlap
+                               || current_region_guest_end_overlap
+                               || proposed_region_guest_start_overlap
+                               || proposed_region_guest_end_overlap;
+
+                       if (overlap) {
+                               VHOST_CONFIG_LOG(dev->ifname, ERR,
+                                       "requested memory region overlaps with another region");
+                               VHOST_CONFIG_LOG(dev->ifname, ERR,
+                                       "\tRequested region address:0x%" PRIx64,
+                                       region->userspace_addr);
+                               VHOST_CONFIG_LOG(dev->ifname, ERR,
+                                       "\tRequested region size:0x%" PRIx64,
+                                       region->memory_size);
+                               VHOST_CONFIG_LOG(dev->ifname, ERR,
+                                       "\tOverlapping region address:0x%" PRIx64,
+                                       current_region->guest_user_addr);
+                               VHOST_CONFIG_LOG(dev->ifname, ERR,
+                                       "\tOverlapping region size:0x%" PRIx64,
+                                       current_region->mmap_size);
+                               goto close_msg_fds;
+                       }
+
+               }
+       }
+
+       /* convert first region add to normal memory table set */
+       if (dev->mem == NULL) {
+               if (vhost_user_initialize_memory(pdev) < 0)
+                       goto close_msg_fds;
+       }
+
+       /* find a new region and set it like memory table set does */
+       struct rte_vhost_mem_region *reg = NULL;
+       uint64_t mmap_offset;
+
+       for (uint32_t i = 0; i < VHOST_MEMORY_MAX_NREGIONS; i++) {
+               if (dev->mem->regions[i].guest_user_addr == 0) {
+                       reg = &dev->mem->regions[i];
+                       break;
+               }
+       }
+       if (reg == NULL) {
+               VHOST_CONFIG_LOG(dev->ifname, ERR, "no free memory region");
+               goto close_msg_fds;
+       }
+
+       reg->guest_phys_addr = region->guest_phys_addr;
+       reg->guest_user_addr = region->userspace_addr;
+       reg->size            = region->memory_size;
+       reg->fd              = ctx->fds[0];
+
+       mmap_offset = region->mmap_offset;
+
+       if (vhost_user_mmap_region(dev, reg, mmap_offset) < 0) {
+               VHOST_CONFIG_LOG(dev->ifname, ERR, "failed to mmap region");
+               goto close_msg_fds;
+       }
+
+       dev->mem->nregions++;
+
+       if (dev->async_copy && rte_vfio_is_enabled("vfio"))
+               async_dma_map(dev, true);
+
+       if (vhost_user_postcopy_register(dev, main_fd, ctx) < 0)
+               goto free_mem_table;
+
+       for (i = 0; i < dev->nr_vring; i++) {
+               struct vhost_virtqueue *vq = dev->virtqueue[i];
+
+               if (!vq)
+                       continue;
+
+               if (vq->desc || vq->avail || vq->used) {
+                       /* vhost_user_lock_all_queue_pairs locked all qps */
+                       VHOST_USER_ASSERT_LOCK(dev, vq, VHOST_USER_ADD_MEM_REG);
+
+                       /*
+                        * If the memory table got updated, the ring addresses
+                        * need to be translated again as virtual addresses have
+                        * changed.
+                        */
+                       vring_invalidate(dev, vq);
+
+                       translate_ring_addresses(&dev, &vq);
+                       *pdev = dev;
+               }
+       }
+
+       dump_guest_pages(dev);
+
+       return RTE_VHOST_MSG_RESULT_OK;
+
+free_mem_table:
+       free_all_mem_regions(dev);
+       rte_free(dev->mem);
+       dev->mem = NULL;
        rte_free(dev->guest_pages);
        dev->guest_pages = NULL;
 close_msg_fds:
@@ -1546,6 +1749,40 @@ vhost_user_set_mem_table(struct virtio_net **pdev,
        return RTE_VHOST_MSG_RESULT_ERR;
 }
 
+static int
+vhost_user_rem_mem_reg(struct virtio_net **pdev,
+                       struct vhu_msg_context *ctx,
+                       int main_fd __rte_unused)
+{
+       struct virtio_net *dev = *pdev;
+       struct VhostUserMemoryRegion *region = &ctx->msg.payload.memory_single.region;
+
+       if (dev->mem != NULL && dev->mem->nregions > 0) {
+               for (uint32_t i = 0; i < VHOST_MEMORY_MAX_NREGIONS; i++) {
+                       struct rte_vhost_mem_region *current_region = &dev->mem->regions[i];
+
+                       if (current_region->guest_user_addr == 0)
+                               continue;
+
+                       /*
+                        * According to the vhost-user specification:
+                        * The memory region to be removed is identified by its guest address,
+                        * user address and size. The mmap offset is ignored.
+                        */
+                       if (region->userspace_addr == current_region->guest_user_addr
+                               && region->guest_phys_addr == current_region->guest_phys_addr
+                               && region->memory_size == current_region->size) {
+                               free_mem_region(current_region);
+                               dev->mem->nregions--;
+                               return RTE_VHOST_MSG_RESULT_OK;
+                       }
+               }
+       }
+
+       VHOST_CONFIG_LOG(dev->ifname, ERR, "failed to find region");
+       return RTE_VHOST_MSG_RESULT_ERR;
+}
+
 static bool
 vq_is_ready(struct virtio_net *dev, struct vhost_virtqueue *vq)
 {
diff --git a/lib/vhost/vhost_user.h b/lib/vhost/vhost_user.h
index ef486545ba..5a0e747b58 100644
--- a/lib/vhost/vhost_user.h
+++ b/lib/vhost/vhost_user.h
@@ -32,6 +32,7 @@
                                         (1ULL << VHOST_USER_PROTOCOL_F_BACKEND_SEND_FD) | \
                                         (1ULL << VHOST_USER_PROTOCOL_F_HOST_NOTIFIER) | \
                                         (1ULL << VHOST_USER_PROTOCOL_F_PAGEFAULT) | \
+                                        (1ULL << VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS) | \
                                         (1ULL << VHOST_USER_PROTOCOL_F_STATUS))
 
 typedef enum VhostUserRequest {
@@ -67,6 +68,9 @@ typedef enum VhostUserRequest {
        VHOST_USER_POSTCOPY_END = 30,
        VHOST_USER_GET_INFLIGHT_FD = 31,
        VHOST_USER_SET_INFLIGHT_FD = 32,
+       VHOST_USER_GET_MAX_MEM_SLOTS = 36,
+       VHOST_USER_ADD_MEM_REG = 37,
+       VHOST_USER_REM_MEM_REG = 38,
        VHOST_USER_SET_STATUS = 39,
        VHOST_USER_GET_STATUS = 40,
 } VhostUserRequest;
@@ -91,6 +95,11 @@ typedef struct VhostUserMemory {
        VhostUserMemoryRegion regions[VHOST_MEMORY_MAX_NREGIONS];
 } VhostUserMemory;
 
+typedef struct VhostUserSingleMemReg {
+       uint64_t padding;
+       VhostUserMemoryRegion region;
+} VhostUserSingleMemReg;
+
 typedef struct VhostUserLog {
        uint64_t mmap_size;
        uint64_t mmap_offset;
@@ -186,6 +195,7 @@ typedef struct __rte_packed_begin VhostUserMsg {
                struct vhost_vring_state state;
                struct vhost_vring_addr addr;
                VhostUserMemory memory;
+               VhostUserSingleMemReg memory_single;
                VhostUserLog    log;
                struct vhost_iotlb_msg iotlb;
                VhostUserCryptoSessionParam crypto_session;
-- 
2.43.0
