On 08/07/2022 09:51, Hamza Khan wrote:
Currently, when vm_power_manager exits, we use a LIST_FOREACH macro to
iterate over VM info structures while freeing them. This leads to a
use-after-free error, because the loop reads the next pointer from an
element that has already been freed. To address this, replace all usages
of the LIST_* macros with TAILQ_* macros, and use the RTE_TAILQ_FOREACH_SAFE
macro to iterate over and delete the VM info structures.
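
For reference, below is a minimal standalone sketch of the unsafe vs. safe
tear-down pattern. It uses the plain <sys/queue.h> TAILQ macros so it builds
on its own; the struct and list names (struct node, node_list) are invented
for illustration and are not part of the patch, which uses DPDK's RTE_TAILQ_*
wrappers and RTE_TAILQ_FOREACH_SAFE instead.

#include <stdlib.h>
#include <sys/queue.h>

struct node {
	int id;
	TAILQ_ENTRY(node) entries;
};

static TAILQ_HEAD(, node) node_list = TAILQ_HEAD_INITIALIZER(node_list);

int main(void)
{
	struct node *cur, *tmp;
	int i;

	for (i = 0; i < 3; i++) {
		cur = malloc(sizeof(*cur));
		if (cur == NULL)
			return 1;
		cur->id = i;
		TAILQ_INSERT_HEAD(&node_list, cur, entries);
	}

	/*
	 * Unsafe: TAILQ_FOREACH(cur, &node_list, entries) { free(cur); }
	 * advances by reading cur after cur has already been freed.
	 *
	 * Safe: remember the next element before removing and freeing the
	 * current one -- the same idea RTE_TAILQ_FOREACH_SAFE implements.
	 */
	for (cur = TAILQ_FIRST(&node_list); cur != NULL; cur = tmp) {
		tmp = TAILQ_NEXT(cur, entries);
		TAILQ_REMOVE(&node_list, cur, entries);
		free(cur);
	}

	return 0;
}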

* The change is small and does not affect other code
* The patch was tested

Fixes: e8ae9b662506 ("examples/vm_power: channel manager and monitor in host")
Cc: alan.ca...@intel.com
Cc: sta...@dpdk.org

Signed-off-by: Hamza Khan <hamza.k...@intel.com>

---
V3: Update commit message
V2: Use RTE_TAILQ_* macros
---
  examples/vm_power_manager/channel_manager.c | 20 +++++++++++---------
  1 file changed, 11 insertions(+), 9 deletions(-)

diff --git a/examples/vm_power_manager/channel_manager.c b/examples/vm_power_manager/channel_manager.c
index 838465ab4b..e82c26ddca 100644
--- a/examples/vm_power_manager/channel_manager.c
+++ b/examples/vm_power_manager/channel_manager.c
@@ -29,6 +29,8 @@
  #include "channel_monitor.h"
  #include "power_manager.h"
+#include "rte_tailq.h"
+
  #define RTE_LOGTYPE_CHANNEL_MANAGER RTE_LOGTYPE_USER1
@@ -58,16 +60,16 @@ struct virtual_machine_info {
        virDomainInfo info;
        rte_spinlock_t config_spinlock;
        int allow_query;
-       LIST_ENTRY(virtual_machine_info) vms_info;
+       RTE_TAILQ_ENTRY(virtual_machine_info) vms_info;
  };
-LIST_HEAD(, virtual_machine_info) vm_list_head;
+RTE_TAILQ_HEAD(, virtual_machine_info) vm_list_head;
static struct virtual_machine_info *
  find_domain_by_name(const char *name)
  {
        struct virtual_machine_info *info;
-       LIST_FOREACH(info, &vm_list_head, vms_info) {
+       RTE_TAILQ_FOREACH(info, &vm_list_head, vms_info) {
                if (!strncmp(info->name, name, CHANNEL_MGR_MAX_NAME_LEN-1))
                        return info;
        }
@@ -878,7 +880,7 @@ add_vm(const char *vm_name)
        new_domain->allow_query = 0;
        rte_spinlock_init(&(new_domain->config_spinlock));
-       LIST_INSERT_HEAD(&vm_list_head, new_domain, vms_info);
+       TAILQ_INSERT_HEAD(&vm_list_head, new_domain, vms_info);
        return 0;
  }
@@ -900,7 +902,7 @@ remove_vm(const char *vm_name)
                rte_spinlock_unlock(&vm_info->config_spinlock);
                return -1;
        }
-       LIST_REMOVE(vm_info, vms_info);
+       TAILQ_REMOVE(&vm_list_head, vm_info, vms_info);
        rte_spinlock_unlock(&vm_info->config_spinlock);
        rte_free(vm_info);
        return 0;
@@ -953,7 +955,7 @@ channel_manager_init(const char *path __rte_unused)
  {
        virNodeInfo info;
-       LIST_INIT(&vm_list_head);
+       TAILQ_INIT(&vm_list_head);
        if (connect_hypervisor(path) < 0) {
                global_n_host_cpus = 64;
                global_hypervisor_available = 0;
@@ -1005,9 +1007,9 @@ channel_manager_exit(void)
  {
        unsigned i;
        char mask[RTE_MAX_LCORE];
-       struct virtual_machine_info *vm_info;
+       struct virtual_machine_info *vm_info, *tmp;
-       LIST_FOREACH(vm_info, &vm_list_head, vms_info) {
+       RTE_TAILQ_FOREACH_SAFE(vm_info, &vm_list_head, vms_info, tmp) {
                rte_spinlock_lock(&(vm_info->config_spinlock));
@@ -1022,7 +1024,7 @@ channel_manager_exit(void)
                }
                rte_spinlock_unlock(&(vm_info->config_spinlock));
-               LIST_REMOVE(vm_info, vms_info);
+               TAILQ_REMOVE(&vm_list_head, vm_info, vms_info);
                rte_free(vm_info);
        }


Acked-by: David Hunt <david.h...@intel.com>

