Meanwhile, rename related helpers. No functional change is intended.

Reviewed-by: Kevin Tian <kevin.t...@intel.com>
Reviewed-by: Eric Auger <eric.au...@redhat.com>
Reviewed-by: Jason Gunthorpe <j...@nvidia.com>
Tested-by: Terrence Xu <terrence...@intel.com>
Tested-by: Nicolin Chen <nicol...@nvidia.com>
Tested-by: Matthew Rosato <mjros...@linux.ibm.com>
Tested-by: Yanting Jiang <yanting.ji...@intel.com>
Tested-by: Shameer Kolothum <shameerali.kolothum.th...@huawei.com>
Signed-off-by: Yi Liu <yi.l....@intel.com>
---
 virt/kvm/vfio.c | 115 ++++++++++++++++++++++++------------------------
 1 file changed, 58 insertions(+), 57 deletions(-)

diff --git a/virt/kvm/vfio.c b/virt/kvm/vfio.c
index b33c7b8488b3..8f7fa07e8170 100644
--- a/virt/kvm/vfio.c
+++ b/virt/kvm/vfio.c
@@ -21,7 +21,7 @@
 #include <asm/kvm_ppc.h>
 #endif
 
-struct kvm_vfio_group {
+struct kvm_vfio_file {
        struct list_head node;
        struct file *file;
 #ifdef CONFIG_SPAPR_TCE_IOMMU
@@ -30,7 +30,7 @@ struct kvm_vfio_group {
 };
 
 struct kvm_vfio {
-       struct list_head group_list;
+       struct list_head file_list;
        struct mutex lock;
        bool noncoherent;
 };
@@ -98,34 +98,35 @@ static struct iommu_group *kvm_vfio_file_iommu_group(struct file *file)
 }
 
 static void kvm_spapr_tce_release_vfio_group(struct kvm *kvm,
-                                            struct kvm_vfio_group *kvg)
+                                            struct kvm_vfio_file *kvf)
 {
-       if (WARN_ON_ONCE(!kvg->iommu_group))
+       if (WARN_ON_ONCE(!kvf->iommu_group))
                return;
 
-       kvm_spapr_tce_release_iommu_group(kvm, kvg->iommu_group);
-       iommu_group_put(kvg->iommu_group);
-       kvg->iommu_group = NULL;
+       kvm_spapr_tce_release_iommu_group(kvm, kvf->iommu_group);
+       iommu_group_put(kvf->iommu_group);
+       kvf->iommu_group = NULL;
 }
 #endif
 
 /*
- * Groups can use the same or different IOMMU domains.  If the same then
- * adding a new group may change the coherency of groups we've previously
- * been told about.  We don't want to care about any of that so we retest
- * each group and bail as soon as we find one that's noncoherent.  This
- * means we only ever [un]register_noncoherent_dma once for the whole device.
+ * Groups/devices can use the same or different IOMMU domains. If the same
+ * then adding a new group/device may change the coherency of groups/devices
+ * we've previously been told about. We don't want to care about any of
+ * that so we retest each group/device and bail as soon as we find one that's
+ * noncoherent.  This means we only ever [un]register_noncoherent_dma once
+ * for the whole device.
  */
 static void kvm_vfio_update_coherency(struct kvm_device *dev)
 {
        struct kvm_vfio *kv = dev->private;
        bool noncoherent = false;
-       struct kvm_vfio_group *kvg;
+       struct kvm_vfio_file *kvf;
 
        mutex_lock(&kv->lock);
 
-       list_for_each_entry(kvg, &kv->group_list, node) {
-               if (!kvm_vfio_file_enforced_coherent(kvg->file)) {
+       list_for_each_entry(kvf, &kv->file_list, node) {
+               if (!kvm_vfio_file_enforced_coherent(kvf->file)) {
                        noncoherent = true;
                        break;
                }
@@ -143,10 +144,10 @@ static void kvm_vfio_update_coherency(struct kvm_device *dev)
        mutex_unlock(&kv->lock);
 }
 
-static int kvm_vfio_group_add(struct kvm_device *dev, unsigned int fd)
+static int kvm_vfio_file_add(struct kvm_device *dev, unsigned int fd)
 {
        struct kvm_vfio *kv = dev->private;
-       struct kvm_vfio_group *kvg;
+       struct kvm_vfio_file *kvf;
        struct file *filp;
        int ret;
 
@@ -162,27 +163,27 @@ static int kvm_vfio_group_add(struct kvm_device *dev, unsigned int fd)
 
        mutex_lock(&kv->lock);
 
-       list_for_each_entry(kvg, &kv->group_list, node) {
-               if (kvg->file == filp) {
+       list_for_each_entry(kvf, &kv->file_list, node) {
+               if (kvf->file == filp) {
                        ret = -EEXIST;
                        goto err_unlock;
                }
        }
 
-       kvg = kzalloc(sizeof(*kvg), GFP_KERNEL_ACCOUNT);
-       if (!kvg) {
+       kvf = kzalloc(sizeof(*kvf), GFP_KERNEL_ACCOUNT);
+       if (!kvf) {
                ret = -ENOMEM;
                goto err_unlock;
        }
 
-       kvg->file = filp;
-       list_add_tail(&kvg->node, &kv->group_list);
+       kvf->file = filp;
+       list_add_tail(&kvf->node, &kv->file_list);
 
        kvm_arch_start_assignment(dev->kvm);
 
        mutex_unlock(&kv->lock);
 
-       kvm_vfio_file_set_kvm(kvg->file, dev->kvm);
+       kvm_vfio_file_set_kvm(kvf->file, dev->kvm);
        kvm_vfio_update_coherency(dev);
 
        return 0;
@@ -193,10 +194,10 @@ static int kvm_vfio_group_add(struct kvm_device *dev, unsigned int fd)
        return ret;
 }
 
-static int kvm_vfio_group_del(struct kvm_device *dev, unsigned int fd)
+static int kvm_vfio_file_del(struct kvm_device *dev, unsigned int fd)
 {
        struct kvm_vfio *kv = dev->private;
-       struct kvm_vfio_group *kvg;
+       struct kvm_vfio_file *kvf;
        struct fd f;
        int ret;
 
@@ -208,18 +209,18 @@ static int kvm_vfio_group_del(struct kvm_device *dev, unsigned int fd)
 
        mutex_lock(&kv->lock);
 
-       list_for_each_entry(kvg, &kv->group_list, node) {
-               if (kvg->file != f.file)
+       list_for_each_entry(kvf, &kv->file_list, node) {
+               if (kvf->file != f.file)
                        continue;
 
-               list_del(&kvg->node);
+               list_del(&kvf->node);
                kvm_arch_end_assignment(dev->kvm);
 #ifdef CONFIG_SPAPR_TCE_IOMMU
-               kvm_spapr_tce_release_vfio_group(dev->kvm, kvg);
+               kvm_spapr_tce_release_vfio_group(dev->kvm, kvf);
 #endif
-               kvm_vfio_file_set_kvm(kvg->file, NULL);
-               fput(kvg->file);
-               kfree(kvg);
+               kvm_vfio_file_set_kvm(kvf->file, NULL);
+               fput(kvf->file);
+               kfree(kvf);
                ret = 0;
                break;
        }
@@ -234,12 +235,12 @@ static int kvm_vfio_group_del(struct kvm_device *dev, unsigned int fd)
 }
 
 #ifdef CONFIG_SPAPR_TCE_IOMMU
-static int kvm_vfio_group_set_spapr_tce(struct kvm_device *dev,
-                                       void __user *arg)
+static int kvm_vfio_file_set_spapr_tce(struct kvm_device *dev,
+                                      void __user *arg)
 {
        struct kvm_vfio_spapr_tce param;
        struct kvm_vfio *kv = dev->private;
-       struct kvm_vfio_group *kvg;
+       struct kvm_vfio_file *kvf;
        struct fd f;
        int ret;
 
@@ -254,20 +255,20 @@ static int kvm_vfio_group_set_spapr_tce(struct kvm_device *dev,
 
        mutex_lock(&kv->lock);
 
-       list_for_each_entry(kvg, &kv->group_list, node) {
-               if (kvg->file != f.file)
+       list_for_each_entry(kvf, &kv->file_list, node) {
+               if (kvf->file != f.file)
                        continue;
 
-               if (!kvg->iommu_group) {
-                       kvg->iommu_group = kvm_vfio_file_iommu_group(kvg->file);
-                       if (WARN_ON_ONCE(!kvg->iommu_group)) {
+               if (!kvf->iommu_group) {
+                       kvf->iommu_group = kvm_vfio_file_iommu_group(kvf->file);
+                       if (WARN_ON_ONCE(!kvf->iommu_group)) {
                                ret = -EIO;
                                goto err_fdput;
                        }
                }
 
                ret = kvm_spapr_tce_attach_iommu_group(dev->kvm, param.tablefd,
-                                                      kvg->iommu_group);
+                                                      kvf->iommu_group);
                break;
        }
 
@@ -278,8 +279,8 @@ static int kvm_vfio_group_set_spapr_tce(struct kvm_device *dev,
 }
 #endif
 
-static int kvm_vfio_set_group(struct kvm_device *dev, long attr,
-                             void __user *arg)
+static int kvm_vfio_set_file(struct kvm_device *dev, long attr,
+                            void __user *arg)
 {
        int32_t __user *argp = arg;
        int32_t fd;
@@ -288,16 +289,16 @@ static int kvm_vfio_set_group(struct kvm_device *dev, long attr,
        case KVM_DEV_VFIO_GROUP_ADD:
                if (get_user(fd, argp))
                        return -EFAULT;
-               return kvm_vfio_group_add(dev, fd);
+               return kvm_vfio_file_add(dev, fd);
 
        case KVM_DEV_VFIO_GROUP_DEL:
                if (get_user(fd, argp))
                        return -EFAULT;
-               return kvm_vfio_group_del(dev, fd);
+               return kvm_vfio_file_del(dev, fd);
 
 #ifdef CONFIG_SPAPR_TCE_IOMMU
        case KVM_DEV_VFIO_GROUP_SET_SPAPR_TCE:
-               return kvm_vfio_group_set_spapr_tce(dev, arg);
+               return kvm_vfio_file_set_spapr_tce(dev, arg);
 #endif
        }
 
@@ -309,8 +310,8 @@ static int kvm_vfio_set_attr(struct kvm_device *dev,
 {
        switch (attr->group) {
        case KVM_DEV_VFIO_GROUP:
-               return kvm_vfio_set_group(dev, attr->attr,
-                                         u64_to_user_ptr(attr->addr));
+               return kvm_vfio_set_file(dev, attr->attr,
+                                        u64_to_user_ptr(attr->addr));
        }
 
        return -ENXIO;
@@ -339,16 +340,16 @@ static int kvm_vfio_has_attr(struct kvm_device *dev,
 static void kvm_vfio_release(struct kvm_device *dev)
 {
        struct kvm_vfio *kv = dev->private;
-       struct kvm_vfio_group *kvg, *tmp;
+       struct kvm_vfio_file *kvf, *tmp;
 
-       list_for_each_entry_safe(kvg, tmp, &kv->group_list, node) {
+       list_for_each_entry_safe(kvf, tmp, &kv->file_list, node) {
 #ifdef CONFIG_SPAPR_TCE_IOMMU
-               kvm_spapr_tce_release_vfio_group(dev->kvm, kvg);
+               kvm_spapr_tce_release_vfio_group(dev->kvm, kvf);
 #endif
-               kvm_vfio_file_set_kvm(kvg->file, NULL);
-               fput(kvg->file);
-               list_del(&kvg->node);
-               kfree(kvg);
+               kvm_vfio_file_set_kvm(kvf->file, NULL);
+               fput(kvf->file);
+               list_del(&kvf->node);
+               kfree(kvf);
                kvm_arch_end_assignment(dev->kvm);
        }
 
@@ -382,7 +383,7 @@ static int kvm_vfio_create(struct kvm_device *dev, u32 type)
        if (!kv)
                return -ENOMEM;
 
-       INIT_LIST_HEAD(&kv->group_list);
+       INIT_LIST_HEAD(&kv->file_list);
        mutex_init(&kv->lock);
 
        dev->private = kv;
-- 
2.34.1

Reply via email to