amdxdna_flush() was introduced to ensure that the device does not access
a process address space after it has been freed. However, this is no
longer necessary because the driver now increments the mm reference count
when a command is submitted and decrements it only after the command has
completed. This guarantees that the process address space remains valid
for the entire duration of command execution. Remove amdxdna_flush() to
simplify the teardown path.
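
For reference, the lifetime guarantee described above amounts to pinning the
submitting process's mm for as long as the command is in flight. A minimal
sketch, assuming a hypothetical per-command structure; mmget()/mmput() are
the actual kernel APIs for taking and dropping an mm_users reference, the
surrounding names are illustrative only:

	/* at submit time: pin the mm so the address space stays valid */
	cmd->mm = current->mm;
	mmget(cmd->mm);

	/* in the command-completion path: drop the pin */
	mmput(cmd->mm);
	cmd->mm = NULL;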

Signed-off-by: Lizhi Hou <[email protected]>
---
 drivers/accel/amdxdna/amdxdna_pci_drv.c | 29 ++++++++++---------------
 1 file changed, 11 insertions(+), 18 deletions(-)

diff --git a/drivers/accel/amdxdna/amdxdna_pci_drv.c b/drivers/accel/amdxdna/amdxdna_pci_drv.c
index 1973ab67721b..fcf7e7869007 100644
--- a/drivers/accel/amdxdna/amdxdna_pci_drv.c
+++ b/drivers/accel/amdxdna/amdxdna_pci_drv.c
@@ -105,43 +105,38 @@ static int amdxdna_drm_open(struct drm_device *ddev, struct drm_file *filp)
        return ret;
 }
 
-static void amdxdna_drm_close(struct drm_device *ddev, struct drm_file *filp)
+static void amdxdna_client_cleanup(struct amdxdna_client *client)
 {
-       struct amdxdna_client *client = filp->driver_priv;
-       struct amdxdna_dev *xdna = to_xdna_dev(ddev);
-
-       XDNA_DBG(xdna, "closing pid %d", client->pid);
-
+       list_del(&client->node);
+       amdxdna_hwctx_remove_all(client);
        xa_destroy(&client->hwctx_xa);
        cleanup_srcu_struct(&client->hwctx_srcu);
        mutex_destroy(&client->mm_lock);
+
        if (client->dev_heap)
                drm_gem_object_put(to_gobj(client->dev_heap));
 
        iommu_sva_unbind_device(client->sva);
 
-       XDNA_DBG(xdna, "pid %d closed", client->pid);
        kfree(client);
 }
 
-static int amdxdna_flush(struct file *f, fl_owner_t id)
+static void amdxdna_drm_close(struct drm_device *ddev, struct drm_file *filp)
 {
-       struct drm_file *filp = f->private_data;
        struct amdxdna_client *client = filp->driver_priv;
-       struct amdxdna_dev *xdna = client->xdna;
+       struct amdxdna_dev *xdna = to_xdna_dev(ddev);
        int idx;
 
-       XDNA_DBG(xdna, "PID %d flushing...", client->pid);
+       XDNA_DBG(xdna, "closing pid %d", client->pid);
+
        if (!drm_dev_enter(&xdna->ddev, &idx))
-               return 0;
+               return;
 
        mutex_lock(&xdna->dev_lock);
-       list_del_init(&client->node);
-       amdxdna_hwctx_remove_all(client);
+       amdxdna_client_cleanup(client);
        mutex_unlock(&xdna->dev_lock);
 
        drm_dev_exit(idx);
-       return 0;
 }
 
 static int amdxdna_drm_get_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
@@ -217,7 +212,6 @@ static const struct file_operations amdxdna_fops = {
        .owner          = THIS_MODULE,
        .open           = accel_open,
        .release        = drm_release,
-       .flush          = amdxdna_flush,
        .unlocked_ioctl = drm_ioctl,
        .compat_ioctl   = drm_compat_ioctl,
        .poll           = drm_poll,
@@ -333,8 +327,7 @@ static void amdxdna_remove(struct pci_dev *pdev)
        client = list_first_entry_or_null(&xdna->client_list,
                                          struct amdxdna_client, node);
        while (client) {
-               list_del_init(&client->node);
-               amdxdna_hwctx_remove_all(client);
+               amdxdna_client_cleanup(client);
 
                client = list_first_entry_or_null(&xdna->client_list,
                                                  struct amdxdna_client, node);
-- 
2.34.1
