On 2/4/2026 5:15 AM, Dmitry Baryshkov wrote:
On Mon, Feb 02, 2026 at 02:51:33PM +0800, Jianping wrote:


On 1/16/2026 4:47 AM, Dmitry Baryshkov wrote:
On Thu, Jan 15, 2026 at 04:28:50PM +0800, Jianping Li wrote:
From: Ekansh Gupta <[email protected]>

fastrpc_req_munmap_impl() is called to unmap any buffer. The buffer is
removed from the list only after it has been unmapped from the DSP. This
can create a race condition if another thread removes the entry from the
list while the unmap operation is ongoing. Remove the entry before
calling the unmap operation.
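
In other words, the ordering the patch moves to is: take the entry off the
list under the lock first, then perform the DSP unmap, and re-insert the
entry only if the unmap fails. A condensed sketch of that flow (hypothetical
helper name, identifiers taken from the driver):

	static int fastrpc_unmap_buf_sketch(struct fastrpc_user *fl,
					    struct fastrpc_buf *buf)
	{
		int err;

		/* Make the buffer unreachable to other threads first. */
		spin_lock(&fl->lock);
		list_del(&buf->node);
		spin_unlock(&fl->lock);

		/* Only then issue the unmap request to the DSP. */
		err = fastrpc_req_munmap_impl(fl, buf);
		if (err) {
			/* Re-insert on failure so the entry is not lost. */
			spin_lock(&fl->lock);
			list_add_tail(&buf->node, &fl->mmaps);
			spin_unlock(&fl->lock);
		}

		return err;
	}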

Fixes: 2419e55e532de ("misc: fastrpc: add mmap/unmap support")
Cc: [email protected]
Co-developed-by: Ekansh Gupta <[email protected]>
Signed-off-by: Ekansh Gupta <[email protected]>
Signed-off-by: Jianping Li <[email protected]>
---
   drivers/misc/fastrpc.c | 28 ++++++++++++++++++++--------
   1 file changed, 20 insertions(+), 8 deletions(-)

diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
index 4f12fa5a05aa..833c265add5e 100644
--- a/drivers/misc/fastrpc.c
+++ b/drivers/misc/fastrpc.c
@@ -202,6 +202,8 @@ struct fastrpc_buf {
        /* mmap support */
        struct list_head node; /* list of user requested mmaps */
        uintptr_t raddr;
+       /* Lock for buf->node */
+       spinlock_t *list_lock;

Why do you need to lock this? Isn't fl->lock enough?

According to the discussion on the v1 patch:
https://lore.kernel.org/all/p6cc5lxufmefeulx5bhlh6q6ivwluqf2muj3hu5e5526fsppuu@brcy6arm7epg/

The lock is stored in fastrpc_buf here.

That was a separate topic. So, why isn't fl->lock enough? What is the
race that it doesn't prevent?
"Is fl->lock not enough?" — At the granularity of the lock, fl->lock is sufficient. And you can see buf->list_lock reuses fl->lock.

The purpose of doing this is to pass the lock along with the buf via the buf->list_lock pointer, so that operations on buf->node no longer need to look up fl. That reduces coupling and lowers the chance of errors.

If you think buf->list_lock is unnecessary, I can remove it and use fl->lock directly, if that makes more sense.
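
For illustration, a minimal sketch (hypothetical helper name) of what the
pointer buys: code that only holds the buf can still serialise list
operations on buf->node without dereferencing fl:

	/* buf->list_lock points at the owning fl->lock. */
	static void fastrpc_buf_unlink_sketch(struct fastrpc_buf *buf)
	{
		spin_lock(buf->list_lock);
		list_del(&buf->node);
		spin_unlock(buf->list_lock);
	}

Either way it is the same spinlock underneath, so switching these call
sites back to fl->lock would not change the locking behaviour.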



   };
   struct fastrpc_dma_buf_attachment {
@@ -441,6 +443,7 @@ static int __fastrpc_buf_alloc(struct fastrpc_user *fl, struct device *dev,
        buf->size = size;
        buf->dev = dev;
        buf->raddr = 0;
+       buf->list_lock = &fl->lock;
        buf->virt = dma_alloc_coherent(dev, buf->size, &buf->dma_addr,
                                       GFP_KERNEL);
@@ -1865,9 +1868,6 @@ static int fastrpc_req_munmap_impl(struct fastrpc_user *fl, struct fastrpc_buf *
                                      &args[0]);
        if (!err) {
                dev_dbg(dev, "unmmap\tpt 0x%09lx OK\n", buf->raddr);
-               spin_lock(&fl->lock);
-               list_del(&buf->node);
-               spin_unlock(&fl->lock);
                fastrpc_buf_free(buf);
        } else {
                dev_err(dev, "unmmap\tpt 0x%09lx ERROR\n", buf->raddr);
@@ -1881,6 +1881,7 @@ static int fastrpc_req_munmap(struct fastrpc_user *fl, char __user *argp)
        struct fastrpc_buf *buf = NULL, *iter, *b;
        struct fastrpc_req_munmap req;
        struct device *dev = fl->sctx->dev;
+       int err;
        if (copy_from_user(&req, argp, sizeof(req)))
                return -EFAULT;
@@ -1888,6 +1889,7 @@ static int fastrpc_req_munmap(struct fastrpc_user *fl, char __user *argp)
        spin_lock(&fl->lock);
        list_for_each_entry_safe(iter, b, &fl->mmaps, node) {
                if ((iter->raddr == req.vaddrout) && (iter->size == req.size)) {
+                       list_del(&iter->node);
                        buf = iter;
                        break;
                }
@@ -1900,7 +1902,14 @@ static int fastrpc_req_munmap(struct fastrpc_user *fl, char __user *argp)
                return -EINVAL;
        }
-       return fastrpc_req_munmap_impl(fl, buf);
+       err = fastrpc_req_munmap_impl(fl, buf);
+       if (err) {
+               spin_lock(buf->list_lock);
+               list_add_tail(&buf->node, &fl->mmaps);
+               spin_unlock(buf->list_lock);
+       }
+
+       return err;
   }
   static int fastrpc_req_mmap(struct fastrpc_user *fl, char __user *argp)
@@ -1985,20 +1994,23 @@ static int fastrpc_req_mmap(struct fastrpc_user *fl, char __user *argp)
                }
        }
-       spin_lock(&fl->lock);
+       spin_lock(buf->list_lock);
        list_add_tail(&buf->node, &fl->mmaps);
-       spin_unlock(&fl->lock);
+       spin_unlock(buf->list_lock);
        if (copy_to_user((void __user *)argp, &req, sizeof(req))) {
                err = -EFAULT;
-               goto err_assign;
+               goto err_copy;
        }
        dev_dbg(dev, "mmap\t\tpt 0x%09lx OK [len 0x%08llx]\n",
                buf->raddr, buf->size);
        return 0;
-
+err_copy:
+       spin_lock(buf->list_lock);
+       list_del(&buf->node);
+       spin_unlock(buf->list_lock);
   err_assign:
        fastrpc_req_munmap_impl(fl, buf);
--
2.43.0




