};
struct fastrpc_dma_buf_attachment {
@@ -441,6 +443,7 @@ static int __fastrpc_buf_alloc(struct fastrpc_user *fl, struct device *dev,
buf->size = size;
buf->dev = dev;
buf->raddr = 0;
+ buf->list_lock = &fl->lock;
buf->virt = dma_alloc_coherent(dev, buf->size, &buf->dma_addr,
GFP_KERNEL);
@@ -1865,9 +1868,6 @@ static int fastrpc_req_munmap_impl(struct fastrpc_user *fl, struct fastrpc_buf *
&args[0]);
if (!err) {
dev_dbg(dev, "unmmap\tpt 0x%09lx OK\n", buf->raddr);
- spin_lock(&fl->lock);
- list_del(&buf->node);
- spin_unlock(&fl->lock);
fastrpc_buf_free(buf);
} else {
dev_err(dev, "unmmap\tpt 0x%09lx ERROR\n", buf->raddr);
@@ -1881,6 +1881,7 @@ static int fastrpc_req_munmap(struct fastrpc_user *fl, char __user *argp)
struct fastrpc_buf *buf = NULL, *iter, *b;
struct fastrpc_req_munmap req;
struct device *dev = fl->sctx->dev;
+ int err;
if (copy_from_user(&req, argp, sizeof(req)))
return -EFAULT;
@@ -1888,6 +1889,7 @@ static int fastrpc_req_munmap(struct fastrpc_user *fl, char __user *argp)
spin_lock(&fl->lock);
list_for_each_entry_safe(iter, b, &fl->mmaps, node) {
if ((iter->raddr == req.vaddrout) && (iter->size == req.size)) {
+ list_del(&iter->node);
buf = iter;
break;
}
@@ -1900,7 +1902,14 @@ static int fastrpc_req_munmap(struct fastrpc_user *fl, char __user *argp)
return -EINVAL;
}
- return fastrpc_req_munmap_impl(fl, buf);
+ err = fastrpc_req_munmap_impl(fl, buf);
+ if (err) {
+ spin_lock(buf->list_lock);
+ list_add_tail(&buf->node, &fl->mmaps);
+ spin_unlock(buf->list_lock);
+ }
+
+ return err;
}
static int fastrpc_req_mmap(struct fastrpc_user *fl, char __user *argp)
@@ -1985,20 +1994,23 @@ static int fastrpc_req_mmap(struct fastrpc_user *fl, char __user *argp)
}
}
- spin_lock(&fl->lock);
+ spin_lock(buf->list_lock);
list_add_tail(&buf->node, &fl->mmaps);
- spin_unlock(&fl->lock);
+ spin_unlock(buf->list_lock);
if (copy_to_user((void __user *)argp, &req, sizeof(req))) {
err = -EFAULT;
- goto err_assign;
+ goto err_copy;
}
dev_dbg(dev, "mmap\t\tpt 0x%09lx OK [len 0x%08llx]\n",
buf->raddr, buf->size);
return 0;
-
+err_copy:
+ spin_lock(buf->list_lock);
+ list_del(&buf->node);
+ spin_unlock(buf->list_lock);
err_assign:
fastrpc_req_munmap_impl(fl, buf);
--
2.43.0