When a bio's queue supports PCI P2PDMA, set FOLL_PCI_P2PDMA for
iov_iter_get_pages_alloc_flags(). This allows PCI P2PDMA pages to be
passed from userspace and enables NVMe passthru requests to
use P2PDMA pages.

Signed-off-by: Logan Gunthorpe <log...@deltatee.com>
---
 block/blk-map.c | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/block/blk-map.c b/block/blk-map.c
index df8b066cd548..1d6bcf193a42 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -236,6 +236,7 @@ static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,
 {
        unsigned int max_sectors = queue_max_hw_sectors(rq->q);
        unsigned int nr_vecs = iov_iter_npages(iter, BIO_MAX_VECS);
+       unsigned int flags = 0;
        struct bio *bio;
        int ret;
        int j;
@@ -248,13 +249,17 @@ static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,
                return -ENOMEM;
        bio_init(bio, NULL, bio->bi_inline_vecs, nr_vecs, req_op(rq));
 
+       if (blk_queue_pci_p2pdma(rq->q))
+               flags |= FOLL_PCI_P2PDMA;
+
        while (iov_iter_count(iter)) {
                struct page **pages;
                ssize_t bytes;
                size_t offs, added = 0;
                int npages;
 
-               bytes = iov_iter_get_pages_alloc(iter, &pages, LONG_MAX, &offs);
+               bytes = iov_iter_get_pages_alloc_flags(iter, &pages, LONG_MAX,
+                                                      &offs, flags);
                if (unlikely(bytes <= 0)) {
                        ret = bytes ? bytes : -EFAULT;
                        goto out_unmap;
-- 
2.30.2

_______________________________________________
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu

Reply via email to