Some architectures support weak ordering in which case better
performance is possible. IB registered memory used for data can be
weakly ordered because the completion queues' buffers are
registered as strongly ordered. This will result in flushing all data
related outstanding DMA requests by the HCA when a completion is DMAed
to a completion queue buffer.

Signed-off-by: Eli Cohen <[EMAIL PROTECTED]>
Signed-off-by: Arnd Bergmann <[EMAIL PROTECTED]>
---
 drivers/infiniband/core/umem.c |    8 ++++++--
 include/rdma/ib_umem.h         |    2 ++
 2 files changed, 8 insertions(+), 2 deletions(-)

diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index 6f7c096..6a1ff26 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -51,8 +51,8 @@ static void __ib_umem_release(struct ib_device *dev, struct 
ib_umem *umem, int d
        int i;
 
        list_for_each_entry_safe(chunk, tmp, &umem->chunk_list, list) {
-               ib_dma_unmap_sg(dev, chunk->page_list,
-                               chunk->nents, DMA_BIDIRECTIONAL);
+               ib_dma_unmap_sg_attrs(dev, chunk->page_list,
+                                     chunk->nents, DMA_BIDIRECTIONAL, 
&chunk->attrs);
                for (i = 0; i < chunk->nents; ++i) {
                        struct page *page = sg_page(&chunk->page_list[i]);
 
@@ -91,6 +91,9 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, 
unsigned long addr,
 
        if (dmasync)
                dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);
+       else
+               dma_set_attr(DMA_ATTR_WEAK_ORDERING, &attrs);
+
 
        if (!can_do_mlock())
                return ERR_PTR(-EPERM);
@@ -155,6 +158,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, 
unsigned long addr,
                if (ret < 0)
                        goto out;
 
+               chunk->attrs = attrs;
                cur_base += ret * PAGE_SIZE;
                npages   -= ret;
 
diff --git a/include/rdma/ib_umem.h b/include/rdma/ib_umem.h
index 9ee0d2e..90f3712 100644
--- a/include/rdma/ib_umem.h
+++ b/include/rdma/ib_umem.h
@@ -36,6 +36,7 @@
 #include <linux/list.h>
 #include <linux/scatterlist.h>
 #include <linux/workqueue.h>
+#include <linux/dma-attrs.h>
 
 struct ib_ucontext;
 
@@ -56,6 +57,7 @@ struct ib_umem_chunk {
        struct list_head        list;
        int                     nents;
        int                     nmap;
+       struct dma_attrs        attrs;
        struct scatterlist      page_list[0];
 };
 
-- 
1.6.0.2

_______________________________________________
general mailing list
general@lists.openfabrics.org
http://lists.openfabrics.org/cgi-bin/mailman/listinfo/general

To unsubscribe, please visit http://openib.org/mailman/listinfo/openib-general

Reply via email to