IB/core: Add DMA mapping functions to allow device drivers to interpose

The QLogic InfiniPath HCAs use programmed I/O instead of hardware DMA.
This patch allows a verbs device driver to interpose on DMA mapping
function calls in order to avoid relying on bus_to_virt() and
phys_to_virt() to undo the mappings created by dma_map_single(),
dma_map_sg(), etc.

From: Ralph Campbell <[EMAIL PROTECTED]>
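
(Not part of the patch, just to illustrate the intended use.)  Below is a
rough sketch of how a driver whose hardware does programmed I/O might
supply its own mapping ops; the foo_* names and struct foo_devdata are
made up for this example.  A driver that installs dma_ops must fill in
every handler, since the ib_dma_*() wrappers in the patch call them
unconditionally whenever dma_ops is non-NULL.

#include <rdma/ib_verbs.h>

/* Illustrative handlers: keep kernel virtual addresses, no bus mapping. */
static int foo_mapping_error(struct ib_device *dev, dma_addr_t dma_addr)
{
        return dma_addr == 0;
}

static dma_addr_t foo_map_single(struct ib_device *dev, void *cpu_addr,
                                 size_t size,
                                 enum dma_data_direction direction)
{
        return (dma_addr_t) (unsigned long) cpu_addr;
}

static void foo_unmap_single(struct ib_device *dev, dma_addr_t addr,
                             size_t size,
                             enum dma_data_direction direction)
{
        /* nothing to undo */
}

static struct ib_dma_mapping_ops foo_dma_mapping_ops = {
        .mapping_error  = foo_mapping_error,
        .map_single     = foo_map_single,
        .unmap_single   = foo_unmap_single,
        /* map_page, map_sg, dma_address, sync_*, etc. filled in likewise */
};

/* In the driver's registration path (struct foo_devdata is hypothetical): */
static int foo_register(struct foo_devdata *dd)
{
        dd->ibdev.dma_ops = &foo_dma_mapping_ops;
        return ib_register_device(&dd->ibdev);
}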

diff -r f37bd0e41fec include/rdma/ib_verbs.h
--- a/include/rdma/ib_verbs.h   Thu Oct 26 21:44:41 2006 +0700
+++ b/include/rdma/ib_verbs.h   Thu Oct 26 16:10:04 2006 -0800
@@ -43,6 +43,8 @@
 
 #include <linux/types.h>
 #include <linux/device.h>
+#include <linux/mm.h>
+#include <linux/dma-mapping.h>
 
 #include <asm/atomic.h>
 #include <asm/scatterlist.h>
@@ -846,6 +848,42 @@ struct ib_cache {
        struct ib_pkey_cache  **pkey_cache;
        struct ib_gid_cache   **gid_cache;
        u8                     *lmc_cache;
+};
+
+struct ib_dma_mapping_ops {
+       int             (*mapping_error)(struct ib_device *dev,
+                                        dma_addr_t dma_addr);
+       dma_addr_t      (*map_single)(struct ib_device *dev,
+                                     void *ptr, size_t size,
+                                     enum dma_data_direction direction);
+       void            (*unmap_single)(struct ib_device *dev,
+                                       dma_addr_t addr, size_t size,
+                                       enum dma_data_direction direction);
+       dma_addr_t      (*map_page)(struct ib_device *dev,
+                                   struct page *page, unsigned long offset,
+                                   size_t size,
+                                   enum dma_data_direction direction);
+       void            (*unmap_page)(struct ib_device *dev,
+                                     dma_addr_t addr, size_t size,
+                                     enum dma_data_direction direction);
+       int             (*map_sg)(struct ib_device *dev,
+                                 struct scatterlist *sg, int nents,
+                                 enum dma_data_direction direction);
+       void            (*unmap_sg)(struct ib_device *dev,
+                                   struct scatterlist *sg, int nents,
+                                   enum dma_data_direction direction);
+       dma_addr_t      (*dma_address)(struct ib_device *dev,
+                                      struct scatterlist *sg);
+       unsigned int    (*dma_len)(struct ib_device *dev,
+                                  struct scatterlist *sg);
+       void            (*sync_single_for_cpu)(struct ib_device *dev,
+                                              dma_addr_t dma_handle,
+                                              size_t size,
+                                              enum dma_data_direction dir);
+       void            (*sync_single_for_device)(struct ib_device *dev,
+                                                 dma_addr_t dma_handle,
+                                                 size_t size,
+                                                 enum dma_data_direction dir);
 };
 
 struct iw_cm_verbs;
@@ -992,6 +1030,8 @@ struct ib_device {
                                                  struct ib_mad *in_mad,
                                                  struct ib_mad *out_mad);
 
+       struct ib_dma_mapping_ops   *dma_ops;
+
        struct module               *owner;
        struct class_device          class_dev;
        struct kobject               ports_parent;
@@ -1395,8 +1435,182 @@ static inline int ib_req_ncomp_notif(str
  *   usable for DMA.
  * @pd: The protection domain associated with the memory region.
  * @mr_access_flags: Specifies the memory access rights.
+ *
+ * Note that the ib_dma_*() functions defined below must be used
+ * to create/destroy addresses used with the Lkey or Rkey returned
+ * by ib_get_dma_mr().
  */
 struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags);
+
+/**
+ * ib_dma_mapping_error - check a dma_addr_t for error
+ * @dev: The device for which the dma_addr was created
+ * @dma_addr: The DMA address to check
+ */
+static inline int ib_dma_mapping_error(struct ib_device *dev,
+                                      dma_addr_t dma_addr)
+{
+       return dev->dma_ops ?
+               dev->dma_ops->mapping_error(dev, dma_addr) :
+               dma_mapping_error(dma_addr);
+}
+
+/**
+ * ib_dma_map_single - Map a kernel virtual address to DMA address
+ * @dev: The device for which the dma_addr is to be created
+ * @cpu_addr: The kernel virtual address
+ * @size: The size of the region in bytes
+ * @direction: The direction of the DMA
+ */
+static inline dma_addr_t ib_dma_map_single(struct ib_device *dev,
+                                          void *cpu_addr, size_t size,
+                                          enum dma_data_direction direction)
+{
+       return dev->dma_ops ?
+               dev->dma_ops->map_single(dev, cpu_addr, size, direction) :
+               dma_map_single(dev->dma_device, cpu_addr, size, direction);
+}
+
+/**
+ * ib_dma_unmap_single - Destroy a mapping created by ib_dma_map_single()
+ * @dev: The device for which the DMA address was created
+ * @addr: The DMA address
+ * @size: The size of the region in bytes
+ * @direction: The direction of the DMA
+ */
+static inline void ib_dma_unmap_single(struct ib_device *dev,
+                                      dma_addr_t addr, size_t size,
+                                      enum dma_data_direction direction)
+{
+       dev->dma_ops ?
+               dev->dma_ops->unmap_single(dev, addr, size, direction) :
+               dma_unmap_single(dev->dma_device, addr, size, direction);
+}
+
+/**
+ * ib_dma_map_page - Map a physical page to DMA address
+ * @dev: The device for which the dma_addr is to be created
+ * @page: The page to be mapped
+ * @offset: The offset within the page
+ * @size: The size of the region in bytes
+ * @direction: The direction of the DMA
+ */
+static inline dma_addr_t ib_dma_map_page(struct ib_device *dev,
+                                        struct page *page,
+                                        unsigned long offset,
+                                        size_t size,
+                                        enum dma_data_direction direction)
+{
+       return dev->dma_ops ?
+               dev->dma_ops->map_page(dev, page, offset, size, direction) :
+               dma_map_page(dev->dma_device, page, offset, size, direction);
+}
+
+/**
+ * ib_dma_unmap_page - Destroy a mapping created by ib_dma_map_page()
+ * @dev: The device for which the DMA address was created
+ * @addr: The DMA address
+ * @size: The size of the region in bytes
+ * @direction: The direction of the DMA
+ */
+static inline void ib_dma_unmap_page(struct ib_device *dev,
+                                    dma_addr_t addr, size_t size,
+                                    enum dma_data_direction direction)
+{
+       dev->dma_ops ?
+               dev->dma_ops->unmap_page(dev, addr, size, direction) :
+               dma_unmap_page(dev->dma_device, addr, size, direction);
+}
+
+/**
+ * ib_dma_map_sg - Map a scatter/gather list to DMA addresses
+ * @dev: The device for which the DMA addresses are to be created
+ * @sg: The array of scatter/gather entries
+ * @nents: The number of scatter/gather entries
+ * @direction: The direction of the DMA
+ */
+static inline int ib_dma_map_sg(struct ib_device *dev,
+                               struct scatterlist *sg, int nents,
+                               enum dma_data_direction direction)
+{
+       return dev->dma_ops ?
+               dev->dma_ops->map_sg(dev, sg, nents, direction) :
+               dma_map_sg(dev->dma_device, sg, nents, direction);
+}
+
+/**
+ * ib_dma_unmap_sg - Unmap a scatter/gather list of DMA addresses
+ * @dev: The device for which the DMA addresses were created
+ * @sg: The array of scatter/gather entries
+ * @nents: The number of scatter/gather entries
+ * @direction: The direction of the DMA
+ */
+static inline void ib_dma_unmap_sg(struct ib_device *dev,
+                                  struct scatterlist *sg, int nents,
+                                  enum dma_data_direction direction)
+{
+       dev->dma_ops ?
+               dev->dma_ops->unmap_sg(dev, sg, nents, direction) :
+               dma_unmap_sg(dev->dma_device, sg, nents, direction);
+}
+
+/**
+ * ib_sg_dma_address - Return the DMA address from a scatter/gather entry
+ * @dev: The device for which the DMA addresses were created
+ * @sg: The scatter/gather entry
+ */
+static inline dma_addr_t ib_sg_dma_address(struct ib_device *dev,
+                                          struct scatterlist *sg)
+{
+       return dev->dma_ops ?
+               dev->dma_ops->dma_address(dev, sg) : sg_dma_address(sg);
+}
+
+/**
+ * ib_sg_dma_len - Return the DMA length from a scatter/gather entry
+ * @dev: The device for which the DMA addresses were created
+ * @sg: The scatter/gather entry
+ */
+static inline unsigned int ib_sg_dma_len(struct ib_device *dev,
+                                        struct scatterlist *sg)
+{
+       return dev->dma_ops ?
+               dev->dma_ops->dma_len(dev, sg) : sg_dma_len(sg);
+}
+
+/**
+ * ib_dma_sync_single_for_cpu - Prepare DMA region to be accessed by CPU
+ * @dev: The device for which the DMA address was created
+ * @addr: The DMA address
+ * @size: The size of the region in bytes
+ * @dir: The direction of the DMA
+ */
+static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev,
+                                             dma_addr_t addr,
+                                             size_t size,
+                                             enum dma_data_direction dir)
+{
+       dev->dma_ops ?
+               dev->dma_ops->sync_single_for_cpu(dev, addr, size, dir) :
+               dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
+}
+
+/**
+ * ib_dma_sync_single_for_device - Prepare DMA region to be accessed by device
+ * @dev: The device for which the DMA address was created
+ * @addr: The DMA address
+ * @size: The size of the region in bytes
+ * @dir: The direction of the DMA
+ */
+static inline void ib_dma_sync_single_for_device(struct ib_device *dev,
+                                                dma_addr_t addr,
+                                                size_t size,
+                                                enum dma_data_direction dir)
+{
+       dev->dma_ops ?
+               dev->dma_ops->sync_single_for_device(dev, addr, size, dir) :
+               dma_sync_single_for_device(dev->dma_device, addr, size, dir);
+}
 
 /**
  * ib_reg_phys_mr - Prepares a virtually addressed memory region for use
diff -r f37bd0e41fec drivers/infiniband/core/mad.c
--- a/drivers/infiniband/core/mad.c     Thu Oct 26 21:44:41 2006 +0700
+++ b/drivers/infiniband/core/mad.c     Thu Oct 26 11:14:51 2006 -0800
@@ -999,16 +999,16 @@ int ib_send_mad(struct ib_mad_send_wr_pr
 
        mad_agent = mad_send_wr->send_buf.mad_agent;
        sge = mad_send_wr->sg_list;
-       sge[0].addr = dma_map_single(mad_agent->device->dma_device,
-                                    mad_send_wr->send_buf.mad,
-                                    sge[0].length,
-                                    DMA_TO_DEVICE);
+       sge[0].addr = ib_dma_map_single(mad_agent->device,
+                                       mad_send_wr->send_buf.mad,
+                                       sge[0].length,
+                                       DMA_TO_DEVICE);
        pci_unmap_addr_set(mad_send_wr, header_mapping, sge[0].addr);
 
-       sge[1].addr = dma_map_single(mad_agent->device->dma_device,
-                                    ib_get_payload(mad_send_wr),
-                                    sge[1].length,
-                                    DMA_TO_DEVICE);
+       sge[1].addr = ib_dma_map_single(mad_agent->device,
+                                       ib_get_payload(mad_send_wr),
+                                       sge[1].length,
+                                       DMA_TO_DEVICE);
        pci_unmap_addr_set(mad_send_wr, payload_mapping, sge[1].addr);
 
        spin_lock_irqsave(&qp_info->send_queue.lock, flags);
@@ -1027,12 +1027,14 @@ int ib_send_mad(struct ib_mad_send_wr_pr
        }
        spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
        if (ret) {
-               dma_unmap_single(mad_agent->device->dma_device,
-                                pci_unmap_addr(mad_send_wr, header_mapping),
-                                sge[0].length, DMA_TO_DEVICE);
-               dma_unmap_single(mad_agent->device->dma_device,
-                                pci_unmap_addr(mad_send_wr, payload_mapping),
-                                sge[1].length, DMA_TO_DEVICE);
+               ib_dma_unmap_single(mad_agent->device,
+                                   pci_unmap_addr(mad_send_wr,
+                                                  header_mapping),
+                                   sge[0].length, DMA_TO_DEVICE);
+               ib_dma_unmap_single(mad_agent->device,
+                                   pci_unmap_addr(mad_send_wr,
+                                                  payload_mapping),
+                                   sge[1].length, DMA_TO_DEVICE);
        }
        return ret;
 }
@@ -1851,11 +1853,11 @@ static void ib_mad_recv_done_handler(str
        mad_priv_hdr = container_of(mad_list, struct ib_mad_private_header,
                                    mad_list);
        recv = container_of(mad_priv_hdr, struct ib_mad_private, header);
-       dma_unmap_single(port_priv->device->dma_device,
-                        pci_unmap_addr(&recv->header, mapping),
-                        sizeof(struct ib_mad_private) -
-                        sizeof(struct ib_mad_private_header),
-                        DMA_FROM_DEVICE);
+       ib_dma_unmap_single(port_priv->device,
+                           pci_unmap_addr(&recv->header, mapping),
+                           sizeof(struct ib_mad_private) -
+                             sizeof(struct ib_mad_private_header),
+                           DMA_FROM_DEVICE);
 
        /* Setup MAD receive work completion from "normal" work completion */
        recv->header.wc = *wc;
@@ -2081,12 +2083,12 @@ static void ib_mad_send_done_handler(str
        qp_info = send_queue->qp_info;
 
 retry:
-       dma_unmap_single(mad_send_wr->send_buf.mad_agent->device->dma_device,
-                        pci_unmap_addr(mad_send_wr, header_mapping),
-                        mad_send_wr->sg_list[0].length, DMA_TO_DEVICE);
-       dma_unmap_single(mad_send_wr->send_buf.mad_agent->device->dma_device,
-                        pci_unmap_addr(mad_send_wr, payload_mapping),
-                        mad_send_wr->sg_list[1].length, DMA_TO_DEVICE);
+       ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
+                           pci_unmap_addr(mad_send_wr, header_mapping),
+                           mad_send_wr->sg_list[0].length, DMA_TO_DEVICE);
+       ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
+                           pci_unmap_addr(mad_send_wr, payload_mapping),
+                           mad_send_wr->sg_list[1].length, DMA_TO_DEVICE);
        queued_send_wr = NULL;
        spin_lock_irqsave(&send_queue->lock, flags);
        list_del(&mad_list->list);
@@ -2527,12 +2529,11 @@ static int ib_mad_post_receive_mads(stru
                                break;
                        }
                }
-               sg_list.addr = dma_map_single(qp_info->port_priv->
-                                               device->dma_device,
-                                             &mad_priv->grh,
-                                             sizeof *mad_priv -
-                                               sizeof mad_priv->header,
-                                             DMA_FROM_DEVICE);
+               sg_list.addr = ib_dma_map_single(qp_info->port_priv->device,
+                                                &mad_priv->grh,
+                                                sizeof *mad_priv -
+                                                  sizeof mad_priv->header,
+                                                DMA_FROM_DEVICE);
                pci_unmap_addr_set(&mad_priv->header, mapping, sg_list.addr);
                recv_wr.wr_id = (unsigned long)&mad_priv->header.mad_list;
                mad_priv->header.mad_list.mad_queue = recv_queue;
@@ -2548,12 +2549,12 @@ static int ib_mad_post_receive_mads(stru
                        list_del(&mad_priv->header.mad_list.list);
                        recv_queue->count--;
                        spin_unlock_irqrestore(&recv_queue->lock, flags);
-                       dma_unmap_single(qp_info->port_priv->device->dma_device,
-                                        pci_unmap_addr(&mad_priv->header,
-                                                       mapping),
-                                        sizeof *mad_priv -
-                                          sizeof mad_priv->header,
-                                        DMA_FROM_DEVICE);
+                       ib_dma_unmap_single(qp_info->port_priv->device,
+                                           pci_unmap_addr(&mad_priv->header,
+                                                          mapping),
+                                           sizeof *mad_priv -
+                                             sizeof mad_priv->header,
+                                           DMA_FROM_DEVICE);
                        kmem_cache_free(ib_mad_cache, mad_priv);
                        printk(KERN_ERR PFX "ib_post_recv failed: %d\n", ret);
                        break;
@@ -2585,11 +2586,11 @@ static void cleanup_recv_queue(struct ib
                /* Remove from posted receive MAD list */
                list_del(&mad_list->list);
 
-               dma_unmap_single(qp_info->port_priv->device->dma_device,
-                                pci_unmap_addr(&recv->header, mapping),
-                                sizeof(struct ib_mad_private) -
-                                sizeof(struct ib_mad_private_header),
-                                DMA_FROM_DEVICE);
+               ib_dma_unmap_single(qp_info->port_priv->device,
+                                   pci_unmap_addr(&recv->header, mapping),
+                                   sizeof(struct ib_mad_private) -
+                                     sizeof(struct ib_mad_private_header),
+                                   DMA_FROM_DEVICE);
                kmem_cache_free(ib_mad_cache, recv);
        }
 
diff -r f37bd0e41fec drivers/infiniband/core/uverbs_mem.c
--- a/drivers/infiniband/core/uverbs_mem.c      Thu Oct 26 21:44:41 2006 +0700
+++ b/drivers/infiniband/core/uverbs_mem.c      Thu Oct 26 11:15:36 2006 -0800
@@ -52,8 +52,8 @@ static void __ib_umem_release(struct ib_
        int i;
 
        list_for_each_entry_safe(chunk, tmp, &umem->chunk_list, list) {
-               dma_unmap_sg(dev->dma_device, chunk->page_list,
-                            chunk->nents, DMA_BIDIRECTIONAL);
+               ib_dma_unmap_sg(dev, chunk->page_list,
+                               chunk->nents, DMA_BIDIRECTIONAL);
                for (i = 0; i < chunk->nents; ++i) {
                        if (umem->writable && dirty)
                                set_page_dirty_lock(chunk->page_list[i].page);
@@ -136,10 +136,10 @@ int ib_umem_get(struct ib_device *dev, s
                                chunk->page_list[i].length = PAGE_SIZE;
                        }
 
-                       chunk->nmap = dma_map_sg(dev->dma_device,
-                                                &chunk->page_list[0],
-                                                chunk->nents,
-                                                DMA_BIDIRECTIONAL);
+                       chunk->nmap = ib_dma_map_sg(dev,
+                                                   &chunk->page_list[0],
+                                                   chunk->nents,
+                                                   DMA_BIDIRECTIONAL);
                        if (chunk->nmap <= 0) {
                                for (i = 0; i < chunk->nents; ++i)
                                        put_page(chunk->page_list[i].page);


