Christoph Hellwig wrote:
+       if (cmd_dir == ISER_DIR_OUT) {
+               /* copy the unaligned sg the buffer which is used for RDMA */
+               struct scatterlist *p_sg = (struct scatterlist *)p_mem->p_buf;
+               int i;
+               char *p;
+
+               for (p = mem, i = 0; i < p_mem->size; i++) {
+                       memcpy(p,
+                              page_address(p_sg[i].page) + p_sg[i].offset,
+                              p_sg[i].length);
+                       p += p_sg[i].length;

Pages you get sent down in an sg list don't have to be kernel-mapped;
you need to use kmap or kmap_atomic to access them.
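
(For reference, the pattern being asked for looks roughly like the sketch
below. This is a minimal illustration, not the actual iSER code; it assumes
the old 2.6-era API where sg->page is a direct page pointer and kmap_atomic()
takes an explicit KM_* slot, and the function name is made up.)

#include <linux/highmem.h>
#include <linux/scatterlist.h>
#include <linux/string.h>

/* copy the data described by an sg list into a contiguous bounce buffer */
static void copy_sg_to_buf(struct scatterlist *sg, int nents, char *buf)
{
	char *vaddr;
	int i;

	for (i = 0; i < nents; i++) {
		/* kmap_atomic works for highmem pages that have no
		 * permanent kernel mapping, unlike page_address() */
		vaddr = kmap_atomic(sg[i].page, KM_USER0);
		memcpy(buf, vaddr + sg[i].offset, sg[i].length);
		kunmap_atomic(vaddr, KM_USER0);
		buf += sg[i].length;
	}
}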

DONE in r5506, see the patch below

Use kmap_atomic instead of page_address in the code that copies from/to an SG
list which is not aligned for RDMA.

Signed-off-by: Or Gerlitz <[EMAIL PROTECTED]>

Index: iser_memory.c
===================================================================
--- iser_memory.c       (revision 5505)
+++ iser_memory.c       (revision 5506)
@@ -140,12 +140,14 @@ int iser_start_rdma_unaligned_sg(struct
                /* copy the unaligned sg the buffer which is used for RDMA */
                struct scatterlist *sg = (struct scatterlist *)data->buf;
                int i;
-               char *p;
+               char *p, *from;

                for (p = mem, i = 0; i < data->size; i++) {
+                       from = kmap_atomic(sg[i].page, KM_USER0);
                        memcpy(p,
-                              page_address(sg[i].page) + sg[i].offset,
+                              from + sg[i].offset,
                               sg[i].length);
+                       kunmap_atomic(from, KM_USER0);
                        p += sg[i].length;
                }
        }
@@ -185,7 +187,7 @@ void iser_finalize_rdma_unaligned_sg(str
        if (ctask->dir[ISER_DIR_IN]) {
                char *mem;
                struct scatterlist *sg;
-               unsigned char *p;
+               unsigned char *p, *to;
                unsigned int sg_size;
                int i;

@@ -200,9 +202,11 @@ void iser_finalize_rdma_unaligned_sg(str
                sg_size = ctask->data[ISER_DIR_IN].size;

                for (p = mem, i = 0; i < sg_size; i++){
-                       memcpy(page_address(sg[i].page) + sg[i].offset,
+                       to = kmap_atomic(sg[i].page, KM_SOFTIRQ0);
+                       memcpy(to + sg[i].offset,
                               p,
                               sg[i].length);
+                       kunmap_atomic(to, KM_SOFTIRQ0);
                        p += sg[i].length;
                }

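
(For completeness, the read-completion direction is the mirror image: the
bounce buffer is copied back into the sg pages. A minimal sketch, again with
made-up names; the KM_SOFTIRQ0 slot matches the patch above, presumably
because iser_finalize_rdma_unaligned_sg runs from the completion handler
rather than process context.)

/* copy a contiguous bounce buffer back into the pages of an sg list */
static void copy_buf_to_sg(const char *buf, struct scatterlist *sg, int nents)
{
	char *vaddr;
	int i;

	for (i = 0; i < nents; i++) {
		vaddr = kmap_atomic(sg[i].page, KM_SOFTIRQ0);
		memcpy(vaddr + sg[i].offset, buf, sg[i].length);
		kunmap_atomic(vaddr, KM_SOFTIRQ0);
		buf += sg[i].length;
	}
}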


