Re: [PATCH] staging: android: ion: Change dma_buf_kmap()/dma_buf_kmap_atomic() implementation

2018-02-17 Thread Alexey Skidanov


On 02/17/2018 01:54 AM, Laura Abbott wrote:
> On 02/16/2018 04:17 AM, Alexey Skidanov wrote:
>>
>>
>> On 02/16/2018 01:48 AM, Laura Abbott wrote:
>>> On 02/12/2018 02:33 PM, Alexey Skidanov wrote:
 Current ion kernel mapping implementation uses vmap() to map previously
 allocated buffers into kernel virtual address space.

 On 32-bit platforms, vmap() might fail on large enough buffers due
 to the
 limited available vmalloc space. dma_buf_kmap() should guarantee that
 only one page is mapped at a time.

 To fix this, kmap()/kmap_atomic() is used to implement the appropriate
 interfaces.

 Signed-off-by: Alexey Skidanov 
 ---
    drivers/staging/android/ion/ion.c | 97
 +++
    drivers/staging/android/ion/ion.h |  1 -
    2 files changed, 48 insertions(+), 50 deletions(-)

 diff --git a/drivers/staging/android/ion/ion.c
 b/drivers/staging/android/ion/ion.c
 index 57e0d80..75830e3 100644
 --- a/drivers/staging/android/ion/ion.c
 +++ b/drivers/staging/android/ion/ion.c
 @@ -27,6 +27,7 @@
    #include 
    #include 
    #include 
 +#include <linux/highmem.h>
      #include "ion.h"
    @@ -119,8 +120,6 @@ static struct ion_buffer
 *ion_buffer_create(struct ion_heap *heap,
      void ion_buffer_destroy(struct ion_buffer *buffer)
    {
 -    if (WARN_ON(buffer->kmap_cnt > 0))
 -    buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
    buffer->heap->ops->free(buffer);
    kfree(buffer);
    }
 @@ -140,34 +139,6 @@ static void _ion_buffer_destroy(struct ion_buffer
 *buffer)
    ion_buffer_destroy(buffer);
    }
    -static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
 -{
 -    void *vaddr;
 -
 -    if (buffer->kmap_cnt) {
 -    buffer->kmap_cnt++;
 -    return buffer->vaddr;
 -    }
 -    vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
 -    if (WARN_ONCE(!vaddr,
 -  "heap->ops->map_kernel should return ERR_PTR on error"))
 -    return ERR_PTR(-EINVAL);
 -    if (IS_ERR(vaddr))
 -    return vaddr;
 -    buffer->vaddr = vaddr;
 -    buffer->kmap_cnt++;
 -    return vaddr;
 -}
 -
 -static void ion_buffer_kmap_put(struct ion_buffer *buffer)
 -{
 -    buffer->kmap_cnt--;
 -    if (!buffer->kmap_cnt) {
 -    buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
 -    buffer->vaddr = NULL;
 -    }
 -}
 -
    static struct sg_table *dup_sg_table(struct sg_table *table)
    {
    struct sg_table *new_table;
 @@ -305,34 +276,68 @@ static void ion_dma_buf_release(struct dma_buf
 *dmabuf)
    _ion_buffer_destroy(buffer);
    }
    +static inline int sg_page_count(struct scatterlist *sg)
 +{
 +    return PAGE_ALIGN(sg->offset + sg->length) >> PAGE_SHIFT;
 +}
 +
 +static struct page *ion_dma_buf_get_page(struct sg_table *sg_table,
 + unsigned long offset)
 +{
 +    struct page *page;
 +    struct scatterlist *sg;
 +    int i;
 +    unsigned int nr_pages;
 +
 +    nr_pages = 0;
 +    page = NULL;
 +    /* Find the page with specified offset */
 +    for_each_sg(sg_table->sgl, sg, sg_table->nents, i) {
 +    if (nr_pages + sg_page_count(sg) > offset) {
 +    page = nth_page(sg_page(sg), offset - nr_pages);
 +    break;
 +    }
 +
 +    nr_pages += sg_page_count(sg);
 +    }
 +
 +    return page;
 +}
    static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long
 offset)
    {
    struct ion_buffer *buffer = dmabuf->priv;
    -    return buffer->vaddr + offset * PAGE_SIZE;
 +    return kmap(ion_dma_buf_get_page(buffer->sg_table, offset));
    }
>>>
>>> This unfortunately doesn't work for uncached buffers. We need to create
>>> an uncached alias, otherwise we break what little coherency guarantee
>>> there is.
>>> I'm still convinced that we can't actually do the uncached buffers
>>> correctly and they should be removed...
>>>
>>> Thanks,
>>> Laura
>>>
>> I assume your concern is a possible cache coherency issue resulting from
>> memory accesses with different cache attributes, especially if these
>> accesses come from different contexts.
>>
>> But dma-buf requires that CPU access be started with
>> dma_buf_begin_cpu_access() and ended with dma_buf_end_cpu_access() (and
>> with the corresponding ioctl calls from user space) to ensure that there
>> are no I/O coherency issues. That is, the appropriate cache lines are
>> invalidated before the access and flushed after it (in the case of
>> read/write access).
>>
>> So it seems that at the end of each CPU access, the most up-to-date copy
>> of the buffer is in RAM.

Re: [PATCH] staging: android: ion: Change dma_buf_kmap()/dma_buf_kmap_atomic() implementation

2018-02-16 Thread Laura Abbott

On 02/16/2018 04:17 AM, Alexey Skidanov wrote:



On 02/16/2018 01:48 AM, Laura Abbott wrote:

On 02/12/2018 02:33 PM, Alexey Skidanov wrote:

Current ion kernel mapping implementation uses vmap() to map previously
allocated buffers into kernel virtual address space.

On 32-bit platforms, vmap() might fail on large enough buffers due to the
limited available vmalloc space. dma_buf_kmap() should guarantee that
only one page is mapped at a time.

To fix this, kmap()/kmap_atomic() is used to implement the appropriate
interfaces.

Signed-off-by: Alexey Skidanov 
---
   drivers/staging/android/ion/ion.c | 97
+++
   drivers/staging/android/ion/ion.h |  1 -
   2 files changed, 48 insertions(+), 50 deletions(-)

diff --git a/drivers/staging/android/ion/ion.c
b/drivers/staging/android/ion/ion.c
index 57e0d80..75830e3 100644
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -27,6 +27,7 @@
   #include 
   #include 
   #include 
+#include <linux/highmem.h>
     #include "ion.h"
   @@ -119,8 +120,6 @@ static struct ion_buffer
*ion_buffer_create(struct ion_heap *heap,
     void ion_buffer_destroy(struct ion_buffer *buffer)
   {
-    if (WARN_ON(buffer->kmap_cnt > 0))
-    buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
   buffer->heap->ops->free(buffer);
   kfree(buffer);
   }
@@ -140,34 +139,6 @@ static void _ion_buffer_destroy(struct ion_buffer
*buffer)
   ion_buffer_destroy(buffer);
   }
   -static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
-{
-    void *vaddr;
-
-    if (buffer->kmap_cnt) {
-    buffer->kmap_cnt++;
-    return buffer->vaddr;
-    }
-    vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
-    if (WARN_ONCE(!vaddr,
-  "heap->ops->map_kernel should return ERR_PTR on error"))
-    return ERR_PTR(-EINVAL);
-    if (IS_ERR(vaddr))
-    return vaddr;
-    buffer->vaddr = vaddr;
-    buffer->kmap_cnt++;
-    return vaddr;
-}
-
-static void ion_buffer_kmap_put(struct ion_buffer *buffer)
-{
-    buffer->kmap_cnt--;
-    if (!buffer->kmap_cnt) {
-    buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
-    buffer->vaddr = NULL;
-    }
-}
-
   static struct sg_table *dup_sg_table(struct sg_table *table)
   {
   struct sg_table *new_table;
@@ -305,34 +276,68 @@ static void ion_dma_buf_release(struct dma_buf
*dmabuf)
   _ion_buffer_destroy(buffer);
   }
   +static inline int sg_page_count(struct scatterlist *sg)
+{
+    return PAGE_ALIGN(sg->offset + sg->length) >> PAGE_SHIFT;
+}
+
+static struct page *ion_dma_buf_get_page(struct sg_table *sg_table,
+ unsigned long offset)
+{
+    struct page *page;
+    struct scatterlist *sg;
+    int i;
+    unsigned int nr_pages;
+
+    nr_pages = 0;
+    page = NULL;
+    /* Find the page with specified offset */
+    for_each_sg(sg_table->sgl, sg, sg_table->nents, i) {
+    if (nr_pages + sg_page_count(sg) > offset) {
+    page = nth_page(sg_page(sg), offset - nr_pages);
+    break;
+    }
+
+    nr_pages += sg_page_count(sg);
+    }
+
+    return page;
+}
   static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long
offset)
   {
   struct ion_buffer *buffer = dmabuf->priv;
   -    return buffer->vaddr + offset * PAGE_SIZE;
+    return kmap(ion_dma_buf_get_page(buffer->sg_table, offset));
   }


This unfortunately doesn't work for uncached buffers. We need to create
an uncached alias, otherwise we break what little coherency guarantee
there is.
I'm still convinced that we can't actually do the uncached buffers
correctly and they should be removed...

Thanks,
Laura
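
[Illustrative aside, not part of the original thread: the uncached alias
Laura refers to is what the vmap()-based path being removed provides. A
condensed sketch along the lines of ion_heap_map_kernel() in ion_heap.c
(helper name invented; error handling and page-array setup omitted):]

	/* Uncached ion buffers get a write-combined kernel alias. */
	static void *map_kernel_sketch(struct ion_buffer *buffer,
				       struct page **pages, int npages)
	{
		pgprot_t pgprot = (buffer->flags & ION_FLAG_CACHED) ?
			PAGE_KERNEL : pgprot_writecombine(PAGE_KERNEL);

		return vmap(pages, npages, VM_MAP, pgprot);
	}

[kmap() reuses the kernel's cacheable mapping of the page, so a kmap()-based
path has no place to apply pgprot_writecombine(); that is the crux of the
objection above.]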


I assume your concern is a possible cache coherency issue resulting from
memory accesses with different cache attributes, especially if these
accesses come from different contexts.

But dma-buf requires that CPU access be started with
dma_buf_begin_cpu_access() and ended with dma_buf_end_cpu_access() (and
with the corresponding ioctl calls from user space) to ensure that there
are no I/O coherency issues. That is, the appropriate cache lines are
invalidated before the access and flushed after it (in the case of
read/write access).

So it seems that at the end of each CPU access, the most up-to-date copy
of the buffer is in RAM.

Perhaps I'm wrong, but I don't see the issue.
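
[Illustrative aside, not part of the original thread: the user-space half
of the protocol described above, assuming 'fd' is a dma-buf file descriptor
exported by ion. struct dma_buf_sync and DMA_BUF_IOCTL_SYNC are from the
mainline uapi <linux/dma-buf.h>; the helper name is invented.]

	#include <linux/dma-buf.h>	/* struct dma_buf_sync, DMA_BUF_IOCTL_SYNC */
	#include <sys/ioctl.h>
	#include <sys/mman.h>
	#include <string.h>

	/* Bracket a CPU write to the buffer with the sync ioctl so the
	 * kernel can do the cache maintenance described above. */
	static int cpu_write_pattern(int fd, size_t len)
	{
		struct dma_buf_sync sync = { 0 };
		void *va = mmap(NULL, len, PROT_READ | PROT_WRITE,
				MAP_SHARED, fd, 0);

		if (va == MAP_FAILED)
			return -1;

		sync.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_WRITE;
		ioctl(fd, DMA_BUF_IOCTL_SYNC, &sync);	/* invalidate before */

		memset(va, 0xab, len);			/* the CPU access */

		sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_WRITE;
		ioctl(fd, DMA_BUF_IOCTL_SYNC, &sync);	/* flush after */

		munmap(va, len);
		return 0;
	}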



Part of the issue is that uncached buffers are often used precisely so we
don't need to do any cache operations. We do the sync currently, but
there's interest in removing it for uncached buffers.

I also haven't looked closely at the aliasing rules on x86, but at least
on arm, coherency with different cache attributes is difficult to get
right and carries a big "not recommended" warning in the documentation.
You may be right that the above sequence would work out with the CPU
accessors, but it would need some careful review. The trade-off of "might
run out of vmalloc space" vs. "cache alias nightmare" doesn't

Re: [PATCH] staging: android: ion: Change dma_buf_kmap()/dma_buf_kmap_atomic() implementation

2018-02-16 Thread Greg KH
On Fri, Feb 16, 2018 at 11:13:03PM +0200, Alexey Skidanov wrote:
> 
> 
> On 02/16/2018 10:49 PM, Greg KH wrote:
> > On Fri, Feb 16, 2018 at 10:43:03PM +0200, Alexey Skidanov wrote:
> >>
> >>
> >> On 02/16/2018 04:46 PM, Greg KH wrote:
> >>> On Tue, Feb 13, 2018 at 12:33:53AM +0200, Alexey Skidanov wrote:
>  Current ion kernel mapping implementation uses vmap() to map previously
>  allocated buffers into kernel virtual address space.
> 
>  On 32-bit platforms, vmap() might fail on large enough buffers due to the
>  limited available vmalloc space. dma_buf_kmap() should guarantee that
>  only one page is mapped at a time.
> 
>  To fix this, kmap()/kmap_atomic() is used to implement the appropriate
>  interfaces.
> 
>  Signed-off-by: Alexey Skidanov 
> >>>
> >>> Again, I said I required another intel.com signed-off-by on any of your
> >>> ion patches before I would take them.  Please get internal review first
> >>> before asking for review from the community.
> >>>
> >>> greg k-h
> >> Is there any issue with this patch?
> > 
> > Given that no other Intel developer reviewed it, I don't know. Again,
> > use the resources you have; to ignore that is folly.
> > 
> > greg k-h
> > 
> 
> I'm trying to improve the current ion implementation by fixing existing
> (potential) bugs. If it's rejected without even being reviewed, it
> probably doesn't make sense to continue ...

Again, I told you what you needed to do for your future patches; I don't
know why you are ignoring that.

> BTW, it has been reviewed internally ...

Great, then have those reviewers put their names on it; that is exactly
what I asked for!  Not doing so is really odd...

thanks,

greg k-h


Re: [PATCH] staging: android: ion: Change dma_buf_kmap()/dma_buf_kmap_atomic() implementation

2018-02-16 Thread Alexey Skidanov


On 02/16/2018 10:49 PM, Greg KH wrote:
> On Fri, Feb 16, 2018 at 10:43:03PM +0200, Alexey Skidanov wrote:
>>
>>
>> On 02/16/2018 04:46 PM, Greg KH wrote:
>>> On Tue, Feb 13, 2018 at 12:33:53AM +0200, Alexey Skidanov wrote:
 Current ion kernel mapping implementation uses vmap() to map previously
 allocated buffers into kernel virtual address space.

 On 32-bit platforms, vmap() might fail on large enough buffers due to the
 limited available vmalloc space. dma_buf_kmap() should guarantee that
 only one page is mapped at a time.

 To fix this, kmap()/kmap_atomic() is used to implement the appropriate
 interfaces.

 Signed-off-by: Alexey Skidanov 
>>>
>>> Again, I said I required another intel.com signed-off-by on any of your
>>> ion patches before I would take them.  Please get internal review first
>>> before asking for review from the community.
>>>
>>> greg k-h
>> Is there any issue with this patch?
> 
> Given that no other Intel developer reviewed it, I don't know. Again,
> use the resources you have; to ignore that is folly.
> 
> greg k-h
> 

I'm trying to improve the current ion implementation by fixing existing
(potential) bugs. If it's rejected without even being reviewed, it
probably doesn't make sense to continue ...

BTW, it has been reviewed internally ...

Alexey


Re: [PATCH] staging: android: ion: Change dma_buf_kmap()/dma_buf_kmap_atomic() implementation

2018-02-16 Thread Greg KH
On Fri, Feb 16, 2018 at 10:43:03PM +0200, Alexey Skidanov wrote:
> 
> 
> On 02/16/2018 04:46 PM, Greg KH wrote:
> > On Tue, Feb 13, 2018 at 12:33:53AM +0200, Alexey Skidanov wrote:
> >> Current ion kernel mapping implementation uses vmap() to map previously
> >> allocated buffers into kernel virtual address space.
> >>
> >> On 32-bit platforms, vmap() might fail on large enough buffers due to the
> >> limited available vmalloc space. dma_buf_kmap() should guarantee that
> >> only one page is mapped at a time.
> >>
> >> To fix this, kmap()/kmap_atomic() is used to implement the appropriate
> >> interfaces.
> >>
> >> Signed-off-by: Alexey Skidanov 
> > 
> > Again, I said I required another intel.com signed-off-by on any of your
> > ion patches before I would take them.  Please get internal review first
> > before asking for review from the community.
> > 
> > greg k-h
> Is there any issue with this patch?

Given that no other Intel developer reviewed it, I don't know. Again,
use the resources you have; to ignore that is folly.

greg k-h


Re: [PATCH] staging: android: ion: Change dma_buf_kmap()/dma_buf_kmap_atomic() implementation

2018-02-16 Thread Alexey Skidanov


On 02/16/2018 04:46 PM, Greg KH wrote:
> On Tue, Feb 13, 2018 at 12:33:53AM +0200, Alexey Skidanov wrote:
>> Current ion kernel mapping implementation uses vmap() to map previously
>> allocated buffers into kernel virtual address space.
>>
>> On 32-bit platforms, vmap() might fail on large enough buffers due to the
>> limited available vmalloc space. dma_buf_kmap() should guarantee that
>> only one page is mapped at a time.
>>
>> To fix this, kmap()/kmap_atomic() is used to implement the appropriate
>> interfaces.
>>
>> Signed-off-by: Alexey Skidanov 
> 
> Again, I said I required another intel.com signed-off-by on any of your
> ion patches before I would take them.  Please get internal review first
> before asking for review from the community.
> 
> greg k-h
Is there any issue with this patch?


Re: [PATCH] staging: android: ion: Change dma_buf_kmap()/dma_buf_kmap_atomic() implementation

2018-02-16 Thread Greg KH
On Tue, Feb 13, 2018 at 12:33:53AM +0200, Alexey Skidanov wrote:
> Current ion kernel mapping implementation uses vmap() to map previously
> allocated buffers into kernel virtual address space.
> 
> On 32-bit platforms, vmap() might fail on large enough buffers due to the
> limited available vmalloc space. dma_buf_kmap() should guarantee that
> only one page is mapped at a time.
> 
> To fix this, kmap()/kmap_atomic() is used to implement the appropriate
> interfaces.
> 
> Signed-off-by: Alexey Skidanov 

Again, I said I required another intel.com signed-off-by on any of your
ion patches before I would take them.  Please get internal review first
before asking for review from the community.

greg k-h


Re: [PATCH] staging: android: ion: Change dma_buf_kmap()/dma_buf_kmap_atomic() implementation

2018-02-16 Thread Alexey Skidanov


On 02/16/2018 01:48 AM, Laura Abbott wrote:
> On 02/12/2018 02:33 PM, Alexey Skidanov wrote:
>> Current ion kernel mapping implementation uses vmap() to map previously
>> allocated buffers into kernel virtual address space.
>>
>> On 32-bit platforms, vmap() might fail on large enough buffers due to the
>> limited available vmalloc space. dma_buf_kmap() should guarantee that
>> only one page is mapped at a time.
>>
>> To fix this, kmap()/kmap_atomic() is used to implement the appropriate
>> interfaces.
>>
>> Signed-off-by: Alexey Skidanov 
>> ---
>>   drivers/staging/android/ion/ion.c | 97
>> +++
>>   drivers/staging/android/ion/ion.h |  1 -
>>   2 files changed, 48 insertions(+), 50 deletions(-)
>>
>> diff --git a/drivers/staging/android/ion/ion.c
>> b/drivers/staging/android/ion/ion.c
>> index 57e0d80..75830e3 100644
>> --- a/drivers/staging/android/ion/ion.c
>> +++ b/drivers/staging/android/ion/ion.c
>> @@ -27,6 +27,7 @@
>>   #include 
>>   #include 
>>   #include 
>> +#include <linux/highmem.h>
>>     #include "ion.h"
>>   @@ -119,8 +120,6 @@ static struct ion_buffer
>> *ion_buffer_create(struct ion_heap *heap,
>>     void ion_buffer_destroy(struct ion_buffer *buffer)
>>   {
>> -    if (WARN_ON(buffer->kmap_cnt > 0))
>> -    buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
>>   buffer->heap->ops->free(buffer);
>>   kfree(buffer);
>>   }
>> @@ -140,34 +139,6 @@ static void _ion_buffer_destroy(struct ion_buffer
>> *buffer)
>>   ion_buffer_destroy(buffer);
>>   }
>>   -static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
>> -{
>> -    void *vaddr;
>> -
>> -    if (buffer->kmap_cnt) {
>> -    buffer->kmap_cnt++;
>> -    return buffer->vaddr;
>> -    }
>> -    vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
>> -    if (WARN_ONCE(!vaddr,
>> -  "heap->ops->map_kernel should return ERR_PTR on error"))
>> -    return ERR_PTR(-EINVAL);
>> -    if (IS_ERR(vaddr))
>> -    return vaddr;
>> -    buffer->vaddr = vaddr;
>> -    buffer->kmap_cnt++;
>> -    return vaddr;
>> -}
>> -
>> -static void ion_buffer_kmap_put(struct ion_buffer *buffer)
>> -{
>> -    buffer->kmap_cnt--;
>> -    if (!buffer->kmap_cnt) {
>> -    buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
>> -    buffer->vaddr = NULL;
>> -    }
>> -}
>> -
>>   static struct sg_table *dup_sg_table(struct sg_table *table)
>>   {
>>   struct sg_table *new_table;
>> @@ -305,34 +276,68 @@ static void ion_dma_buf_release(struct dma_buf
>> *dmabuf)
>>   _ion_buffer_destroy(buffer);
>>   }
>>   +static inline int sg_page_count(struct scatterlist *sg)
>> +{
>> +    return PAGE_ALIGN(sg->offset + sg->length) >> PAGE_SHIFT;
>> +}
>> +
>> +static struct page *ion_dma_buf_get_page(struct sg_table *sg_table,
>> + unsigned long offset)
>> +{
>> +    struct page *page;
>> +    struct scatterlist *sg;
>> +    int i;
>> +    unsigned int nr_pages;
>> +
>> +    nr_pages = 0;
>> +    page = NULL;
>> +    /* Find the page with specified offset */
>> +    for_each_sg(sg_table->sgl, sg, sg_table->nents, i) {
>> +    if (nr_pages + sg_page_count(sg) > offset) {
>> +    page = nth_page(sg_page(sg), offset - nr_pages);
>> +    break;
>> +    }
>> +
>> +    nr_pages += sg_page_count(sg);
>> +    }
>> +
>> +    return page;
>> +}
>>   static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long
>> offset)
>>   {
>>   struct ion_buffer *buffer = dmabuf->priv;
>>   -    return buffer->vaddr + offset * PAGE_SIZE;
>> +    return kmap(ion_dma_buf_get_page(buffer->sg_table, offset));
>>   }
> 
> This unfortunately doesn't work for uncached buffers. We need to create
> an uncached alias, otherwise we break what little coherency guarantee
> there is.
> I'm still convinced that we can't actually do the uncached buffers
> correctly and they should be removed...
> 
> Thanks,
> Laura
> 
I assume your concern is a possible cache coherency issue resulting from
memory accesses with different cache attributes, especially if these
accesses come from different contexts.

But dma-buf requires that CPU access be started with
dma_buf_begin_cpu_access() and ended with dma_buf_end_cpu_access() (and
with the corresponding ioctl calls from user space) to ensure that there
are no I/O coherency issues. That is, the appropriate cache lines are
invalidated before the access and flushed after it (in the case of
read/write access).

So it seems that at the end of each CPU access, the most up-to-date copy
of the buffer is in RAM.

Perhaps I'm wrong, but I don't see the issue.
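
[Illustrative aside, not part of the original thread: the in-kernel half of
the same protocol, written against the dma-buf API of this era (dma_buf_kmap()
has since been removed from mainline); read_first_page() is an invented
consumer, shown only as a sketch of the begin/access/end sequence.]

	#include <linux/dma-buf.h>
	#include <linux/dma-direction.h>
	#include <linux/highmem.h>
	#include <linux/string.h>

	/* Bracket a one-page CPU read with begin/end so the exporter can
	 * invalidate the relevant cache lines before the access and flush
	 * them after, as described above. */
	static int read_first_page(struct dma_buf *dmabuf, void *out)
	{
		void *va;
		int ret;

		ret = dma_buf_begin_cpu_access(dmabuf, DMA_FROM_DEVICE);
		if (ret)
			return ret;

		va = dma_buf_kmap(dmabuf, 0);	/* maps exactly one page */
		if (va) {
			memcpy(out, va, PAGE_SIZE);
			dma_buf_kunmap(dmabuf, 0, va);
		}

		return dma_buf_end_cpu_access(dmabuf, DMA_FROM_DEVICE);
	}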

>>     static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned
>> long offset,
>>  void *ptr)
>>   {
>> +    kunmap(ptr);
>> +}
>> +
>> +static void *ion_dma_buf_kmap_atomic(struct dma_buf *dmabuf,
>> + unsigned long offset)
>> +{
>> +    struct ion_buffer *buffer = dmabuf->priv;
>> 

Re: [PATCH] staging: android: ion: Change dma_buf_kmap()/dma_buf_kmap_atomic() implementation

2018-02-15 Thread Laura Abbott

On 02/12/2018 02:33 PM, Alexey Skidanov wrote:

Current ion kernel mapping implementation uses vmap() to map previously
allocated buffers into kernel virtual address space.

On 32-bit platforms, vmap() might fail on large enough buffers due to the
limited available vmalloc space. dma_buf_kmap() should guarantee that
only one page is mapped at a time.

To fix this, kmap()/kmap_atomic() is used to implement the appropriate
interfaces.

Signed-off-by: Alexey Skidanov 
---
  drivers/staging/android/ion/ion.c | 97 +++
  drivers/staging/android/ion/ion.h |  1 -
  2 files changed, 48 insertions(+), 50 deletions(-)

diff --git a/drivers/staging/android/ion/ion.c 
b/drivers/staging/android/ion/ion.c
index 57e0d80..75830e3 100644
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -27,6 +27,7 @@
  #include 
  #include 
  #include 
+#include <linux/highmem.h>
  
  #include "ion.h"
  
@@ -119,8 +120,6 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
 
 void ion_buffer_destroy(struct ion_buffer *buffer)
 {
-   if (WARN_ON(buffer->kmap_cnt > 0))
-   buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
buffer->heap->ops->free(buffer);
kfree(buffer);
  }
@@ -140,34 +139,6 @@ static void _ion_buffer_destroy(struct ion_buffer *buffer)
ion_buffer_destroy(buffer);
  }
  
-static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
-{
-   void *vaddr;
-
-   if (buffer->kmap_cnt) {
-   buffer->kmap_cnt++;
-   return buffer->vaddr;
-   }
-   vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
-   if (WARN_ONCE(!vaddr,
- "heap->ops->map_kernel should return ERR_PTR on error"))
-   return ERR_PTR(-EINVAL);
-   if (IS_ERR(vaddr))
-   return vaddr;
-   buffer->vaddr = vaddr;
-   buffer->kmap_cnt++;
-   return vaddr;
-}
-
-static void ion_buffer_kmap_put(struct ion_buffer *buffer)
-{
-   buffer->kmap_cnt--;
-   if (!buffer->kmap_cnt) {
-   buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
-   buffer->vaddr = NULL;
-   }
-}
-
  static struct sg_table *dup_sg_table(struct sg_table *table)
  {
struct sg_table *new_table;
@@ -305,34 +276,68 @@ static void ion_dma_buf_release(struct dma_buf *dmabuf)
_ion_buffer_destroy(buffer);
  }
  
+static inline int sg_page_count(struct scatterlist *sg)
+{
+   return PAGE_ALIGN(sg->offset + sg->length) >> PAGE_SHIFT;
+}
+
+static struct page *ion_dma_buf_get_page(struct sg_table *sg_table,
+unsigned long offset)
+{
+   struct page *page;
+   struct scatterlist *sg;
+   int i;
+   unsigned int nr_pages;
+
+   nr_pages = 0;
+   page = NULL;
+   /* Find the page with specified offset */
+   for_each_sg(sg_table->sgl, sg, sg_table->nents, i) {
+   if (nr_pages + sg_page_count(sg) > offset) {
+   page = nth_page(sg_page(sg), offset - nr_pages);
+   break;
+   }
+
+   nr_pages += sg_page_count(sg);
+   }
+
+   return page;
+}
  static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
  {
struct ion_buffer *buffer = dmabuf->priv;
  
-	return buffer->vaddr + offset * PAGE_SIZE;
+	return kmap(ion_dma_buf_get_page(buffer->sg_table, offset));
  }


This unfortunately doesn't work for uncached buffers. We need to create
an uncached alias, otherwise we break what little coherency guarantee
there is.
I'm still convinced that we can't actually do the uncached buffers
correctly and they should be removed...

Thanks,
Laura

  
  static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
				 void *ptr)
  {
+   kunmap(ptr);
+}
+
+static void *ion_dma_buf_kmap_atomic(struct dma_buf *dmabuf,
+unsigned long offset)
+{
+   struct ion_buffer *buffer = dmabuf->priv;
+
+   return kmap_atomic(ion_dma_buf_get_page(buffer->sg_table,
+   offset));
+}
+
+static void ion_dma_buf_kunmap_atomic(struct dma_buf *dmabuf,
+ unsigned long offset,
+ void *ptr)
+{
+   kunmap_atomic(ptr);
  }
  
  static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
					  enum dma_data_direction direction)
  {
struct ion_buffer *buffer = dmabuf->priv;
-   void *vaddr;
struct ion_dma_buf_attachment *a;
  
-	/*
-	 * TODO: Move this elsewhere because we don't always need a vaddr
-	 */
-	if (buffer->heap->ops->map_kernel) {
-		mutex_lock(&buffer->lock);
-		vaddr = ion_buffer_kmap_get(buffer);
-		mutex_unlock(&buffer->lock);
-	}
-
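
[Illustrative aside, not part of the original thread: a worked example of
the page lookup this patch introduces; the scatterlist layout below is
invented for the example.]

	/*
	 * ion_dma_buf_get_page() over a three-entry scatterlist:
	 *
	 *   sg0: offset = 0,   length = 2 * PAGE_SIZE -> sg_page_count() = 2
	 *   sg1: offset = 0,   length = 1 * PAGE_SIZE -> sg_page_count() = 1
	 *   sg2: offset = 512, length = 1 * PAGE_SIZE -> sg_page_count() = 2
	 *        (PAGE_ALIGN(512 + PAGE_SIZE) spans two pages)
	 *
	 * Looking up page 'offset' = 3: nr_pages grows 0 -> 2 -> 3 as the
	 * loop walks the entries; the test "nr_pages + sg_page_count(sg) >
	 * offset" first passes at sg2 (3 + 2 > 3), so the function returns
	 * nth_page(sg_page(sg2), 3 - 3), i.e. the first page backing sg2.
	 */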