[PATCH v2 3/7] swiotlb: Make io_tlb_overflow_buffer a physical address

2012-10-11 Thread Alexander Duyck
This change makes it so that we can avoid virt_to_phys overhead when using the
io_tlb_overflow_buffer.  My original plan was to remove the value entirely and
replace it with a constant, but recent patches indicated that this cannot be
done until all device drivers that depend on that functionality have been
updated.
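
For context, here is an illustrative sketch (not part of this diff; the
consumer hunks appear later in the patch and are truncated in this archive)
of the simplification this enables.  swiotlb_virt_to_bus() is the existing
wrapper around phys_to_dma(virt_to_phys()); with the buffer held as a
phys_addr_t, a consumer such as the mapping-error check can convert directly:

	/* Sketch only -- modeled on swiotlb_dma_mapping_error(). */
	static int swiotlb_dma_mapping_error_sketch(struct device *hwdev,
						    dma_addr_t dma_addr)
	{
		/* before: one virt_to_phys() translation per call:
		 *   return dma_addr == swiotlb_virt_to_bus(hwdev,
		 *                          io_tlb_overflow_buffer);
		 */

		/* after: the stored physical address converts directly */
		return dma_addr == phys_to_dma(hwdev, io_tlb_overflow_buffer);
	}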

Signed-off-by: Alexander Duyck alexander.h.du...@intel.com
---

 lib/swiotlb.c |   61 ++++++++++++++++++++++++++++++++++---------------------------
 1 files changed, 34 insertions(+), 27 deletions(-)

diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index c492b84..383f780 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -70,7 +70,7 @@ static unsigned long io_tlb_nslabs;
  */
 static unsigned long io_tlb_overflow = 32*1024;
 
-static void *io_tlb_overflow_buffer;
+phys_addr_t io_tlb_overflow_buffer;
 
 /*
  * This is a free list describing the number of free entries available from
@@ -138,6 +138,7 @@ void swiotlb_print_info(void)
 
 void __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
 {
+   void *v_overflow_buffer;
unsigned long i, bytes;
 
bytes = nslabs << IO_TLB_SHIFT;
@@ -147,6 +148,15 @@ void __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
io_tlb_end = io_tlb_start + bytes;
 
/*
+* Get the overflow emergency buffer
+*/
+   v_overflow_buffer = alloc_bootmem_low_pages(PAGE_ALIGN(io_tlb_overflow));
+   if (!v_overflow_buffer)
+   panic("Cannot allocate SWIOTLB overflow buffer!\n");
+
+   io_tlb_overflow_buffer = __pa(v_overflow_buffer);
+
+   /*
 * Allocate and initialize the free list array.  This array is used
 * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
 * between io_tlb_start and io_tlb_end.
@@ -157,12 +167,6 @@ void __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
io_tlb_index = 0;
 io_tlb_orig_addr = alloc_bootmem_pages(PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)));
 
-   /*
-* Get the overflow emergency buffer
-*/
-   io_tlb_overflow_buffer = alloc_bootmem_low_pages(PAGE_ALIGN(io_tlb_overflow));
-   if (!io_tlb_overflow_buffer)
-   panic("Cannot allocate SWIOTLB overflow buffer!\n");
if (verbose)
swiotlb_print_info();
 }
@@ -252,6 +256,7 @@ int
 swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
 {
unsigned long i, bytes;
+   unsigned char *v_overflow_buffer;
 
bytes = nslabs << IO_TLB_SHIFT;
 
@@ -262,6 +267,16 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
memset(tlb, 0, bytes);
 
/*
+* Get the overflow emergency buffer
+*/
+   v_overflow_buffer = (void *)__get_free_pages(GFP_DMA,
+                                                get_order(io_tlb_overflow));
+   if (!v_overflow_buffer)
+   goto cleanup2;
+
+   io_tlb_overflow_buffer = virt_to_phys(v_overflow_buffer);
+
+   /*
 * Allocate and initialize the free list array.  This array is used
 * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
 * between io_tlb_start and io_tlb_end.
@@ -269,7 +284,7 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
io_tlb_list = (unsigned int *)__get_free_pages(GFP_KERNEL,
  get_order(io_tlb_nslabs * sizeof(int)));
if (!io_tlb_list)
-   goto cleanup2;
+   goto cleanup3;
 
for (i = 0; i < io_tlb_nslabs; i++)
io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
@@ -280,18 +295,10 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
 io_tlb_orig_addr = (phys_addr_t *)
 __get_free_pages(GFP_KERNEL,
  get_order(io_tlb_nslabs *
    sizeof(phys_addr_t)));
if (!io_tlb_orig_addr)
-   goto cleanup3;
+   goto cleanup4;
 
memset(io_tlb_orig_addr, 0, io_tlb_nslabs * sizeof(phys_addr_t));
 
-   /*
-* Get the overflow emergency buffer
-*/
-   io_tlb_overflow_buffer = (void *)__get_free_pages(GFP_DMA,
- get_order(io_tlb_overflow));
-   if (!io_tlb_overflow_buffer)
-   goto cleanup4;
-
swiotlb_print_info();
 
late_alloc = 1;
@@ -299,13 +306,13 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
return 0;
 
 cleanup4:
-   free_pages((unsigned long)io_tlb_orig_addr,
-  get_order(io_tlb_nslabs * sizeof(phys_addr_t)));
-   io_tlb_orig_addr = NULL;
-cleanup3:
free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs *
 sizeof(int)));
io_tlb_list = NULL;
+cleanup3:
+   free_pages((unsigned long)v_overflow_buffer,
+  get_order(io_tlb_overflow));
+   io_tlb_overflow_buffer = 0;
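
A note on the relabeling above: the overflow buffer is now the first of the
late allocations, so every subsequent failure path must unwind it as well,
which shifts each cleanup label down by one.  A minimal userspace sketch of
the same goto-unwind idiom (hypothetical names, plain C, not kernel code)
showing why the labels move:

	#include <stdlib.h>

	int setup_sketch(void)
	{
		void *overflow, *list, *orig;

		overflow = malloc(32 * 1024);	/* allocated first now */
		if (!overflow)
			goto cleanup2;
		list = malloc(128 * sizeof(int));
		if (!list)
			goto cleanup3;		/* must free overflow too */
		orig = malloc(128 * sizeof(long));
		if (!orig)
			goto cleanup4;
		/* ... use the buffers ... */
		free(orig);
		free(list);
		free(overflow);
		return 0;

	cleanup4:
		free(list);
	cleanup3:
		free(overflow);			/* released last on unwind */
	cleanup2:
		return -1;
	}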
 
