[Intel-gfx] [CI 2/3] drm/i915/gtt: Split gen8_ppgtt_clear_pte_range

2016-10-14, Michał Winiarski
Let's use a more top-down approach, where each gen8_ppgtt_clear_* function
is responsible for clearing the struct passed as an argument and for
calling the relevant clear_range functions on the lower-level tables.
Doing this rather than operating on PTE ranges makes the implementation
of shrinking page tables quite simple.

v2: Drop min when calculating num_entries, no negation in 48b ppgtt
check, no newlines in vars block (Joonas)
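
As an illustration of the shape this gives the code, here is a standalone
two-level sketch of the same top-down pattern. The types, names, index math
and the zero-fill below are simplified stand-ins for this note only (the
driver writes a scratch PTE and walks the real i915 structures):

/*
 * Minimal model of the top-down clear over two levels of 512-entry
 * tables and 4 KiB pages.  struct pd/pt are illustrative, not i915 code.
 */
#include <stdint.h>
#include <string.h>

#define PAGE_SHIFT	12
#define PTES_PER_PT	512
#define PTS_PER_PD	512

struct pt { uint64_t pte[PTES_PER_PT]; };
struct pd { struct pt *pt[PTS_PER_PD]; };

/* Lowest level: touches only the entries of the one table it was given. */
static void clear_pt(struct pt *pt, unsigned int first, unsigned int count)
{
	memset(&pt->pte[first], 0, count * sizeof(pt->pte[0]));
}

/* Upper level: walks the tables the range overlaps and delegates down. */
static void clear_pd(struct pd *pd, uint64_t start, uint64_t length)
{
	while (length) {
		unsigned int pde = (start >> (PAGE_SHIFT + 9)) & (PTS_PER_PD - 1);
		unsigned int first = (start >> PAGE_SHIFT) & (PTES_PER_PT - 1);
		uint64_t count = length >> PAGE_SHIFT;

		/* Clamp to this table, as gen8_pte_count() is assumed to do. */
		if (count > PTES_PER_PT - first)
			count = PTES_PER_PT - first;

		if (pd->pt[pde])	/* cf. the WARN_ON checks in the patch */
			clear_pt(pd->pt[pde], first, count);

		start += count << PAGE_SHIFT;
		length -= count << PAGE_SHIFT;
	}
}

int main(void)
{
	static struct pt pt0, pt1;
	static struct pd pd = { .pt = { &pt0, &pt1 } };

	/* 4 pages starting at PTE 510: two entries in pt0, two in pt1. */
	clear_pd(&pd, 510ull << PAGE_SHIFT, 4ull << PAGE_SHIFT);
	return 0;
}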

Cc: Chris Wilson 
Cc: Joonas Lahtinen 
Cc: Michel Thierry 
Cc: Mika Kuoppala 
Signed-off-by: Michał Winiarski 
---
 drivers/gpu/drm/i915/i915_gem_gtt.c | 107 +++-
 1 file changed, 58 insertions(+), 49 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 48ec9c5..c284d8d 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -704,59 +704,78 @@ static int gen8_48b_mm_switch(struct i915_hw_ppgtt *ppgtt,
	return gen8_write_pdp(req, 0, px_dma(&ppgtt->pml4));
 }
 
-static void gen8_ppgtt_clear_pte_range(struct i915_address_space *vm,
-  struct i915_page_directory_pointer *pdp,
-  uint64_t start,
-  uint64_t length,
-  gen8_pte_t scratch_pte)
+static void gen8_ppgtt_clear_pt(struct i915_address_space *vm,
+   struct i915_page_table *pt,
+   uint64_t start,
+   uint64_t length)
 {
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
+
+   unsigned int pte_start = gen8_pte_index(start);
+   unsigned int num_entries = gen8_pte_count(start, length);
+   uint64_t pte;
gen8_pte_t *pt_vaddr;
-   unsigned pdpe = gen8_pdpe_index(start);
-   unsigned pde = gen8_pde_index(start);
-   unsigned pte = gen8_pte_index(start);
-   unsigned num_entries = length >> PAGE_SHIFT;
-   unsigned last_pte, i;
+   gen8_pte_t scratch_pte = gen8_pte_encode(vm->scratch_page.daddr,
+I915_CACHE_LLC);
 
-   if (WARN_ON(!pdp))
+   if (WARN_ON(!px_page(pt)))
return;
 
-   while (num_entries) {
-   struct i915_page_directory *pd;
-   struct i915_page_table *pt;
+   bitmap_clear(pt->used_ptes, pte_start, num_entries);
 
-   if (WARN_ON(!pdp->page_directory[pdpe]))
-   break;
+   pt_vaddr = kmap_px(pt);
+
+   for (pte = pte_start; pte < pte_start + num_entries; pte++)
+   pt_vaddr[pte] = scratch_pte;
 
-   pd = pdp->page_directory[pdpe];
+   kunmap_px(ppgtt, pt_vaddr);
+}
+
+static void gen8_ppgtt_clear_pd(struct i915_address_space *vm,
+   struct i915_page_directory *pd,
+   uint64_t start,
+   uint64_t length)
+{
+   struct i915_page_table *pt;
+   uint64_t pde;
 
+   gen8_for_each_pde(pt, pd, start, length, pde) {
if (WARN_ON(!pd->page_table[pde]))
break;
 
-   pt = pd->page_table[pde];
+   gen8_ppgtt_clear_pt(vm, pt, start, length);
+   }
+}
 
-   if (WARN_ON(!px_page(pt)))
-   break;
+static void gen8_ppgtt_clear_pdp(struct i915_address_space *vm,
+struct i915_page_directory_pointer *pdp,
+uint64_t start,
+uint64_t length)
+{
+   struct i915_page_directory *pd;
+   uint64_t pdpe;
 
-   last_pte = pte + num_entries;
-   if (last_pte > GEN8_PTES)
-   last_pte = GEN8_PTES;
+   gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
+   if (WARN_ON(!pdp->page_directory[pdpe]))
+   break;
 
-   pt_vaddr = kmap_px(pt);
+   gen8_ppgtt_clear_pd(vm, pd, start, length);
+   }
+}
 
-   for (i = pte; i < last_pte; i++) {
-   pt_vaddr[i] = scratch_pte;
-   num_entries--;
-   }
+static void gen8_ppgtt_clear_pml4(struct i915_address_space *vm,
+ struct i915_pml4 *pml4,
+ uint64_t start,
+ uint64_t length)
+{
+   struct i915_page_directory_pointer *pdp;
+   uint64_t pml4e;
 
-   kunmap_px(ppgtt, pt_vaddr);
+   gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
+   if (WARN_ON(!pml4->pdps[pml4e]))
+   break;
 
-   pte = 0;
-   if (++pde == I915_PDES) {
-   if (++pdpe == I915_PDPES_PER_PDP(vm->dev))
-
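
One more note on why this makes the promised shrinking simple:
gen8_ppgtt_clear_pt() also trims the table's used_ptes bitmap, so a later
patch can have each parent level free any table whose bitmap goes empty
after a clear. A minimal sketch of that check, with bitmap_clear_range()
and pt_is_empty() as hypothetical stand-ins for the kernel bitmap helpers:

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <string.h>

#define GEN8_PTES 512
#define LONGS_PER_BITMAP (GEN8_PTES / (8 * sizeof(unsigned long)))

struct pt {
	/* 1 bit per PTE; the clear path trims it via bitmap_clear() */
	unsigned long used_ptes[LONGS_PER_BITMAP];
};

/* Clear 'count' bits starting at 'first' (stand-in for bitmap_clear()). */
static void bitmap_clear_range(unsigned long *map, unsigned int first,
			       unsigned int count)
{
	while (count--) {
		map[first / (8 * sizeof(unsigned long))] &=
			~(1UL << (first % (8 * sizeof(unsigned long))));
		first++;
	}
}

/* A parent level can free a table whose bitmap went empty. */
static bool pt_is_empty(const struct pt *pt)
{
	for (size_t i = 0; i < LONGS_PER_BITMAP; i++)
		if (pt->used_ptes[i])
			return false;
	return true;
}

int main(void)
{
	struct pt pt;

	memset(pt.used_ptes, 0xff, sizeof(pt.used_ptes));
	assert(!pt_is_empty(&pt));

	bitmap_clear_range(pt.used_ptes, 0, GEN8_PTES);
	assert(pt_is_empty(&pt));	/* now safe for the parent to free */
	return 0;
}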
