On Tue, Nov 02, 2021 at 04:32:45PM +0200, Imre Deak wrote:
> On Tue, Nov 02, 2021 at 04:15:18PM +0200, Ville Syrjälä wrote:
> > On Mon, Nov 01, 2021 at 08:35:50PM +0200, Imre Deak wrote:
> > > Factor out functions that are needed by the next patch to suspend/resume
> > > the memory mappings for DPT FBs.
> > > 
> > > No functional change, except that during suspend the
> > > ggtt->invalidate(ggtt) call is reordered wrt. atomic_set(&ggtt->vm.open,
> > > open) and mutex_unlock(&ggtt->vm.mutex). This shouldn't matter, since
> > > the i915 suspend sequence is single threaded.
> > > 
> > > Cc: Chris Wilson <[email protected]>
> > > Cc: Ville Syrjälä <[email protected]>
> > > Signed-off-by: Imre Deak <[email protected]>
> > > ---
> > >  drivers/gpu/drm/i915/gt/intel_ggtt.c | 71 +++++++++++++++++++++-------
> > >  drivers/gpu/drm/i915/gt/intel_gtt.h  |  2 +
> > >  2 files changed, 56 insertions(+), 17 deletions(-)
> > > 
> > > diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c 
> > > b/drivers/gpu/drm/i915/gt/intel_ggtt.c
> > > index f17383e76eb71..834dc1b6a0729 100644
> > > --- a/drivers/gpu/drm/i915/gt/intel_ggtt.c
> > > +++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c
> > > @@ -116,17 +116,26 @@ static bool needs_idle_maps(struct drm_i915_private 
> > > *i915)
> > >   return false;
> > >  }
> > >  
> > > -void i915_ggtt_suspend(struct i915_ggtt *ggtt)
> > > +/**
> > > + * i915_ggtt_suspend_vm - Suspend the memory mappings for a GGTT or DPT 
> > > VM
> > > + * @vm: The VM to suspend the mappings for
> > > + *
> > > + * Suspend the memory mappings for all objects mapped to HW via the GGTT 
> > > or a
> > > + * DPT page table.
> > > + */
> > > +void i915_ggtt_suspend_vm(struct i915_address_space *vm)
> > >  {
> > >   struct i915_vma *vma, *vn;
> > >   int open;
> > >  
> > > - mutex_lock(&ggtt->vm.mutex);
> > > + drm_WARN_ON(&vm->i915->drm, !vm->is_ggtt && !vm->is_dpt);
> > > +
> > > + mutex_lock(&vm->mutex);
> > >  
> > >   /* Skip rewriting PTE on VMA unbind. */
> > > - open = atomic_xchg(&ggtt->vm.open, 0);
> > > + open = atomic_xchg(&vm->open, 0);
> > >  
> > > - list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link) {
> > > + list_for_each_entry_safe(vma, vn, &vm->bound_list, vm_link) {
> > >           GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
> > >           i915_vma_wait_for_bind(vma);
> > >  
> > > @@ -139,11 +148,17 @@ void i915_ggtt_suspend(struct i915_ggtt *ggtt)
> > >           }
> > >   }
> > >  
> > > - ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total);
> > > + vm->clear_range(vm, 0, vm->total);
> > > +
> > > + atomic_set(&vm->open, open);
> > > +
> > > + mutex_unlock(&vm->mutex);
> > > +}
> > > +
> > > +void i915_ggtt_suspend(struct i915_ggtt *ggtt)
> > > +{
> > > + i915_ggtt_suspend_vm(&ggtt->vm);
> > >   ggtt->invalidate(ggtt);
> > > - atomic_set(&ggtt->vm.open, open);
> > > -
> > > - mutex_unlock(&ggtt->vm.mutex);
> > >  
> > >   intel_gt_check_and_clear_faults(ggtt->vm.gt);
> > >  }
> > > @@ -1253,37 +1268,59 @@ void i915_ggtt_disable_guc(struct i915_ggtt *ggtt)
> > >   ggtt->invalidate(ggtt);
> > >  }
> > >  
> > > -void i915_ggtt_resume(struct i915_ggtt *ggtt)
> > > +/**
> > > + * i915_ggtt_resume_vm - Restore the memory mappings for a GGTT or DPT VM
> > > + * @vm: The VM to restore the mappings for
> > > + *
> > > + * Restore the memory mappings for all objects mapped to HW via the GGTT 
> > > or a
> > > + * DPT page table.
> > > + *
> > > + * Returns %true if restoring the mapping for any object that was in a 
> > > write
> > > + * domain before suspend.
> > > + */
> > > +bool i915_ggtt_resume_vm(struct i915_address_space *vm)
> > >  {
> > >   struct i915_vma *vma;
> > > - bool flush = false;
> > > + bool write_domain_objs = false;
> > >   int open;
> > >  
> > > - intel_gt_check_and_clear_faults(ggtt->vm.gt);
> > > + drm_WARN_ON(&vm->i915->drm, !vm->is_ggtt && !vm->is_dpt);
> > >  
> > >   /* First fill our portion of the GTT with scratch pages */
> > > - ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total);
> > > + vm->clear_range(vm, 0, vm->total);
> > >  
> > >   /* Skip rewriting PTE on VMA unbind. */
> > > - open = atomic_xchg(&ggtt->vm.open, 0);
> > > + open = atomic_xchg(&vm->open, 0);
> > >  
> > >   /* clflush objects bound into the GGTT and rebind them. */
> > > - list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link) {
> > > + list_for_each_entry(vma, &vm->bound_list, vm_link) {
> > >           struct drm_i915_gem_object *obj = vma->obj;
> > >           unsigned int was_bound =
> > >                   atomic_read(&vma->flags) & I915_VMA_BIND_MASK;
> > >  
> > >           GEM_BUG_ON(!was_bound);
> > > -         vma->ops->bind_vma(&ggtt->vm, NULL, vma,
> > > +         vma->ops->bind_vma(vm, NULL, vma,
> > >                              obj ? obj->cache_level : 0,
> > >                              was_bound);
> > 
> > Can we even get here with DPT? Ie. shouldn't everything have been 
> > thrown out during suspend?
> 
> After calling vma->ops->unbind_vma() the DPT object still remains on the
> vm->bound_list; unbind_vma() is only supposed to clear the PTE entries
> (atm we're not doing that for DPT). Here we re-instate the PTEs for the
> same objects still on the bound_list.

Ah, I thought we just evict everything not pinned. But I guess not.

-- 
Ville Syrjälä
Intel

Reply via email to