On Mon, Dec 14, 2015 at 11:16:10AM +0530, [email protected] wrote:
> +static int
> +copy_content(struct drm_i915_gem_object *obj,
> +             struct drm_i915_private *i915,
> +             struct address_space *mapping)
> +{
> +     struct drm_mm_node node;
> +     int ret, i;
> +
> +     /* stolen objects are already pinned to prevent shrinkage */
> +     memset(&node, 0, sizeof(node));
> +     ret = i915_gem_insert_node_in_range(i915, &node, 4096, 0,
> +                                         0, i915->gtt.mappable_end);
> +     if (ret)
> +             return ret;
> +
> +     for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
> +             struct page *page;
> +             void __iomem *src;
> +             void *dst;
> +
> +             wmb();
> +             i915->gtt.base.insert_page(&i915->gtt.base,
> +                                        i915_gem_object_get_dma_address(obj, i),
> +                                        node.start,
> +                                        I915_CACHE_NONE,
> +                                        0);
> +             wmb();
> +
> +             page = shmem_read_mapping_page(mapping, i);
> +             if (IS_ERR(page)) {
> +                     ret = PTR_ERR(page);
> +                     break;
> +             }
> +
> +             src = io_mapping_map_atomic_wc(i915->gtt.mappable, node.start);
> +             dst = kmap_atomic(page);

The wmb() barriers are here...
> +             memcpy_fromio(dst, src, PAGE_SIZE);
...and here.
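
i.e. something along these lines (untested sketch; the pairing orders
the PTE update against the read, and the read against the later
clear_range):

        src = io_mapping_map_atomic_wc(i915->gtt.mappable, node.start);
        dst = kmap_atomic(page);

        wmb(); /* order the PTE write before reading via the aperture */
        memcpy_fromio(dst, src, PAGE_SIZE);
        wmb(); /* order the read before the PTE is recycled */

        kunmap_atomic(dst);
        io_mapping_unmap_atomic(src);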

> +             kunmap_atomic(dst);
> +             io_mapping_unmap_atomic(src);
> +
> +             page_cache_release(page);
> +     }
> +
> +     wmb();
> +     i915->gtt.base.clear_range(&i915->gtt.base,
> +                                node.start, node.size,
> +                                true);
> +     drm_mm_remove_node(&node);
> +     return ret;
> +}
> +
> +/**
> + * i915_gem_object_migrate_stolen_to_shmemfs() - migrates a stolen-backed
> + * object to shmemfs
> + * @obj: stolen-backed object to be migrated
> + *
> + * Returns: 0 on successful migration, errno on failure
> + */
> +static int
> +i915_gem_object_migrate_stolen_to_shmemfs(struct drm_i915_gem_object *obj)
> +{
> +     struct drm_i915_private *i915 = to_i915(obj->base.dev);
> +     struct i915_vma *vma, *vn;
> +     struct file *file;
> +     struct address_space *mapping;
> +     struct sg_table *stolen_pages, *shmemfs_pages;
> +     int ret;
> +
> +     if (WARN_ON_ONCE(i915_gem_object_needs_bit17_swizzle(obj)))
> +             return -EINVAL;
> +
> +     ret = i915_gem_object_set_to_gtt_domain(obj, false);
> +     if (ret)
> +             return ret;

This should be in copy_content.
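
i.e. something like (sketch, keeping the same arguments):

        static int
        copy_content(struct drm_i915_gem_object *obj,
                     struct drm_i915_private *i915,
                     struct address_space *mapping)
        {
                int ret, i;

                /* Flush the object before copying through the aperture */
                ret = i915_gem_object_set_to_gtt_domain(obj, false);
                if (ret)
                        return ret;
                ...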

> +     file = shmem_file_setup("drm mm object", obj->base.size, VM_NORESERVE);
> +     if (IS_ERR(file))
> +             return PTR_ERR(file);
> +     mapping = i915_gem_set_inode_gfp(obj->base.dev, file);
> +
> +     list_for_each_entry_safe(vma, vn, &obj->vma_list, vma_link)
> +             if (i915_vma_unbind(vma))
> +                     continue;
> +
> +     if (obj->madv != I915_MADV_WILLNEED && list_empty(&obj->vma_list)) {
> +             /* Discard the stolen reservation, and replace with
> +              * an unpopulated shmemfs object.
> +              */
> +             obj->madv = __I915_MADV_PURGED;
> +             goto swap_pages;

A goto over one line? else?
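
i.e. (sketch):

        if (obj->madv != I915_MADV_WILLNEED && list_empty(&obj->vma_list)) {
                /* Discard the stolen reservation, and replace with
                 * an unpopulated shmemfs object.
                 */
                obj->madv = __I915_MADV_PURGED;
        } else {
                ret = copy_content(obj, i915, mapping);
                if (ret)
                        goto err_file;
        }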

> +     }
> +
> +     ret = copy_content(obj, i915, mapping);
> +     if (ret)
> +             goto err_file;
> +
> +swap_pages:
> +     stolen_pages = obj->pages;
> +     obj->pages = NULL;
> +
> +     obj->base.filp = file;
> +     obj->base.read_domains = I915_GEM_DOMAIN_CPU;
> +     obj->base.write_domain = I915_GEM_DOMAIN_CPU;

Again, these domains are a result of copy_content.
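
i.e. set them at the tail of copy_content, once the copy has actually
been performed (sketch):

        /* The object is now backed by CPU-coherent shmemfs pages */
        obj->base.read_domains = I915_GEM_DOMAIN_CPU;
        obj->base.write_domain = I915_GEM_DOMAIN_CPU;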

> +
> +     /* Recreate any pinned binding with pointers to the new storage */
> +     if (!list_empty(&obj->vma_list)) {
> +             ret = i915_gem_object_get_pages_gtt(obj);
> +             if (ret) {
> +                     obj->pages = stolen_pages;
> +                     goto err_file;
> +             }
> +
> +             ret = i915_gem_object_set_to_gtt_domain(obj, true);

Why? The pages are allocated, the domain is irrelevant (just so long as
it is accurate, see above).

> +             obj->get_page.sg = obj->pages->sgl;
> +             obj->get_page.last = 0;
> +
> +             list_for_each_entry(vma, &obj->vma_list, vma_link) {
> +                     if (!drm_mm_node_allocated(&vma->node))
> +                             continue;
> +
> +                     WARN_ON(i915_vma_bind(vma,
> +                                           obj->cache_level,
> +                                           PIN_UPDATE));
> +             }
> +     } else
> +             list_del(&obj->global_list);

This is very confusing (and wrong). This should only be a result of
setting PURGED above.
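
i.e. tie the removal to the purge (sketch):

        if (obj->madv == __I915_MADV_PURGED)
                list_del(&obj->global_list);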

> +     /* drop the stolen pin and backing */
> +     shmemfs_pages = obj->pages;
> +     obj->pages = stolen_pages;
> +
> +     i915_gem_object_unpin_pages(obj);
> +     obj->ops->put_pages(obj);
> +     if (obj->ops->release)
> +             obj->ops->release(obj);
> +
> +     obj->ops = &i915_gem_object_ops;
> +     obj->pages = shmemfs_pages;
> +
> +     return 0;
> +
> +err_file:
> +     fput(file);
> +     obj->base.filp = NULL;
> +     return ret;
> +}
> +
> +int
> +i915_gem_freeze(struct drm_device *dev)
> +{
> +     /* Called before i915_gem_suspend() when hibernating */
> +     struct drm_i915_private *i915 = to_i915(dev);
> +     struct drm_i915_gem_object *obj, *tmp;
> +     struct list_head *phase[] = {
> +             &i915->mm.unbound_list, &i915->mm.bound_list, NULL
> +     }, **p;
> +     int ret;
> +
> +     ret = i915_mutex_lock_interruptible(dev);
> +     if (ret)
> +             return ret;

Whitespace.
-Chris

> +     /* Across hibernation, the stolen area is not preserved.
> +      * Anything inside stolen must be copied back to normal
> +      * memory if we wish to preserve it.
> +      */

-- 
Chris Wilson, Intel Open Source Technology Centre