author    | Chris Wilson <chris@chris-wilson.co.uk> | 2016-10-28 13:58:37 +0100
committer | Chris Wilson <chris@chris-wilson.co.uk> | 2016-10-28 20:53:47 +0100
commit    | 1233e2db199dea015391db03d3478b3392201c41 (patch)
tree      | 264eee9509dcecd6cdbeeb44200e5ce1af54b0c6 /drivers/gpu/drm/i915/i915_gem_userptr.c
parent    | 03ac84f1830ec0b90f622500591eb3cc554ee479 (diff)
download  | linux-1233e2db199dea015391db03d3478b3392201c41.tar.gz linux-1233e2db199dea015391db03d3478b3392201c41.tar.bz2 linux-1233e2db199dea015391db03d3478b3392201c41.zip
drm/i915: Move object backing storage manipulation to its own locking
Break the allocation of the backing storage away from struct_mutex into
a per-object lock. This allows parallel page allocation, provided we can
do so outside of struct_mutex (i.e. set-domain-ioctl, pwrite, GTT
fault), i.e. before execbuf! The increased cost of the atomic counters
is hidden behind i915_vma_pin() for the typical case of execbuf: as
the object is typically bound between execbufs, the pages_pin_count is
static. The cost will be felt around set-domain and pwrite, but offset
by the improvement from reduced struct_mutex contention.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20161028125858.23563-14-chris@chris-wilson.co.uk
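The per-object locking pattern the commit message describes can be sketched roughly as follows. This is a minimal, self-contained illustration, not the i915 code: the names sketch_object, sketch_pin_pages and sketch_unpin_pages, and the trivial kzalloc-backed "allocation", are stand-ins. The point it shows is the split between a lock-free fast path on an atomic pin count and a per-object mutex taken only when backing storage may need to be allocated or released.

/*
 * Minimal sketch of the per-object locking pattern described in the commit
 * message above.  The names (sketch_object, sketch_pin_pages,
 * sketch_unpin_pages) and the kzalloc-backed "allocation" are illustrative
 * stand-ins, not the i915 implementation.
 */
#include <linux/atomic.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

struct sketch_object {
	struct mutex mm_lock;		/* guards allocation/release of backing storage */
	atomic_t pages_pin_count;	/* nonzero while the storage must stay resident */
	struct sg_table *pages;		/* backing storage, NULL until first pin */
};

static int sketch_pin_pages(struct sketch_object *obj)
{
	int err = 0;

	/* Fast path: already populated and pinned, just bump the atomic count. */
	if (atomic_inc_not_zero(&obj->pages_pin_count))
		return 0;

	/* Slow path: only the 0->1 pin takes the per-object lock to allocate. */
	mutex_lock(&obj->mm_lock);
	if (!obj->pages) {
		/* Stand-in for building the real sg_table of backing pages. */
		obj->pages = kzalloc(sizeof(*obj->pages), GFP_KERNEL);
		if (!obj->pages)
			err = -ENOMEM;
	}
	if (!err)
		atomic_inc(&obj->pages_pin_count);
	mutex_unlock(&obj->mm_lock);

	return err;
}

static void sketch_unpin_pages(struct sketch_object *obj)
{
	/* Only the unpin that drops the count to zero takes the lock. */
	if (atomic_dec_and_mutex_lock(&obj->pages_pin_count, &obj->mm_lock)) {
		/*
		 * Simplification: release immediately on the last unpin; i915
		 * keeps the pages cached until put_pages/shrinking, but the
		 * lock/refcount split is the same.
		 */
		kfree(obj->pages);
		obj->pages = NULL;
		mutex_unlock(&obj->mm_lock);
	}
}

With this split, concurrent pinners of an already-populated object never contend on any mutex; only the first pin and the final unpin serialize on the per-object lock, which is how the change avoids funnelling page allocation through struct_mutex.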
Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem_userptr.c')
-rw-r--r-- | drivers/gpu/drm/i915/i915_gem_userptr.c | 10 |
1 file changed, 4 insertions, 6 deletions
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index a421447f1d84..6c8c7b36f7fc 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -79,7 +79,7 @@ static void cancel_userptr(struct work_struct *work)
 	WARN_ONCE(obj->mm.pages,
 		  "Failed to release pages: bind_count=%d, pages_pin_count=%d, pin_display=%d\n",
 		  obj->bind_count,
-		  obj->mm.pages_pin_count,
+		  atomic_read(&obj->mm.pages_pin_count),
 		  obj->pin_display);
 
 	i915_gem_object_put(obj);
@@ -491,7 +491,6 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
 {
 	struct get_pages_work *work = container_of(_work, typeof(*work), work);
 	struct drm_i915_gem_object *obj = work->obj;
-	struct drm_device *dev = obj->base.dev;
 	const int npages = obj->base.size >> PAGE_SHIFT;
 	struct page **pvec;
 	int pinned, ret;
@@ -527,7 +526,7 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
 		}
 	}
 
-	mutex_lock(&dev->struct_mutex);
+	mutex_lock(&obj->mm.lock);
 	if (obj->userptr.work == &work->work) {
 		struct sg_table *pages = ERR_PTR(ret);
 
@@ -542,13 +541,12 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
 
 		obj->userptr.work = ERR_CAST(pages);
 	}
-
-	i915_gem_object_put(obj);
-	mutex_unlock(&dev->struct_mutex);
+	mutex_unlock(&obj->mm.lock);
 
 	release_pages(pvec, pinned, 0);
 	drm_free_large(pvec);
 
+	i915_gem_object_put_unlocked(obj);
 	put_task_struct(work->task);
 	kfree(work);
 }