| author | Chris Wilson <chris@chris-wilson.co.uk> | 2019-10-04 14:40:02 +0100 |
|---|---|---|
| committer | Chris Wilson <chris@chris-wilson.co.uk> | 2019-10-04 15:39:17 +0100 |
| commit | 7e8057626640cfedbae000c5032be32269713687 (patch) | |
| tree | e95ecef1231f0eb7fddb5115b53ce2e525607907 /drivers/gpu/drm/i915/selftests/i915_request.c | |
| parent | b72348406927740d4cfb55f2c1e19c6769ffc666 (diff) | |
drm/i915: Drop struct_mutex from around i915_retire_requests()
We no longer need to hold struct_mutex while retiring requests, so drop it
from i915_retire_requests() and i915_gem_wait_for_idle(), finally removing
I915_WAIT_LOCKED for good.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191004134015.13204-8-chris@chris-wilson.co.uk
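For reference, the pattern the selftest conversions in this patch follow is roughly the sketch below. This is an illustrative sketch only, not code from the patch: the helper name `wait_on_nop_request()` is made up, and it assumes an `engine` whose `kernel_context` is already pinned. The point is that request creation no longer requires struct_mutex, and an explicit reference is taken before i915_request_add() so the request can still be waited upon (and then released) after submission.

```c
/*
 * Illustrative sketch (not from the patch): the post-conversion pattern
 * used by these selftests. No struct_mutex is taken around request
 * creation; instead a reference is held across i915_request_add() so the
 * request can be waited upon after it has been submitted (and possibly
 * already retired).
 */
static int wait_on_nop_request(struct intel_engine_cs *engine)
{
	struct i915_request *rq;
	long timeout;

	rq = i915_request_create(engine->kernel_context);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	i915_request_get(rq);	/* keep rq alive past i915_request_add() */
	i915_request_add(rq);	/* once submitted, rq may retire at any time */

	timeout = i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT);
	i915_request_put(rq);

	return timeout < 0 ? timeout : 0;
}
```

Compare with the removal of mutex_lock(&i915->drm.struct_mutex) around i915_request_create() in live_nop_request(), __live_parallel_engine1() and friends in the diff below.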
Diffstat (limited to 'drivers/gpu/drm/i915/selftests/i915_request.c')
-rw-r--r-- | drivers/gpu/drm/i915/selftests/i915_request.c | 151 |
1 file changed, 43 insertions(+), 108 deletions(-)
diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c
index eb175da48547..d7d68c6a6bd5 100644
--- a/drivers/gpu/drm/i915/selftests/i915_request.c
+++ b/drivers/gpu/drm/i915/selftests/i915_request.c
@@ -41,21 +41,16 @@ static int igt_add_request(void *arg)
 {
 	struct drm_i915_private *i915 = arg;
 	struct i915_request *request;
-	int err = -ENOMEM;
 
 	/* Basic preliminary test to create a request and let it loose! */
 
-	mutex_lock(&i915->drm.struct_mutex);
 	request = mock_request(i915->engine[RCS0]->kernel_context, HZ / 10);
 	if (!request)
-		goto out_unlock;
+		return -ENOMEM;
 
 	i915_request_add(request);
 
-	err = 0;
-out_unlock:
-	mutex_unlock(&i915->drm.struct_mutex);
-	return err;
+	return 0;
 }
 
 static int igt_wait_request(void *arg)
@@ -67,12 +62,10 @@ static int igt_wait_request(void *arg)
 
 	/* Submit a request, then wait upon it */
 
-	mutex_lock(&i915->drm.struct_mutex);
 	request = mock_request(i915->engine[RCS0]->kernel_context, T);
-	if (!request) {
-		err = -ENOMEM;
-		goto out_unlock;
-	}
+	if (!request)
+		return -ENOMEM;
+
 	i915_request_get(request);
 
 	if (i915_request_wait(request, 0, 0) != -ETIME) {
@@ -125,9 +118,7 @@ static int igt_wait_request(void *arg)
 	err = 0;
 out_request:
 	i915_request_put(request);
-out_unlock:
 	mock_device_flush(i915);
-	mutex_unlock(&i915->drm.struct_mutex);
 	return err;
 }
 
@@ -140,52 +131,45 @@ static int igt_fence_wait(void *arg)
 
 	/* Submit a request, treat it as a fence and wait upon it */
 
-	mutex_lock(&i915->drm.struct_mutex);
 	request = mock_request(i915->engine[RCS0]->kernel_context, T);
-	if (!request) {
-		err = -ENOMEM;
-		goto out_locked;
-	}
+	if (!request)
+		return -ENOMEM;
 
 	if (dma_fence_wait_timeout(&request->fence, false, T) != -ETIME) {
 		pr_err("fence wait success before submit (expected timeout)!\n");
-		goto out_locked;
+		goto out;
 	}
 
 	i915_request_add(request);
-	mutex_unlock(&i915->drm.struct_mutex);
 
 	if (dma_fence_is_signaled(&request->fence)) {
 		pr_err("fence signaled immediately!\n");
-		goto out_device;
+		goto out;
 	}
 
 	if (dma_fence_wait_timeout(&request->fence, false, T / 2) != -ETIME) {
 		pr_err("fence wait success after submit (expected timeout)!\n");
-		goto out_device;
+		goto out;
 	}
 
 	if (dma_fence_wait_timeout(&request->fence, false, T) <= 0) {
 		pr_err("fence wait timed out (expected success)!\n");
-		goto out_device;
+		goto out;
 	}
 
 	if (!dma_fence_is_signaled(&request->fence)) {
 		pr_err("fence unsignaled after waiting!\n");
-		goto out_device;
+		goto out;
 	}
 
 	if (dma_fence_wait_timeout(&request->fence, false, T) <= 0) {
 		pr_err("fence wait timed out when complete (expected success)!\n");
-		goto out_device;
+		goto out;
 	}
 
 	err = 0;
-out_device:
-	mutex_lock(&i915->drm.struct_mutex);
-out_locked:
+out:
 	mock_device_flush(i915);
-	mutex_unlock(&i915->drm.struct_mutex);
 	return err;
 }
 
@@ -199,6 +183,8 @@ static int igt_request_rewind(void *arg)
 	mutex_lock(&i915->drm.struct_mutex);
 	ctx[0] = mock_context(i915, "A");
+	mutex_unlock(&i915->drm.struct_mutex);
+
 	ce = i915_gem_context_get_engine(ctx[0], RCS0);
 	GEM_BUG_ON(IS_ERR(ce));
 	request = mock_request(ce, 2 * HZ);
@@ -211,7 +197,10 @@ static int igt_request_rewind(void *arg)
 	i915_request_get(request);
 	i915_request_add(request);
 
+	mutex_lock(&i915->drm.struct_mutex);
 	ctx[1] = mock_context(i915, "B");
+	mutex_unlock(&i915->drm.struct_mutex);
+
 	ce = i915_gem_context_get_engine(ctx[1], RCS0);
 	GEM_BUG_ON(IS_ERR(ce));
 	vip = mock_request(ce, 0);
@@ -233,7 +222,6 @@ static int igt_request_rewind(void *arg)
 	request->engine->submit_request(request);
 	rcu_read_unlock();
 
-	mutex_unlock(&i915->drm.struct_mutex);
 	if (i915_request_wait(vip, 0, HZ) == -ETIME) {
 		pr_err("timed out waiting for high priority request\n");
@@ -248,14 +236,12 @@ static int igt_request_rewind(void *arg)
 	err = 0;
 err:
 	i915_request_put(vip);
-	mutex_lock(&i915->drm.struct_mutex);
 err_context_1:
 	mock_context_close(ctx[1]);
 	i915_request_put(request);
err_context_0:
 	mock_context_close(ctx[0]);
 	mock_device_flush(i915);
-	mutex_unlock(&i915->drm.struct_mutex);
 	return err;
 }
 
@@ -282,7 +268,6 @@ __live_request_alloc(struct intel_context *ce)
 static int __igt_breadcrumbs_smoketest(void *arg)
 {
 	struct smoketest *t = arg;
-	struct mutex * const BKL = &t->engine->i915->drm.struct_mutex;
 	const unsigned int max_batch = min(t->ncontexts, t->max_batch) - 1;
 	const unsigned int total = 4 * t->ncontexts + 1;
 	unsigned int num_waits = 0, num_fences = 0;
@@ -337,14 +322,11 @@ static int __igt_breadcrumbs_smoketest(void *arg)
 		struct i915_request *rq;
 		struct intel_context *ce;
 
-		mutex_lock(BKL);
-
 		ce = i915_gem_context_get_engine(ctx, t->engine->legacy_idx);
 		GEM_BUG_ON(IS_ERR(ce));
 		rq = t->request_alloc(ce);
 		intel_context_put(ce);
 		if (IS_ERR(rq)) {
-			mutex_unlock(BKL);
 			err = PTR_ERR(rq);
 			count = n;
 			break;
@@ -357,8 +339,6 @@ static int __igt_breadcrumbs_smoketest(void *arg)
 		requests[n] = i915_request_get(rq);
 		i915_request_add(rq);
 
-		mutex_unlock(BKL);
-
 		if (err >= 0)
 			err = i915_sw_fence_await_dma_fence(wait,
 							    &rq->fence,
@@ -457,15 +437,15 @@ static int mock_breadcrumbs_smoketest(void *arg)
 		goto out_threads;
 	}
 
-	mutex_lock(&t.engine->i915->drm.struct_mutex);
 	for (n = 0; n < t.ncontexts; n++) {
+		mutex_lock(&t.engine->i915->drm.struct_mutex);
 		t.contexts[n] = mock_context(t.engine->i915, "mock");
+		mutex_unlock(&t.engine->i915->drm.struct_mutex);
 		if (!t.contexts[n]) {
 			ret = -ENOMEM;
 			goto out_contexts;
 		}
 	}
-	mutex_unlock(&t.engine->i915->drm.struct_mutex);
 
 	for (n = 0; n < ncpus; n++) {
 		threads[n] = kthread_run(__igt_breadcrumbs_smoketest,
@@ -495,18 +475,15 @@ static int mock_breadcrumbs_smoketest(void *arg)
 		atomic_long_read(&t.num_fences),
 		ncpus);
 
-	mutex_lock(&t.engine->i915->drm.struct_mutex);
out_contexts:
 	for (n = 0; n < t.ncontexts; n++) {
 		if (!t.contexts[n])
 			break;
 		mock_context_close(t.contexts[n]);
 	}
-	mutex_unlock(&t.engine->i915->drm.struct_mutex);
 	kfree(t.contexts);
out_threads:
 	kfree(threads);
-
 	return ret;
 }
 
@@ -539,7 +516,6 @@ static int live_nop_request(void *arg)
 {
 	struct drm_i915_private *i915 = arg;
 	struct intel_engine_cs *engine;
-	intel_wakeref_t wakeref;
 	struct igt_live_test t;
 	unsigned int id;
 	int err = -ENODEV;
@@ -549,28 +525,25 @@ static int live_nop_request(void *arg)
 	 * the overhead of submitting requests to the hardware.
 	 */
 
-	mutex_lock(&i915->drm.struct_mutex);
-	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
 	for_each_engine(engine, i915, id) {
-		struct i915_request *request = NULL;
 		unsigned long n, prime;
 		IGT_TIMEOUT(end_time);
 		ktime_t times[2] = {};
 
 		err = igt_live_test_begin(&t, i915, __func__, engine->name);
 		if (err)
-			goto out_unlock;
+			return err;
 
 		for_each_prime_number_from(prime, 1, 8192) {
+			struct i915_request *request = NULL;
+
 			times[1] = ktime_get_raw();
 
 			for (n = 0; n < prime; n++) {
+				i915_request_put(request);
 				request = i915_request_create(engine->kernel_context);
-				if (IS_ERR(request)) {
-					err = PTR_ERR(request);
-					goto out_unlock;
-				}
+				if (IS_ERR(request))
+					return PTR_ERR(request);
 
 				/* This space is left intentionally blank.
				 *
@@ -585,9 +558,11 @@ static int live_nop_request(void *arg)
 				 * for latency.
 				 */
 
+				i915_request_get(request);
 				i915_request_add(request);
 			}
 			i915_request_wait(request, 0, MAX_SCHEDULE_TIMEOUT);
+			i915_request_put(request);
 
 			times[1] = ktime_sub(ktime_get_raw(), times[1]);
 			if (prime == 1)
@@ -599,7 +574,7 @@ static int live_nop_request(void *arg)
 
 		err = igt_live_test_end(&t);
 		if (err)
-			goto out_unlock;
+			return err;
 
 		pr_info("Request latencies on %s: 1 = %lluns, %lu = %lluns\n",
 			engine->name,
@@ -607,9 +582,6 @@ static int live_nop_request(void *arg)
 			prime, div64_u64(ktime_to_ns(times[1]), prime));
 	}
 
-out_unlock:
-	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
-	mutex_unlock(&i915->drm.struct_mutex);
 	return err;
 }
 
@@ -679,6 +651,7 @@ empty_request(struct intel_engine_cs *engine,
 	if (err)
 		goto out_request;
 
+	i915_request_get(request);
out_request:
 	i915_request_add(request);
 	return err ? ERR_PTR(err) : request;
 }
 
@@ -688,7 +661,6 @@ static int live_empty_request(void *arg)
 {
 	struct drm_i915_private *i915 = arg;
 	struct intel_engine_cs *engine;
-	intel_wakeref_t wakeref;
 	struct igt_live_test t;
 	struct i915_vma *batch;
 	unsigned int id;
@@ -699,14 +671,9 @@ static int live_empty_request(void *arg)
 	 * the overhead of submitting requests to the hardware.
 	 */
 
-	mutex_lock(&i915->drm.struct_mutex);
-	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
 	batch = empty_batch(i915);
-	if (IS_ERR(batch)) {
-		err = PTR_ERR(batch);
-		goto out_unlock;
-	}
+	if (IS_ERR(batch))
+		return PTR_ERR(batch);
 
 	for_each_engine(engine, i915, id) {
 		IGT_TIMEOUT(end_time);
@@ -730,6 +697,7 @@ static int live_empty_request(void *arg)
 			times[1] = ktime_get_raw();
 
 			for (n = 0; n < prime; n++) {
+				i915_request_put(request);
 				request = empty_request(engine, batch);
 				if (IS_ERR(request)) {
 					err = PTR_ERR(request);
@@ -745,6 +713,7 @@ static int live_empty_request(void *arg)
 			if (__igt_timeout(end_time, NULL))
 				break;
 		}
+		i915_request_put(request);
 
 		err = igt_live_test_end(&t);
 		if (err)
@@ -759,9 +728,6 @@ static int live_empty_request(void *arg)
out_batch:
 	i915_vma_unpin(batch);
 	i915_vma_put(batch);
-out_unlock:
-	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
-	mutex_unlock(&i915->drm.struct_mutex);
 	return err;
 }
 
@@ -841,7 +807,6 @@ static int live_all_engines(void *arg)
 	struct drm_i915_private *i915 = arg;
 	struct intel_engine_cs *engine;
 	struct i915_request *request[I915_NUM_ENGINES];
-	intel_wakeref_t wakeref;
 	struct igt_live_test t;
 	struct i915_vma *batch;
 	unsigned int id;
@@ -852,18 +817,15 @@ static int live_all_engines(void *arg)
 	 * block doing so, and that they don't complete too soon.
 	 */
 
-	mutex_lock(&i915->drm.struct_mutex);
-	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
 	err = igt_live_test_begin(&t, i915, __func__, "");
 	if (err)
-		goto out_unlock;
+		return err;
 
 	batch = recursive_batch(i915);
 	if (IS_ERR(batch)) {
 		err = PTR_ERR(batch);
 		pr_err("%s: Unable to create batch, err=%d\n", __func__, err);
-		goto out_unlock;
+		return err;
 	}
 
 	for_each_engine(engine, i915, id) {
@@ -933,9 +895,6 @@ out_request:
 		i915_request_put(request[id]);
 	i915_vma_unpin(batch);
 	i915_vma_put(batch);
-out_unlock:
-	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
-	mutex_unlock(&i915->drm.struct_mutex);
 	return err;
 }
 
@@ -945,7 +904,6 @@ static int live_sequential_engines(void *arg)
 	struct i915_request *request[I915_NUM_ENGINES] = {};
 	struct i915_request *prev = NULL;
 	struct intel_engine_cs *engine;
-	intel_wakeref_t wakeref;
 	struct igt_live_test t;
 	unsigned int id;
 	int err;
@@ -956,12 +914,9 @@ static int live_sequential_engines(void *arg)
 	 * they are running on independent engines.
 	 */
 
-	mutex_lock(&i915->drm.struct_mutex);
-	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
 	err = igt_live_test_begin(&t, i915, __func__, "");
 	if (err)
-		goto out_unlock;
+		return err;
 
 	for_each_engine(engine, i915, id) {
 		struct i915_vma *batch;
@@ -971,7 +926,7 @@ static int live_sequential_engines(void *arg)
 			err = PTR_ERR(batch);
 			pr_err("%s: Unable to create batch for %s, err=%d\n",
 			       __func__, engine->name, err);
-			goto out_unlock;
+			return err;
 		}
 
 		request[id] = i915_request_create(engine->kernel_context);
@@ -1063,9 +1018,6 @@ out_request:
 			i915_vma_put(request[id]->batch);
 		i915_request_put(request[id]);
 	}
-out_unlock:
-	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
-	mutex_unlock(&i915->drm.struct_mutex);
 	return err;
 }
 
@@ -1080,16 +1032,12 @@ static int __live_parallel_engine1(void *arg)
 	struct i915_request *rq;
 	int err;
 
-	mutex_lock(&engine->i915->drm.struct_mutex);
 	rq = i915_request_create(engine->kernel_context);
-	if (IS_ERR(rq)) {
-		mutex_unlock(&engine->i915->drm.struct_mutex);
+	if (IS_ERR(rq))
 		return PTR_ERR(rq);
-	}
 
 	i915_request_get(rq);
 	i915_request_add(rq);
-	mutex_unlock(&engine->i915->drm.struct_mutex);
 
 	err = 0;
 	if (i915_request_wait(rq, 0, HZ / 5) < 0)
@@ -1115,16 +1063,11 @@ static int __live_parallel_engineN(void *arg)
 	do {
 		struct i915_request *rq;
 
-		mutex_lock(&engine->i915->drm.struct_mutex);
 		rq = i915_request_create(engine->kernel_context);
-		if (IS_ERR(rq)) {
-			mutex_unlock(&engine->i915->drm.struct_mutex);
+		if (IS_ERR(rq))
 			return PTR_ERR(rq);
-		}
 
 		i915_request_add(rq);
-		mutex_unlock(&engine->i915->drm.struct_mutex);
-
 		count++;
 	} while (!__igt_timeout(end_time, NULL));
 
@@ -1154,9 +1097,7 @@ static int live_parallel_engines(void *arg)
 		struct task_struct *tsk[I915_NUM_ENGINES] = {};
 		struct igt_live_test t;
 
-		mutex_lock(&i915->drm.struct_mutex);
 		err = igt_live_test_begin(&t, i915, __func__, "");
-		mutex_unlock(&i915->drm.struct_mutex);
 		if (err)
 			break;
 
@@ -1184,10 +1125,8 @@ static int live_parallel_engines(void *arg)
 			put_task_struct(tsk[id]);
 		}
 
-		mutex_lock(&i915->drm.struct_mutex);
 		if (igt_live_test_end(&t))
 			err = -EIO;
-		mutex_unlock(&i915->drm.struct_mutex);
 	}
 
 	return err;
@@ -1280,9 +1219,10 @@ static int live_breadcrumbs_smoketest(void *arg)
 		goto out_threads;
 	}
-	mutex_lock(&i915->drm.struct_mutex);
 	for (n = 0; n < t[0].ncontexts; n++) {
+		mutex_lock(&i915->drm.struct_mutex);
 		t[0].contexts[n] = live_context(i915, file);
+		mutex_unlock(&i915->drm.struct_mutex);
 		if (!t[0].contexts[n]) {
 			ret = -ENOMEM;
 			goto out_contexts;
 		}
@@ -1299,7 +1239,6 @@ static int live_breadcrumbs_smoketest(void *arg)
 		t[id].max_batch = max_batches(t[0].contexts[0], engine);
 		if (t[id].max_batch < 0) {
 			ret = t[id].max_batch;
-			mutex_unlock(&i915->drm.struct_mutex);
 			goto out_flush;
 		}
 		/* One ring interleaved between requests from all cpus */
@@ -1314,7 +1253,6 @@ static int live_breadcrumbs_smoketest(void *arg)
 					  &t[id], "igt/%d.%d", id, n);
 			if (IS_ERR(tsk)) {
 				ret = PTR_ERR(tsk);
-				mutex_unlock(&i915->drm.struct_mutex);
 				goto out_flush;
 			}
 
@@ -1322,7 +1260,6 @@ static int live_breadcrumbs_smoketest(void *arg)
 			threads[id * ncpus + n] = tsk;
 		}
 	}
-	mutex_unlock(&i915->drm.struct_mutex);
 
 	msleep(jiffies_to_msecs(i915_selftest.timeout_jiffies));
 
@@ -1350,10 +1287,8 @@ out_flush:
 	pr_info("Completed %lu waits for %lu fences across %d engines and %d cpus\n",
 		num_waits, num_fences,
 		RUNTIME_INFO(i915)->num_engines, ncpus);
-	mutex_lock(&i915->drm.struct_mutex);
 	ret = igt_live_test_end(&live) ?: ret;
out_contexts:
-	mutex_unlock(&i915->drm.struct_mutex);
 	kfree(t[0].contexts);
out_threads:
 	kfree(threads);