Diffstat (limited to 'drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c')
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c | 19 ++++++++++---------
 1 file changed, 10 insertions(+), 9 deletions(-)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index 7c8ff9792f7b..5c5f33f40055 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -182,6 +182,7 @@ static void schedule_out(struct i915_request *rq)
static void __guc_dequeue(struct intel_engine_cs *engine)
{
struct intel_engine_execlists * const execlists = &engine->execlists;
+ struct i915_sched_engine * const sched_engine = engine->sched_engine;
struct i915_request **first = execlists->inflight;
struct i915_request ** const last_port = first + execlists->port_mask;
struct i915_request *last = first[0];
@@ -204,7 +205,7 @@ static void __guc_dequeue(struct intel_engine_cs *engine)
* event.
*/
port = first;
- while ((rb = rb_first_cached(&execlists->queue))) {
+ while ((rb = rb_first_cached(&sched_engine->queue))) {
struct i915_priolist *p = to_priolist(rb);
struct i915_request *rq, *rn;
@@ -224,11 +225,11 @@ static void __guc_dequeue(struct intel_engine_cs *engine)
last = rq;
}
- rb_erase_cached(&p->node, &execlists->queue);
+ rb_erase_cached(&p->node, &sched_engine->queue);
i915_priolist_free(p);
}
done:
- execlists->queue_priority_hint =
+ sched_engine->queue_priority_hint =
rb ? to_priolist(rb)->priority : INT_MIN;
if (submit) {
*port = schedule_in(last, port - execlists->inflight);
@@ -338,7 +339,7 @@ out_unlock:
static void guc_reset_cancel(struct intel_engine_cs *engine)
{
- struct intel_engine_execlists * const execlists = &engine->execlists;
+ struct i915_sched_engine * const sched_engine = engine->sched_engine;
struct i915_request *rq, *rn;
struct rb_node *rb;
unsigned long flags;
@@ -368,7 +369,7 @@ static void guc_reset_cancel(struct intel_engine_cs *engine)
}
/* Flush the queued requests to the timeline list (for retiring). */
- while ((rb = rb_first_cached(&execlists->queue))) {
+ while ((rb = rb_first_cached(&sched_engine->queue))) {
struct i915_priolist *p = to_priolist(rb);
priolist_for_each_request_consume(rq, rn, p) {
@@ -378,14 +379,14 @@ static void guc_reset_cancel(struct intel_engine_cs *engine)
i915_request_mark_complete(rq);
}
- rb_erase_cached(&p->node, &execlists->queue);
+ rb_erase_cached(&p->node, &sched_engine->queue);
i915_priolist_free(p);
}
/* Remaining _unready_ requests will be nop'ed when submitted */
- execlists->queue_priority_hint = INT_MIN;
- execlists->queue = RB_ROOT_CACHED;
+ sched_engine->queue_priority_hint = INT_MIN;
+ sched_engine->queue = RB_ROOT_CACHED;
spin_unlock_irqrestore(&engine->active.lock, flags);
}
@@ -514,7 +515,7 @@ static void guc_submit_request(struct i915_request *rq)
queue_request(engine, rq, rq_prio(rq));
- GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root));
+ GEM_BUG_ON(RB_EMPTY_ROOT(&engine->sched_engine->queue.rb_root));
GEM_BUG_ON(list_empty(&rq->sched.link));
tasklet_hi_schedule(&engine->execlists.tasklet);
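
Taken together, the hunks move the run queue state (the priority rbtree and queue_priority_hint) from struct intel_engine_execlists onto the new struct i915_sched_engine, while the inflight ports and the submission tasklet stay on engine->execlists. As a reading aid, here is a minimal sketch of how __guc_dequeue() reads once the patch is applied, assembled from the hunks above; the per-request submission body, which this patch does not touch, is elided and the early-exit check is simplified, so this is not a buildable excerpt of the i915 tree:

/*
 * Sketch only: the priority rbtree and its hint now live on
 * engine->sched_engine; the inflight ports remain on execlists.
 */
static void __guc_dequeue(struct intel_engine_cs *engine)
{
	struct intel_engine_execlists * const execlists = &engine->execlists;
	struct i915_sched_engine * const sched_engine = engine->sched_engine;
	struct i915_request **first = execlists->inflight;
	struct i915_request ** const last_port = first + execlists->port_mask;
	struct i915_request *last = first[0];
	struct i915_request **port = first;
	bool submit = false;
	struct rb_node *rb;

	/* Consume the queue in priority order, highest priority first. */
	while ((rb = rb_first_cached(&sched_engine->queue))) {
		struct i915_priolist *p = to_priolist(rb);
		struct i915_request *rq, *rn;

		priolist_for_each_request_consume(rq, rn, p) {
			if (port == last_port)
				goto done; /* simplified: every port claimed */

			/* ... per-request submission elided (unchanged) ... */
			submit = true;
			last = rq;
		}

		/* The priolist has been drained; drop it from the rbtree. */
		rb_erase_cached(&p->node, &sched_engine->queue);
		i915_priolist_free(p);
	}
done:
	/* Cache the priority of the next waiter, if any. */
	sched_engine->queue_priority_hint =
		rb ? to_priolist(rb)->priority : INT_MIN;
	if (submit)
		*port = schedule_in(last, port - execlists->inflight);
}

guc_reset_cancel() makes the same substitution: it flushes sched_engine->queue rather than execlists->queue, then leaves sched_engine->queue_priority_hint at INT_MIN with the tree reset to RB_ROOT_CACHED. Likewise, the sanity check in guc_submit_request() now asserts against engine->sched_engine->queue.rb_root, while the tasklet it kicks still lives on engine->execlists.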