From aa5e9f98113bf3808beb65c8b0ad452fd94cf797 Mon Sep 17 00:00:00 2001
From: Lucas De Marchi
Date: Thu, 7 Oct 2021 16:32:10 -0700
Subject: drm/i915/gt: include tsc.h where used
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

We are currently using tsc_khz as a fallback, so add the right include.
For other architectures we may need to add a different fallback, but
this is not being used by dgfx, so we may as well just paper it over.

Signed-off-by: Lucas De Marchi
Reviewed-by: José Roberto de Souza
Link: https://patchwork.freedesktop.org/patch/msgid/20211007233212.3896460-2-lucas.demarchi@intel.com
---
 drivers/gpu/drm/i915/gt/intel_llc.c | 1 +
 1 file changed, 1 insertion(+)
(limited to 'drivers/gpu')

diff --git a/drivers/gpu/drm/i915/gt/intel_llc.c b/drivers/gpu/drm/i915/gt/intel_llc.c
index eb1a15deed22..d6c6b8b0a1e6 100644
--- a/drivers/gpu/drm/i915/gt/intel_llc.c
+++ b/drivers/gpu/drm/i915/gt/intel_llc.c
@@ -3,6 +3,7 @@
  * Copyright © 2019 Intel Corporation
  */
 
+#include <asm/tsc.h>
 #include
 
 #include "i915_drv.h"
-- 
cgit v1.2.3

From a5b51a9f8523a0b88ce7e8e8059f75a43c34c57f Mon Sep 17 00:00:00 2001
From: Lucas De Marchi
Date: Thu, 7 Oct 2021 16:32:11 -0700
Subject: drm/i915/gt: add asm/cacheflush.h for use of clflush()
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Let's include what we use instead of relying on other indirect
includes.

Signed-off-by: Lucas De Marchi
Reviewed-by: José Roberto de Souza
Link: https://patchwork.freedesktop.org/patch/msgid/20211007233212.3896460-3-lucas.demarchi@intel.com
---
 drivers/gpu/drm/i915/gt/intel_engine.h | 1 +
 1 file changed, 1 insertion(+)
(limited to 'drivers/gpu')

diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h
index 452248884ef1..d5ac49c0691e 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine.h
@@ -2,6 +2,7 @@
 #ifndef _INTEL_RINGBUFFER_H_
 #define _INTEL_RINGBUFFER_H_
 
+#include <asm/cacheflush.h>
 #include
 
 #include
-- 
cgit v1.2.3

From c46f4405486d953dd749613f67d3a0eec69e1b87 Mon Sep 17 00:00:00 2001
From: Matt Roper
Date: Thu, 30 Sep 2021 17:58:16 -0700
Subject: drm/i915: Stop using I915_TILING_* in client blit selftest
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The I915_TILING_* definitions in the uapi header are intended solely
for tiling modes that are visible to the old de-tiling fence ioctls.
Since modern hardware does not support de-tiling fences, we should not
add new definitions for new tiling types going forward. However, we do
want the client blit selftest to eventually cover other new tiling
modes (such as Tile4), so switch it to using its own enum of tiling
modes.
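
For illustration only (the CLIENT_TILING_4 entry below is hypothetical
and not part of this patch): because the enum is local to the selftest,
a future tiling mode can be added without touching the uapi header:

  enum client_tiling {
  	CLIENT_TILING_LINEAR,
  	CLIENT_TILING_X,
  	CLIENT_TILING_Y,
  	CLIENT_TILING_4,	/* hypothetical Tile4 entry, never exposed via uapi */
  	CLIENT_NUM_TILING_TYPES
  };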
Cc: Ville Syrjälä
Cc: Stanislav Lisovskiy
Signed-off-by: Matt Roper
Reviewed-by: Ville Syrjälä
Link: https://patchwork.freedesktop.org/patch/msgid/20211001005816.73330-1-matthew.d.roper@intel.com
---
 .../drm/i915/gem/selftests/i915_gem_client_blt.c | 29 ++++++++++++++--------
 1 file changed, 18 insertions(+), 11 deletions(-)
(limited to 'drivers/gpu')

diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
index ecbcbb86ae1e..8402ed925a69 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
@@ -17,13 +17,20 @@
 #include "huge_gem_object.h"
 #include "mock_context.h"
 
+enum client_tiling {
+	CLIENT_TILING_LINEAR,
+	CLIENT_TILING_X,
+	CLIENT_TILING_Y,
+	CLIENT_NUM_TILING_TYPES
+};
+
 #define WIDTH 512
 #define HEIGHT 32
 
 struct blit_buffer {
 	struct i915_vma *vma;
 	u32 start_val;
-	u32 tiling;
+	enum client_tiling tiling;
 };
 
 struct tiled_blits {
@@ -53,9 +60,9 @@ static int prepare_blit(const struct tiled_blits *t,
 	*cs++ = MI_LOAD_REGISTER_IMM(1);
 	*cs++ = i915_mmio_reg_offset(BCS_SWCTRL);
 	cmd = (BCS_SRC_Y | BCS_DST_Y) << 16;
-	if (src->tiling == I915_TILING_Y)
+	if (src->tiling == CLIENT_TILING_Y)
 		cmd |= BCS_SRC_Y;
-	if (dst->tiling == I915_TILING_Y)
+	if (dst->tiling == CLIENT_TILING_Y)
 		cmd |= BCS_DST_Y;
 	*cs++ = cmd;
 
@@ -172,7 +179,7 @@ static int tiled_blits_create_buffers(struct tiled_blits *t,
 		t->buffers[i].vma = vma;
 		t->buffers[i].tiling =
-			i915_prandom_u32_max_state(I915_TILING_Y + 1, prng);
+			i915_prandom_u32_max_state(CLIENT_TILING_Y + 1, prng);
 	}
 
 	return 0;
@@ -197,17 +204,17 @@ static u64 swizzle_bit(unsigned int bit, u64 offset)
 
 static u64 tiled_offset(const struct intel_gt *gt,
 			u64 v,
 			unsigned int stride,
-			unsigned int tiling)
+			enum client_tiling tiling)
 {
 	unsigned int swizzle;
 	u64 x, y;
 
-	if (tiling == I915_TILING_NONE)
+	if (tiling == CLIENT_TILING_LINEAR)
 		return v;
 
 	y = div64_u64_rem(v, stride, &x);
 
-	if (tiling == I915_TILING_X) {
+	if (tiling == CLIENT_TILING_X) {
 		v = div64_u64_rem(y, 8, &y) * stride * 8;
 		v += y * 512;
 		v += div64_u64_rem(x, 512, &x) << 12;
@@ -244,12 +251,12 @@ static u64 tiled_offset(const struct intel_gt *gt,
 	return v;
 }
 
-static const char *repr_tiling(int tiling)
+static const char *repr_tiling(enum client_tiling tiling)
 {
 	switch (tiling) {
-	case I915_TILING_NONE: return "linear";
-	case I915_TILING_X: return "X";
-	case I915_TILING_Y: return "Y";
+	case CLIENT_TILING_LINEAR: return "linear";
+	case CLIENT_TILING_X: return "X";
+	case CLIENT_TILING_Y: return "Y";
 	default: return "unknown";
 	}
 }
-- 
cgit v1.2.3

From b0179f0d18dd7e6fb6b1c52c49ac21365257e97e Mon Sep 17 00:00:00 2001
From: Hugh Dickins
Date: Tue, 21 Sep 2021 18:50:39 -0700
Subject: drm/i915: fix blank screen booting crashes

5.15-rc1 crashes with blank screen when booting up on two ThinkPads
using i915. Bisections converge convincingly, but arrive at different
and surprising "culprits", none of them the actual culprit.

netconsole (with init_netconsole() hacked to call i915_init() when
logging has started, instead of by module_init()) tells the story:

kernel BUG at drivers/gpu/drm/i915/i915_sw_fence.c:245!
with RSI: ffffffff814d408b pointing to sw_fence_dummy_notify().

I've been building with CONFIG_CC_OPTIMIZE_FOR_SIZE=y, and that
function needs to be 4-byte aligned.
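
The BUG fires because i915_sw_fence stores flag bits in the low bits of
the notify function pointer, so the callback must be at least 4-byte
aligned. A simplified sketch of that packing (the names and types here
are illustrative, not the driver's exact definitions):

  #include <linux/bug.h>

  /* Illustrative only: flags live in the two low bits of the callback
   * pointer, so the pointer itself must be 4-byte aligned.
   */
  #define NOTIFY_FLAG_MASK 0x3UL

  static unsigned long pack_notify(void *fn, unsigned long flags)
  {
  	BUG_ON((unsigned long)fn & NOTIFY_FLAG_MASK); /* the check that fired */
  	return (unsigned long)fn | (flags & NOTIFY_FLAG_MASK);
  }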
v2: (Jani Nikula)
 - Change BUG_ON to WARN_ON
v3: (Jani / Tvrtko)
 - Short circuit __i915_sw_fence_init on WARN_ON
v4: (Lucas)
 - Break WARN_ON changes out in a different patch

Fixes: 62eaf0ae217d ("drm/i915/guc: Support request cancellation")
Signed-off-by: Hugh Dickins
Signed-off-by: Matthew Brost
Reviewed-by: Matthew Brost
Signed-off-by: John Harrison
Link: https://patchwork.freedesktop.org/patch/msgid/20210922015039.26411-1-matthew.brost@intel.com
---
 drivers/gpu/drm/i915/gt/intel_context.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
(limited to 'drivers/gpu')

diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c
index e9a0cad5c34d..e1bba4cfc0ff 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.c
+++ b/drivers/gpu/drm/i915/gt/intel_context.c
@@ -362,8 +362,8 @@ static int __intel_context_active(struct i915_active *active)
 	return 0;
 }
 
-static int sw_fence_dummy_notify(struct i915_sw_fence *sf,
-				 enum i915_sw_fence_notify state)
+static int __i915_sw_fence_call
+sw_fence_dummy_notify(struct i915_sw_fence *sf, enum i915_sw_fence_notify state)
 {
 	return NOTIFY_DONE;
 }
-- 
cgit v1.2.3

From 82a149a62b6b50ecd21b6e5e9cbdc8f6064a55d2 Mon Sep 17 00:00:00 2001
From: Andi Shyti
Date: Wed, 13 Oct 2021 00:17:38 +0200
Subject: drm/i915/gt: move remaining debugfs interfaces into gt

The following interfaces:

  i915_wedged
  i915_forcewake_user

are dependent on gt values. Put them inside gt/ and drop the "i915_"
prefix from the name. This would be the new structure:

  dri/0/gt
  |
  +-- forcewake_user
  |
  \-- reset

For backwards compatibility with existing igt (and the slight semantic
difference between operating on the i915 abi entry points and the deep
gt info):

  dri/0
  |
  +-- i915_wedged
  |
  \-- i915_forcewake_user

remain at the top level.
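
As a usage sketch (the path assumes a default debugfs mount and DRM
card 0; error handling elided): holding the new gt/forcewake_user file
open pins forcewake, exactly like the legacy top-level file:

  #include <fcntl.h>
  #include <unistd.h>

  int main(void)
  {
  	/* path assumes debugfs mounted at /sys/kernel/debug */
  	int fw = open("/sys/kernel/debug/dri/0/gt/forcewake_user", O_RDONLY);

  	if (fw < 0)
  		return 1;
  	/* ... registers can be poked while the file is held open ... */
  	close(fw);	/* drops the wakeref taken at open() */
  	return 0;
  }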
Signed-off-by: Andi Shyti
Cc: Tvrtko Ursulin
Cc: Chris Wilson
Reviewed-by: Lucas De Marchi
Signed-off-by: Lucas De Marchi
Link: https://patchwork.freedesktop.org/patch/msgid/20211012221738.16029-1-andi@etezian.org
---
 drivers/gpu/drm/i915/gt/intel_gt_debugfs.c    | 55 +++++++++++++++++++++++++++
 drivers/gpu/drm/i915/gt/intel_gt_debugfs.h    |  4 ++
 drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c | 41 ++++++++++++++++++++
 drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.h |  4 ++
 drivers/gpu/drm/i915/i915_debugfs.c           | 43 ++++-----------------
 5 files changed, 111 insertions(+), 36 deletions(-)
(limited to 'drivers/gpu')

diff --git a/drivers/gpu/drm/i915/gt/intel_gt_debugfs.c b/drivers/gpu/drm/i915/gt/intel_gt_debugfs.c
index 1fe19ccd2794..f103664b71d4 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_debugfs.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_debugfs.c
@@ -13,6 +13,59 @@
 #include "pxp/intel_pxp_debugfs.h"
 #include "uc/intel_uc_debugfs.h"
 
+int intel_gt_debugfs_reset_show(struct intel_gt *gt, u64 *val)
+{
+	int ret = intel_gt_terminally_wedged(gt);
+
+	switch (ret) {
+	case -EIO:
+		*val = 1;
+		return 0;
+	case 0:
+		*val = 0;
+		return 0;
+	default:
+		return ret;
+	}
+}
+
+int intel_gt_debugfs_reset_store(struct intel_gt *gt, u64 val)
+{
+	/* Flush any previous reset before applying for a new one */
+	wait_event(gt->reset.queue,
+		   !test_bit(I915_RESET_BACKOFF, &gt->reset.flags));
+
+	intel_gt_handle_error(gt, val, I915_ERROR_CAPTURE,
+			      "Manually reset engine mask to %llx", val);
+	return 0;
+}
+
+/*
+ * keep the interface clean where the first parameter
+ * is a 'struct intel_gt *' instead of 'void *'
+ */
+static int __intel_gt_debugfs_reset_show(void *data, u64 *val)
+{
+	return intel_gt_debugfs_reset_show(data, val);
+}
+
+static int __intel_gt_debugfs_reset_store(void *data, u64 val)
+{
+	return intel_gt_debugfs_reset_store(data, val);
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(reset_fops, __intel_gt_debugfs_reset_show,
+			__intel_gt_debugfs_reset_store, "%llu\n");
+
+static void gt_debugfs_register(struct intel_gt *gt, struct dentry *root)
+{
+	static const struct intel_gt_debugfs_file files[] = {
+		{ "reset", &reset_fops, NULL },
+	};
+
+	intel_gt_debugfs_register_files(root, files, ARRAY_SIZE(files), gt);
+}
+
 void intel_gt_debugfs_register(struct intel_gt *gt)
 {
 	struct dentry *root;
@@ -24,6 +77,8 @@ void intel_gt_debugfs_register(struct intel_gt *gt)
 	if (IS_ERR(root))
 		return;
 
+	gt_debugfs_register(gt, root);
+
 	intel_gt_engines_debugfs_register(gt, root);
 	intel_gt_pm_debugfs_register(gt, root);
 	intel_sseu_debugfs_register(gt, root);

diff --git a/drivers/gpu/drm/i915/gt/intel_gt_debugfs.h b/drivers/gpu/drm/i915/gt/intel_gt_debugfs.h
index 8b6fca09897c..e307ceb99031 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_debugfs.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_debugfs.h
@@ -35,4 +35,8 @@ void intel_gt_debugfs_register_files(struct dentry *root,
 				     const struct intel_gt_debugfs_file *files,
 				     unsigned long count, void *data);
 
+/* functions that need to be accessed by the upper level non-gt interfaces */
+int intel_gt_debugfs_reset_show(struct intel_gt *gt, u64 *val);
+int intel_gt_debugfs_reset_store(struct intel_gt *gt, u64 val);
+
 #endif /* INTEL_GT_DEBUGFS_H */

diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c b/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c
index 5f84ad602642..0bc1454f38dd 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c
@@ -19,6 +19,46 @@
 #include "intel_sideband.h"
 #include "intel_uncore.h"
 
+int intel_gt_pm_debugfs_forcewake_user_open(struct intel_gt *gt)
+{
+	atomic_inc(&gt->user_wakeref);
+	intel_gt_pm_get(gt);
+	if (GRAPHICS_VER(gt->i915) >= 6)
+		intel_uncore_forcewake_user_get(gt->uncore);
+
+	return 0;
+}
+
+int intel_gt_pm_debugfs_forcewake_user_release(struct intel_gt *gt)
+{
+	if (GRAPHICS_VER(gt->i915) >= 6)
+		intel_uncore_forcewake_user_put(gt->uncore);
+	intel_gt_pm_put(gt);
+	atomic_dec(&gt->user_wakeref);
+
+	return 0;
+}
+
+static int forcewake_user_open(struct inode *inode, struct file *file)
+{
+	struct intel_gt *gt = inode->i_private;
+
+	return intel_gt_pm_debugfs_forcewake_user_open(gt);
+}
+
+static int forcewake_user_release(struct inode *inode, struct file *file)
+{
+	struct intel_gt *gt = inode->i_private;
+
+	return intel_gt_pm_debugfs_forcewake_user_release(gt);
+}
+
+static const struct file_operations forcewake_user_fops = {
+	.owner = THIS_MODULE,
+	.open = forcewake_user_open,
+	.release = forcewake_user_release,
+};
+
 static int fw_domains_show(struct seq_file *m, void *data)
 {
 	struct intel_gt *gt = m->private;
@@ -627,6 +667,7 @@ void intel_gt_pm_debugfs_register(struct intel_gt *gt, struct dentry *root)
 		{ "drpc", &drpc_fops, NULL },
 		{ "frequency", &frequency_fops, NULL },
 		{ "forcewake", &fw_domains_fops, NULL },
+		{ "forcewake_user", &forcewake_user_fops, NULL},
 		{ "llc", &llc_fops, llc_eval },
 		{ "rps_boost", &rps_boost_fops, rps_eval },
 	};

diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.h b/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.h
index 2b824289582b..a8457887ec65 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.h
@@ -13,4 +13,8 @@ struct drm_printer;
 void intel_gt_pm_debugfs_register(struct intel_gt *gt, struct dentry *root);
 void intel_gt_pm_frequency_dump(struct intel_gt *gt, struct drm_printer *m);
 
+/* functions that need to be accessed by the upper level non-gt interfaces */
+int intel_gt_pm_debugfs_forcewake_user_open(struct intel_gt *gt);
+int intel_gt_pm_debugfs_forcewake_user_release(struct intel_gt *gt);
+
 #endif /* INTEL_GT_PM_DEBUGFS_H */

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index fdbd46ff59e0..636cc3cf88be 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -35,6 +35,7 @@
 #include "gt/intel_gt.h"
 #include "gt/intel_gt_buffer_pool.h"
 #include "gt/intel_gt_clock_utils.h"
+#include "gt/intel_gt_debugfs.h"
 #include "gt/intel_gt_pm.h"
 #include "gt/intel_gt_pm_debugfs.h"
 #include "gt/intel_gt_requests.h"
@@ -554,36 +555,18 @@ static int i915_wa_registers(struct seq_file *m, void *unused)
 	return 0;
 }
 
-static int
-i915_wedged_get(void *data, u64 *val)
+static int i915_wedged_get(void *data, u64 *val)
 {
 	struct drm_i915_private *i915 = data;
-	int ret = intel_gt_terminally_wedged(&i915->gt);
 
-	switch (ret) {
-	case -EIO:
-		*val = 1;
-		return 0;
-	case 0:
-		*val = 0;
-		return 0;
-	default:
-		return ret;
-	}
+	return intel_gt_debugfs_reset_show(&i915->gt, val);
 }
 
-static int
-i915_wedged_set(void *data, u64 val)
+static int i915_wedged_set(void *data, u64 val)
 {
 	struct drm_i915_private *i915 = data;
 
-	/* Flush any previous reset before applying for a new one */
-	wait_event(i915->gt.reset.queue,
-		   !test_bit(I915_RESET_BACKOFF, &i915->gt.reset.flags));
-
-	intel_gt_handle_error(&i915->gt, val, I915_ERROR_CAPTURE,
-			      "Manually set wedged engine mask = %llx", val);
-	return 0;
+	return intel_gt_debugfs_reset_store(&i915->gt, val);
 }
 
 DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
@@ -728,27 +711,15 @@ static int i915_sseu_status(struct seq_file *m, void *unused)
 static int i915_forcewake_open(struct inode *inode, struct file *file)
 {
 	struct drm_i915_private *i915 = inode->i_private;
-	struct intel_gt *gt = &i915->gt;
 
-	atomic_inc(&gt->user_wakeref);
-	intel_gt_pm_get(gt);
-	if (GRAPHICS_VER(i915) >= 6)
-		intel_uncore_forcewake_user_get(gt->uncore);
-
-	return 0;
+	return intel_gt_pm_debugfs_forcewake_user_open(&i915->gt);
 }
 
 static int i915_forcewake_release(struct inode *inode, struct file *file)
 {
 	struct drm_i915_private *i915 = inode->i_private;
-	struct intel_gt *gt = &i915->gt;
 
-	if (GRAPHICS_VER(i915) >= 6)
-		intel_uncore_forcewake_user_put(&i915->uncore);
-	intel_gt_pm_put(gt);
-	atomic_dec(&gt->user_wakeref);
-
-	return 0;
+	return intel_gt_pm_debugfs_forcewake_user_release(&i915->gt);
 }
 
 static const struct file_operations i915_forcewake_fops = {
-- 
cgit v1.2.3

From 0ea92ace8b95f67224ee26c4955efc7104d8e8e1 Mon Sep 17 00:00:00 2001
From: Matthew Brost
Date: Thu, 14 Oct 2021 10:19:41 -0700
Subject: drm/i915/guc: Move GuC guc_id allocation under submission state sub-struct

Move guc_id allocation under the submission_state sub-struct, as a
future patch will reuse the spin lock as a global submission state
lock. Moving this into a sub-struct makes ownership of the fields /
lock clear.

v2: (Docs)
 - Add comment for submission_state sub-structure
v3: (John Harrison)
 - Fixup a few comments
v4: (John Harrison)
 - Fix typo

Signed-off-by: Matthew Brost
Reviewed-by: John Harrison
Signed-off-by: John Harrison
Link: https://patchwork.freedesktop.org/patch/msgid/20211014172005.27155-2-matthew.brost@intel.com
---
 drivers/gpu/drm/i915/gt/intel_context_types.h     |  6 +--
 drivers/gpu/drm/i915/gt/uc/intel_guc.h            | 28 +++++++++-----
 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c | 46 ++++++++++++-----------
 3 files changed, 46 insertions(+), 34 deletions(-)
(limited to 'drivers/gpu')

diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h
index 12252c411159..e7e3984aab78 100644
--- a/drivers/gpu/drm/i915/gt/intel_context_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_context_types.h
@@ -197,18 +197,18 @@ struct intel_context {
 	struct {
 		/**
 		 * @id: handle which is used to uniquely identify this context
-		 * with the GuC, protected by guc->contexts_lock
+		 * with the GuC, protected by guc->submission_state.lock
 		 */
 		u16 id;
 		/**
 		 * @ref: the number of references to the guc_id, when
 		 * transitioning in and out of zero protected by
-		 * guc->contexts_lock
+		 * guc->submission_state.lock
 		 */
 		atomic_t ref;
 		/**
 		 * @link: in guc->guc_id_list when the guc_id has no refs but is
-		 * still valid, protected by guc->contexts_lock
+		 * still valid, protected by guc->submission_state.lock
 		 */
 		struct list_head link;
 	} guc_id;

diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
index 5dd174babf7a..82e248c2290c 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
@@ -71,16 +71,26 @@ struct intel_guc {
 	} interrupts;
 
 	/**
-	 * @contexts_lock: protects guc_ids, guc_id_list, ce->guc_id.id, and
-	 * ce->guc_id.ref when transitioning in and out of zero
+	 * @submission_state: sub-structure for submission state protected by
+	 * single lock
 	 */
-	spinlock_t contexts_lock;
-	/** @guc_ids: used to allocate unique ce->guc_id.id values */
-	struct ida guc_ids;
-	/**
-	 * @guc_id_list: list of intel_context with valid guc_ids but no refs
-	 */
-	struct list_head guc_id_list;
+	struct {
+		/**
+		 * @lock: protects everything in submission_state,
+		 * ce->guc_id.id, and ce->guc_id.ref when transitioning in and
+		 * out of zero
+		 */
+		spinlock_t lock;
+		/**
+		 * @guc_ids: used to allocate new guc_ids
+		 */
+		struct ida guc_ids;
+		/**
+		 * @guc_id_list: list of intel_context with valid guc_ids but no
+		 * refs
+		 */
+		struct list_head guc_id_list;
+	} submission_state;
 
 	/**
 	 * @submission_supported: tracks whether we support GuC submission on

diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index ba0de35f6323..48bdcdb823b3 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -68,7 +68,7 @@
 * fence is used to stall all requests associated with this guc_id until the
 * corresponding G2H returns indicating the guc_id has been deregistered.
 *
- * guc_ids:
+ * submission_state.guc_ids:
 * Unique number associated with private GuC context data passed in during
 * context registration / submission / deregistration. 64k available. Simple ida
 * is used for allocation.
@@ -89,7 +89,7 @@
 * sched_engine can be submitting at a time. Currently only one sched_engine is
 * used for all of GuC submission but that could change in the future.
 *
- * guc->contexts_lock
+ * guc->submission_state.lock
 * Protects guc_id allocation for the given GuC, i.e. only one context can be
 * doing guc_id allocation operations at a time for each GuC in the system.
 *
@@ -103,7 +103,7 @@
 *
 * Lock ordering rules:
 * sched_engine->lock -> ce->guc_state.lock
- * guc->contexts_lock -> ce->guc_state.lock
+ * guc->submission_state.lock -> ce->guc_state.lock
 *
 * Reset races:
 * When a full GT reset is triggered it is assumed that some G2H responses to
@@ -1148,9 +1148,9 @@ int intel_guc_submission_init(struct intel_guc *guc)
 
 	xa_init_flags(&guc->context_lookup, XA_FLAGS_LOCK_IRQ);
 
-	spin_lock_init(&guc->contexts_lock);
-	INIT_LIST_HEAD(&guc->guc_id_list);
-	ida_init(&guc->guc_ids);
+	spin_lock_init(&guc->submission_state.lock);
+	INIT_LIST_HEAD(&guc->submission_state.guc_id_list);
+	ida_init(&guc->submission_state.guc_ids);
 
 	return 0;
 }
@@ -1215,7 +1215,7 @@ static void guc_submit_request(struct i915_request *rq)
 
 static int new_guc_id(struct intel_guc *guc)
 {
-	return ida_simple_get(&guc->guc_ids, 0,
+	return ida_simple_get(&guc->submission_state.guc_ids, 0,
 			      GUC_MAX_LRC_DESCRIPTORS, GFP_KERNEL |
 			      __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
 }
@@ -1223,7 +1223,8 @@ static int new_guc_id(struct intel_guc *guc)
 static void __release_guc_id(struct intel_guc *guc, struct intel_context *ce)
 {
 	if (!context_guc_id_invalid(ce)) {
-		ida_simple_remove(&guc->guc_ids, ce->guc_id.id);
+		ida_simple_remove(&guc->submission_state.guc_ids,
+				  ce->guc_id.id);
 		reset_lrc_desc(guc, ce->guc_id.id);
 		set_context_guc_id_invalid(ce);
 	}
@@ -1235,9 +1236,9 @@ static void release_guc_id(struct intel_guc *guc, struct intel_context *ce)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&guc->contexts_lock, flags);
+	spin_lock_irqsave(&guc->submission_state.lock, flags);
 	__release_guc_id(guc, ce);
-	spin_unlock_irqrestore(&guc->contexts_lock, flags);
+	spin_unlock_irqrestore(&guc->submission_state.lock, flags);
 }
 
 static int steal_guc_id(struct intel_guc *guc)
@@ -1245,10 +1246,10 @@ static int steal_guc_id(struct intel_guc *guc)
 	struct intel_context *ce;
 	int guc_id;
 
-	lockdep_assert_held(&guc->contexts_lock);
+	lockdep_assert_held(&guc->submission_state.lock);
 
-	if (!list_empty(&guc->guc_id_list)) {
-		ce = list_first_entry(&guc->guc_id_list,
+	if (!list_empty(&guc->submission_state.guc_id_list)) {
+		ce = list_first_entry(&guc->submission_state.guc_id_list,
 				      struct intel_context,
 				      guc_id.link);
@@ -1273,7 +1274,7 @@ static int assign_guc_id(struct intel_guc *guc, u16 *out)
 {
 	int ret;
 
-	lockdep_assert_held(&guc->contexts_lock);
+	lockdep_assert_held(&guc->submission_state.lock);
 
 	ret = new_guc_id(guc);
 	if (unlikely(ret < 0)) {
@@ -1295,7 +1296,7 @@ static int pin_guc_id(struct intel_guc *guc, struct intel_context *ce)
 	GEM_BUG_ON(atomic_read(&ce->guc_id.ref));
 
 try_again:
-	spin_lock_irqsave(&guc->contexts_lock, flags);
+	spin_lock_irqsave(&guc->submission_state.lock, flags);
 
 	might_lock(&ce->guc_state.lock);
 
@@ -1310,7 +1311,7 @@ try_again:
 	atomic_inc(&ce->guc_id.ref);
 
 out_unlock:
-	spin_unlock_irqrestore(&guc->contexts_lock, flags);
+	spin_unlock_irqrestore(&guc->submission_state.lock, flags);
 
 	/*
 	 * -EAGAIN indicates no guc_id are available, let's retire any
@@ -1346,11 +1347,12 @@ static void unpin_guc_id(struct intel_guc *guc, struct intel_context *ce)
 	if (unlikely(context_guc_id_invalid(ce)))
 		return;
 
-	spin_lock_irqsave(&guc->contexts_lock, flags);
+	spin_lock_irqsave(&guc->submission_state.lock, flags);
 	if (!context_guc_id_invalid(ce) && list_empty(&ce->guc_id.link) &&
 	    !atomic_read(&ce->guc_id.ref))
-		list_add_tail(&ce->guc_id.link, &guc->guc_id_list);
-	spin_unlock_irqrestore(&guc->contexts_lock, flags);
+		list_add_tail(&ce->guc_id.link,
+			      &guc->submission_state.guc_id_list);
+	spin_unlock_irqrestore(&guc->submission_state.lock, flags);
 }
 
 static int __guc_action_register_context(struct intel_guc *guc,
@@ -1921,16 +1923,16 @@ static void guc_context_destroy(struct kref *kref)
 	 * returns indicating this context has been deregistered the guc_id is
 	 * returned to the pool of available guc_id.
 	 */
-	spin_lock_irqsave(&guc->contexts_lock, flags);
+	spin_lock_irqsave(&guc->submission_state.lock, flags);
 	if (context_guc_id_invalid(ce)) {
-		spin_unlock_irqrestore(&guc->contexts_lock, flags);
+		spin_unlock_irqrestore(&guc->submission_state.lock, flags);
 		__guc_context_destroy(ce);
 		return;
 	}
 
 	if (!list_empty(&ce->guc_id.link))
 		list_del_init(&ce->guc_id.link);
-	spin_unlock_irqrestore(&guc->contexts_lock, flags);
+	spin_unlock_irqrestore(&guc->submission_state.lock, flags);
 
 	/* Seal race with Reset */
 	spin_lock_irqsave(&ce->guc_state.lock, flags);
-- 
cgit v1.2.3

From 1a52faed3131147c10bb7f908d0f7a29b94f59ae Mon Sep 17 00:00:00 2001
From: Matthew Brost
Date: Thu, 14 Oct 2021 10:19:42 -0700
Subject: drm/i915/guc: Take GT PM ref when deregistering context

Take a GT PM reference to prevent intel_gt_wait_for_idle from
short-circuiting while a deregister context H2G is in flight. To do
this we must issue the deregister H2G from a worker, as a context can
be destroyed from an atomic context and taking a GT PM ref blows up
there.

Previously we took a runtime PM reference from this atomic context,
which worked, but will stop working once runtime PM autosuspend is
enabled. So this patch is twofold: stop intel_gt_wait_for_idle from
short-circuiting, and fix runtime PM autosuspend.
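
A minimal sketch of the deferral pattern this relies on (standalone,
names hypothetical, not the driver's actual code): the atomic release
path only queues work, and the worker runs in process context where
taking a PM reference may sleep:

  #include <linux/workqueue.h>

  struct guc_like {
  	struct work_struct destroyed_worker;
  };

  static void destroyed_worker_fn(struct work_struct *w)
  {
  	/* process context: safe to sleep here, e.g. to take a GT PM
  	 * reference before issuing the deregister H2G */
  }

  static void init_worker(struct guc_like *g)
  {
  	INIT_WORK(&g->destroyed_worker, destroyed_worker_fn);
  }

  static void context_destroy_atomic(struct guc_like *g)
  {
  	queue_work(system_unbound_wq, &g->destroyed_worker);	/* never sleeps */
  }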
v2:
 (John Harrison)
  - Split structure changes out in different patch
 (Tvrtko)
  - Don't drop lock in deregister_destroyed_contexts
v3: (John Harrison)
 - Flush destroyed contexts before destroying context reg pool

Signed-off-by: Matthew Brost
Reviewed-by: John Harrison
Signed-off-by: John Harrison
Link: https://patchwork.freedesktop.org/patch/msgid/20211014172005.27155-3-matthew.brost@intel.com
---
 drivers/gpu/drm/i915/gt/intel_context.c           |   2 +
 drivers/gpu/drm/i915/gt/intel_context_types.h     |   7 ++
 drivers/gpu/drm/i915/gt/intel_engine_pm.h         |   5 +
 drivers/gpu/drm/i915/gt/intel_gt_pm.h             |   4 +
 drivers/gpu/drm/i915/gt/uc/intel_guc.h            |  11 ++
 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c | 146 ++++++++++++++--------
 6 files changed, 121 insertions(+), 54 deletions(-)
(limited to 'drivers/gpu')

diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c
index e1bba4cfc0ff..0f25d07e55e5 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.c
+++ b/drivers/gpu/drm/i915/gt/intel_context.c
@@ -399,6 +399,8 @@ intel_context_init(struct intel_context *ce, struct intel_engine_cs *engine)
 	ce->guc_id.id = GUC_INVALID_LRC_ID;
 	INIT_LIST_HEAD(&ce->guc_id.link);
 
+	INIT_LIST_HEAD(&ce->destroyed_link);
+
 	/*
 	 * Initialize fence to be complete as this is expected to be complete
 	 * unless there is a pending schedule disable outstanding.

diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h
index e7e3984aab78..4613d027cbc3 100644
--- a/drivers/gpu/drm/i915/gt/intel_context_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_context_types.h
@@ -213,6 +213,13 @@ struct intel_context {
 		struct list_head link;
 	} guc_id;
 
+	/**
+	 * @destroyed_link: link in guc->submission_state.destroyed_contexts, in
+	 * list when context is pending to be destroyed (deregistered with the
+	 * GuC), protected by guc->submission_state.lock
+	 */
+	struct list_head destroyed_link;
+
 #ifdef CONFIG_DRM_I915_SELFTEST
 	/**
 	 * @drop_schedule_enable: Force drop of schedule enable G2H for selftest

diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.h b/drivers/gpu/drm/i915/gt/intel_engine_pm.h
index 8520c595f5e1..6fdeae668e6e 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pm.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.h
@@ -16,6 +16,11 @@ intel_engine_pm_is_awake(const struct intel_engine_cs *engine)
 	return intel_wakeref_is_active(&engine->wakeref);
 }
 
+static inline void __intel_engine_pm_get(struct intel_engine_cs *engine)
+{
+	__intel_wakeref_get(&engine->wakeref);
+}
+
 static inline void intel_engine_pm_get(struct intel_engine_cs *engine)
 {
 	intel_wakeref_get(&engine->wakeref);

diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.h b/drivers/gpu/drm/i915/gt/intel_gt_pm.h
index d0588d8aaa44..05de6c1af25b 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.h
@@ -41,6 +41,10 @@ static inline void intel_gt_pm_put_async(struct intel_gt *gt)
 	intel_wakeref_put_async(&gt->wakeref);
 }
 
+#define with_intel_gt_pm(gt, tmp) \
+	for (tmp = 1, intel_gt_pm_get(gt); tmp; \
+	     intel_gt_pm_put(gt), tmp = 0)
+
 static inline int intel_gt_pm_wait_for_idle(struct intel_gt *gt)
 {
 	return intel_wakeref_wait_for_idle(&gt->wakeref);

diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
index 82e248c2290c..74f071a0b6d5 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
@@ -90,6 +90,17 @@ struct intel_guc {
 		 * refs
 		 */
 		struct list_head guc_id_list;
+		/**
+		 * @destroyed_contexts: list of contexts waiting to be destroyed
+		 * (deregistered with the GuC)
+		 */
+		struct list_head destroyed_contexts;
+		/**
+		 * @destroyed_worker: worker to deregister contexts, need as we
+		 * need to take a GT PM reference and can't from destroy
+		 * function as it might be in an atomic context (no sleeping)
+		 */
+		struct work_struct destroyed_worker;
 	} submission_state;
 
 	/**

diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index 48bdcdb823b3..5c53428032b5 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -90,8 +90,8 @@
 * used for all of GuC submission but that could change in the future.
 *
 * guc->submission_state.lock
- * Protects guc_id allocation for the given GuC, i.e. only one context can be
- * doing guc_id allocation operations at a time for each GuC in the system.
+ * Global lock for GuC submission state. Protects guc_ids and destroyed contexts
+ * list.
 *
 * ce->guc_state.lock
 * Protects everything under ce->guc_state. Ensures that a context is in the
@@ -719,6 +719,7 @@ static void scrub_guc_desc_for_outstanding_g2h(struct intel_guc *guc)
 		if (deregister)
 			guc_signal_context_fence(ce);
 		if (destroyed) {
+			intel_gt_pm_put_async(guc_to_gt(guc));
 			release_guc_id(guc, ce);
 			__guc_context_destroy(ce);
 		}
@@ -797,6 +798,8 @@ static void guc_flush_submissions(struct intel_guc *guc)
 	spin_unlock_irqrestore(&sched_engine->lock, flags);
 }
 
+static void guc_flush_destroyed_contexts(struct intel_guc *guc);
+
 void intel_guc_submission_reset_prepare(struct intel_guc *guc)
 {
 	int i;
@@ -815,6 +818,7 @@ void intel_guc_submission_reset_prepare(struct intel_guc *guc)
 	spin_unlock_irq(&guc_to_gt(guc)->irq_lock);
 
 	guc_flush_submissions(guc);
+	guc_flush_destroyed_contexts(guc);
 
 	/*
 	 * Handle any outstanding G2Hs before reset. Call IRQ handler directly
@@ -1126,6 +1130,8 @@ void intel_guc_submission_reset_finish(struct intel_guc *guc)
 	intel_gt_unpark_heartbeats(guc_to_gt(guc));
 }
 
+static void destroyed_worker_func(struct work_struct *w);
+
 /*
 * Set up the memory resources to be shared with the GuC (via the GGTT)
 * at firmware loading time.
@@ -1151,6 +1157,9 @@ int intel_guc_submission_init(struct intel_guc *guc)
 	spin_lock_init(&guc->submission_state.lock);
 	INIT_LIST_HEAD(&guc->submission_state.guc_id_list);
 	ida_init(&guc->submission_state.guc_ids);
+	INIT_LIST_HEAD(&guc->submission_state.destroyed_contexts);
+	INIT_WORK(&guc->submission_state.destroyed_worker,
+		  destroyed_worker_func);
 
 	return 0;
 }
@@ -1160,6 +1169,7 @@ void intel_guc_submission_fini(struct intel_guc *guc)
 	if (!guc->lrc_desc_pool)
 		return;
 
+	guc_flush_destroyed_contexts(guc);
 	guc_lrc_desc_pool_destroy(guc);
 	i915_sched_engine_put(guc->sched_engine);
 }
@@ -1859,11 +1869,30 @@ unpin:
 static inline void guc_lrc_desc_unpin(struct intel_context *ce)
 {
 	struct intel_guc *guc = ce_to_guc(ce);
+	struct intel_gt *gt = guc_to_gt(guc);
+	unsigned long flags;
+	bool disabled;
 
+	GEM_BUG_ON(!intel_gt_pm_is_awake(gt));
 	GEM_BUG_ON(!lrc_desc_registered(guc, ce->guc_id.id));
 	GEM_BUG_ON(ce != __get_context(guc, ce->guc_id.id));
 	GEM_BUG_ON(context_enabled(ce));
 
+	/* Seal race with Reset */
+	spin_lock_irqsave(&ce->guc_state.lock, flags);
+	disabled = submission_disabled(guc);
+	if (likely(!disabled)) {
+		__intel_gt_pm_get(gt);
+		set_context_destroyed(ce);
+		clr_context_registered(ce);
+	}
+	spin_unlock_irqrestore(&ce->guc_state.lock, flags);
+	if (unlikely(disabled)) {
+		release_guc_id(guc, ce);
+		__guc_context_destroy(ce);
+		return;
+	}
+
 	deregister_context(ce, ce->guc_id.id);
 }
 
@@ -1891,78 +1920,86 @@ static void __guc_context_destroy(struct intel_context *ce)
 	}
 }
 
+static void guc_flush_destroyed_contexts(struct intel_guc *guc)
+{
+	struct intel_context *ce, *cn;
+	unsigned long flags;
+
+	GEM_BUG_ON(!submission_disabled(guc) &&
+		   guc_submission_initialized(guc));
+
+	spin_lock_irqsave(&guc->submission_state.lock, flags);
+	list_for_each_entry_safe(ce, cn,
+				 &guc->submission_state.destroyed_contexts,
+				 destroyed_link) {
+		list_del_init(&ce->destroyed_link);
+		__release_guc_id(guc, ce);
+		__guc_context_destroy(ce);
+	}
+	spin_unlock_irqrestore(&guc->submission_state.lock, flags);
+}
+
+static void deregister_destroyed_contexts(struct intel_guc *guc)
+{
+	struct intel_context *ce, *cn;
+	unsigned long flags;
+
+	spin_lock_irqsave(&guc->submission_state.lock, flags);
+	list_for_each_entry_safe(ce, cn,
+				 &guc->submission_state.destroyed_contexts,
+				 destroyed_link) {
+		list_del_init(&ce->destroyed_link);
+		guc_lrc_desc_unpin(ce);
+	}
+	spin_unlock_irqrestore(&guc->submission_state.lock, flags);
+}
+
+static void destroyed_worker_func(struct work_struct *w)
+{
+	struct intel_guc *guc = container_of(w, struct intel_guc,
+					     submission_state.destroyed_worker);
+	struct intel_gt *gt = guc_to_gt(guc);
+	int tmp;
+
+	with_intel_gt_pm(gt, tmp)
+		deregister_destroyed_contexts(guc);
+}
+
 static void guc_context_destroy(struct kref *kref)
 {
 	struct intel_context *ce = container_of(kref, typeof(*ce), ref);
-	struct intel_runtime_pm *runtime_pm = ce->engine->uncore->rpm;
 	struct intel_guc *guc = ce_to_guc(ce);
-	intel_wakeref_t wakeref;
 	unsigned long flags;
-	bool disabled;
+	bool destroy;
 
 	/*
 	 * If the guc_id is invalid this context has been stolen and we can free
 	 * it immediately. Also can be freed immediately if the context is not
 	 * registered with the GuC or the GuC is in the middle of a reset.
 	 */
-	if (context_guc_id_invalid(ce)) {
-		__guc_context_destroy(ce);
-		return;
-	} else if (submission_disabled(guc) ||
-		   !lrc_desc_registered(guc, ce->guc_id.id)) {
-		release_guc_id(guc, ce);
-		__guc_context_destroy(ce);
-		return;
-	}
-
-	/*
-	 * We have to acquire the context spinlock and check guc_id again, if it
-	 * is valid it hasn't been stolen and needs to be deregistered. We
-	 * delete this context from the list of unpinned guc_id available to
-	 * steal to seal a race with guc_lrc_desc_pin(). When the G2H CTB
-	 * returns indicating this context has been deregistered the guc_id is
-	 * returned to the pool of available guc_id.
-	 */
 	spin_lock_irqsave(&guc->submission_state.lock, flags);
-	if (context_guc_id_invalid(ce)) {
-		spin_unlock_irqrestore(&guc->submission_state.lock, flags);
-		__guc_context_destroy(ce);
-		return;
+	destroy = submission_disabled(guc) || context_guc_id_invalid(ce) ||
+		  !lrc_desc_registered(guc, ce->guc_id.id);
+	if (likely(!destroy)) {
+		if (!list_empty(&ce->guc_id.link))
+			list_del_init(&ce->guc_id.link);
+		list_add_tail(&ce->destroyed_link,
+			      &guc->submission_state.destroyed_contexts);
+	} else {
+		__release_guc_id(guc, ce);
 	}
-
-	if (!list_empty(&ce->guc_id.link))
-		list_del_init(&ce->guc_id.link);
 	spin_unlock_irqrestore(&guc->submission_state.lock, flags);
-
-	/* Seal race with Reset */
-	spin_lock_irqsave(&ce->guc_state.lock, flags);
-	disabled = submission_disabled(guc);
-	if (likely(!disabled)) {
-		set_context_destroyed(ce);
-		clr_context_registered(ce);
-	}
-	spin_unlock_irqrestore(&ce->guc_state.lock, flags);
-	if (unlikely(disabled)) {
-		release_guc_id(guc, ce);
+	if (unlikely(destroy)) {
 		__guc_context_destroy(ce);
 		return;
 	}
 
 	/*
-	 * We defer GuC context deregistration until the context is destroyed
-	 * in order to save on CTBs. With this optimization ideally we only need
-	 * 1 CTB to register the context during the first pin and 1 CTB to
-	 * deregister the context when the context is destroyed. Without this
-	 * optimization, a CTB would be needed every pin & unpin.
-	 *
-	 * XXX: Need to acqiure the runtime wakeref as this can be triggered
-	 * from context_free_worker when runtime wakeref is not held.
-	 * guc_lrc_desc_unpin requires the runtime as a GuC register is written
-	 * in H2G CTB to deregister the context. A future patch may defer this
-	 * H2G CTB if the runtime wakeref is zero.
+	 * We use a worker to issue the H2G to deregister the context as we can
+	 * take the GT PM for the first time which isn't allowed from an atomic
+	 * context.
	 */
-	with_intel_runtime_pm(runtime_pm, wakeref)
-		guc_lrc_desc_unpin(ce);
+	queue_work(system_unbound_wq, &guc->submission_state.destroyed_worker);
 }
 
 static int guc_context_alloc(struct intel_context *ce)
@@ -2798,6 +2835,7 @@ int intel_guc_deregister_done_process_msg(struct intel_guc *guc,
 		intel_context_put(ce);
 	} else if (context_destroyed(ce)) {
 		/* Context has been destroyed */
+		intel_gt_pm_put_async(guc_to_gt(guc));
 		release_guc_id(guc, ce);
 		__guc_context_destroy(ce);
 	}
-- 
cgit v1.2.3

From f61eae1815705494aa1cd7a8a94f3442fea328d7 Mon Sep 17 00:00:00 2001
From: Matthew Brost
Date: Thu, 14 Oct 2021 10:19:43 -0700
Subject: drm/i915/guc: Take engine PM when a context is pinned with GuC submission

Take a PM reference to prevent intel_gt_wait_for_idle from
short-circuiting while any user context has scheduling enabled.
Returning GT idle when it is not can cause all sorts of issues
throughout the stack.
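
The might_lock annotations added below build on lockdep: they record a
would-be acquisition of the wakeref mutex so lockdep can flag misuse
even on runs where the fast (atomic) path never takes the lock. A
simplified sketch of the idea (not the driver's exact code):

  #include <linux/lockdep.h>
  #include <linux/mutex.h>

  struct wakeref_like {
  	struct mutex mutex;
  };

  static inline void wakeref_might_get(struct wakeref_like *wf)
  {
  	/* tells lockdep this path may take wf->mutex, even if this
  	 * particular call never enters the slow path */
  	might_lock(&wf->mutex);
  }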
v2: (Daniel Vetter)
 - Add might_lock annotations to pin / unpin function
v3: (CI)
 - Drop intel_engine_pm_might_put from unpin path as an async put is used
v4: (John Harrison)
 - Make intel_engine_pm_might_get/put work with GuC virtual engines
 - Update commit message
v5:
 - Update commit message again

Signed-off-by: Matthew Brost
Reviewed-by: John Harrison
Signed-off-by: John Harrison
Link: https://patchwork.freedesktop.org/patch/msgid/20211014172005.27155-4-matthew.brost@intel.com
---
 drivers/gpu/drm/i915/gt/intel_context.c           |  2 ++
 drivers/gpu/drm/i915/gt/intel_engine_pm.h         | 32 ++++++++++++++++++++
 drivers/gpu/drm/i915/gt/intel_gt_pm.h             | 10 +++++++
 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c | 36 +++++++++++++++++++++--
 drivers/gpu/drm/i915/intel_wakeref.h              | 12 ++++++++
 5 files changed, 89 insertions(+), 3 deletions(-)
(limited to 'drivers/gpu')

diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c
index 0f25d07e55e5..7a7cd2448277 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.c
+++ b/drivers/gpu/drm/i915/gt/intel_context.c
@@ -240,6 +240,8 @@ int __intel_context_do_pin_ww(struct intel_context *ce,
 	if (err)
 		goto err_post_unpin;
 
+	intel_engine_pm_might_get(ce->engine);
+
 	if (unlikely(intel_context_is_closed(ce))) {
 		err = -ENOENT;
 		goto err_unlock;

diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.h b/drivers/gpu/drm/i915/gt/intel_engine_pm.h
index 6fdeae668e6e..d68675925b79 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pm.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.h
@@ -6,9 +6,11 @@
 #ifndef INTEL_ENGINE_PM_H
 #define INTEL_ENGINE_PM_H
 
+#include "i915_drv.h"
 #include "i915_request.h"
 #include "intel_engine_types.h"
 #include "intel_wakeref.h"
+#include "intel_gt_pm.h"
 
 static inline bool
 intel_engine_pm_is_awake(const struct intel_engine_cs *engine)
@@ -31,6 +33,21 @@ static inline bool intel_engine_pm_get_if_awake(struct intel_engine_cs *engine)
 	return intel_wakeref_get_if_active(&engine->wakeref);
 }
 
+static inline void intel_engine_pm_might_get(struct intel_engine_cs *engine)
+{
+	if (!intel_engine_is_virtual(engine)) {
+		intel_wakeref_might_get(&engine->wakeref);
+	} else {
+		struct intel_gt *gt = engine->gt;
+		struct intel_engine_cs *tengine;
+		intel_engine_mask_t tmp, mask = engine->mask;
+
+		for_each_engine_masked(tengine, gt, mask, tmp)
+			intel_wakeref_might_get(&tengine->wakeref);
+	}
+	intel_gt_pm_might_get(engine->gt);
+}
+
 static inline void intel_engine_pm_put(struct intel_engine_cs *engine)
 {
 	intel_wakeref_put(&engine->wakeref);
@@ -52,6 +69,21 @@ static inline void intel_engine_pm_flush(struct intel_engine_cs *engine)
 	intel_wakeref_unlock_wait(&engine->wakeref);
 }
 
+static inline void intel_engine_pm_might_put(struct intel_engine_cs *engine)
+{
+	if (!intel_engine_is_virtual(engine)) {
+		intel_wakeref_might_put(&engine->wakeref);
+	} else {
+		struct intel_gt *gt = engine->gt;
+		struct intel_engine_cs *tengine;
+		intel_engine_mask_t tmp, mask = engine->mask;
+
+		for_each_engine_masked(tengine, gt, mask, tmp)
+			intel_wakeref_might_put(&tengine->wakeref);
+	}
+	intel_gt_pm_might_put(engine->gt);
+}
+
 static inline struct i915_request *
 intel_engine_create_kernel_request(struct intel_engine_cs *engine)
 {

diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.h b/drivers/gpu/drm/i915/gt/intel_gt_pm.h
index 05de6c1af25b..bc898df7a48c 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.h
@@ -31,6 +31,11 @@ static inline bool intel_gt_pm_get_if_awake(struct intel_gt *gt)
 	return intel_wakeref_get_if_active(&gt->wakeref);
 }
 
+static inline void intel_gt_pm_might_get(struct intel_gt *gt)
+{
+	intel_wakeref_might_get(&gt->wakeref);
+}
+
 static inline void intel_gt_pm_put(struct intel_gt *gt)
 {
 	intel_wakeref_put(&gt->wakeref);
@@ -41,6 +46,11 @@ static inline void intel_gt_pm_put_async(struct intel_gt *gt)
 	intel_wakeref_put_async(&gt->wakeref);
 }
 
+static inline void intel_gt_pm_might_put(struct intel_gt *gt)
+{
+	intel_wakeref_might_put(&gt->wakeref);
+}
+
 #define with_intel_gt_pm(gt, tmp) \
 	for (tmp = 1, intel_gt_pm_get(gt); tmp; \
 	     intel_gt_pm_put(gt), tmp = 0)

diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index 5c53428032b5..795cc071d05d 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -1571,7 +1571,12 @@ static int guc_context_pre_pin(struct intel_context *ce,
 
 static int guc_context_pin(struct intel_context *ce, void *vaddr)
 {
-	return __guc_context_pin(ce, ce->engine, vaddr);
+	int ret = __guc_context_pin(ce, ce->engine, vaddr);
+
+	if (likely(!ret && !intel_context_is_barrier(ce)))
+		intel_engine_pm_get(ce->engine);
+
+	return ret;
 }
 
 static void guc_context_unpin(struct intel_context *ce)
@@ -1580,6 +1585,9 @@ static void guc_context_unpin(struct intel_context *ce)
 
 	unpin_guc_id(guc, ce);
 	lrc_unpin(ce);
+
+	if (likely(!intel_context_is_barrier(ce)))
+		intel_engine_pm_put_async(ce->engine);
 }
 
 static void guc_context_post_unpin(struct intel_context *ce)
@@ -2341,8 +2349,30 @@ static int guc_virtual_context_pre_pin(struct intel_context *ce,
 static int guc_virtual_context_pin(struct intel_context *ce, void *vaddr)
 {
 	struct intel_engine_cs *engine = guc_virtual_get_sibling(ce->engine, 0);
+	int ret = __guc_context_pin(ce, engine, vaddr);
+	intel_engine_mask_t tmp, mask = ce->engine->mask;
+
+	if (likely(!ret))
+		for_each_engine_masked(engine, ce->engine->gt, mask, tmp)
+			intel_engine_pm_get(engine);
 
-	return __guc_context_pin(ce, engine, vaddr);
+	return ret;
+}
+
+static void guc_virtual_context_unpin(struct intel_context *ce)
+{
+	intel_engine_mask_t tmp, mask = ce->engine->mask;
+	struct intel_engine_cs *engine;
+	struct intel_guc *guc = ce_to_guc(ce);
+
+	GEM_BUG_ON(context_enabled(ce));
+	GEM_BUG_ON(intel_context_is_barrier(ce));
+
+	unpin_guc_id(guc, ce);
+	lrc_unpin(ce);
+
+	for_each_engine_masked(engine, ce->engine->gt, mask, tmp)
+		intel_engine_pm_put_async(engine);
 }
 
 static void guc_virtual_context_enter(struct intel_context *ce)
@@ -2379,7 +2409,7 @@ static const struct intel_context_ops virtual_guc_context_ops = {
 
 	.pre_pin = guc_virtual_context_pre_pin,
 	.pin = guc_virtual_context_pin,
-	.unpin = guc_context_unpin,
+	.unpin = guc_virtual_context_unpin,
 	.post_unpin = guc_context_post_unpin,
 
 	.ban = guc_context_ban,

diff --git a/drivers/gpu/drm/i915/intel_wakeref.h b/drivers/gpu/drm/i915/intel_wakeref.h
index 545c8f277c46..4f4c2e15e736 100644
--- a/drivers/gpu/drm/i915/intel_wakeref.h
+++ b/drivers/gpu/drm/i915/intel_wakeref.h
@@ -123,6 +123,12 @@ enum {
 	__INTEL_WAKEREF_PUT_LAST_BIT__
 };
 
+static inline void
+intel_wakeref_might_get(struct intel_wakeref *wf)
+{
+	might_lock(&wf->mutex);
+}
+
 /**
 * intel_wakeref_put_flags: Release the wakeref
 * @wf: the wakeref
@@ -170,6 +176,12 @@ intel_wakeref_put_delay(struct intel_wakeref *wf, unsigned long delay)
 			 FIELD_PREP(INTEL_WAKEREF_PUT_DELAY, delay));
 }
 
+static inline void
+intel_wakeref_might_put(struct intel_wakeref *wf)
+{
+	might_lock(&wf->mutex);
+}
+
 /**
 * intel_wakeref_lock: Lock the wakeref (mutex)
 * @wf: the wakeref
-- 
cgit v1.2.3

From 3633242927101b4bcff14cb0f718ecd4d346c5b1 Mon Sep 17 00:00:00 2001
From: Matthew Brost
Date: Thu, 14 Oct 2021 10:19:44 -0700
Subject: drm/i915/guc: Don't call switch_to_kernel_context with GuC submission

Calling switch_to_kernel_context isn't needed if the engine PM
reference is taken while all user contexts are pinned; holding that PM
reference guarantees that scheduling is disabled for all user contexts.
By not calling switch_to_kernel_context we save on issuing a request to
the engine.

v2: (Daniel Vetter)
 - Add FIXME comment about pushing switch_to_kernel_context to backend
v3: (John Harrison)
 - Update commit message
 - Fix wording in comment

Signed-off-by: Matthew Brost
Reviewed-by: Daniel Vetter
Reviewed-by: John Harrison
Signed-off-by: John Harrison
Link: https://patchwork.freedesktop.org/patch/msgid/20211014172005.27155-5-matthew.brost@intel.com
---
 drivers/gpu/drm/i915/gt/intel_engine_pm.c | 13 +++++++++++++
 1 file changed, 13 insertions(+)
(limited to 'drivers/gpu')

diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
index dacd62773735..a1334b48dde7 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
@@ -162,6 +162,19 @@ static bool switch_to_kernel_context(struct intel_engine_cs *engine)
 	unsigned long flags;
 	bool result = true;
 
+	/*
+	 * This is execlist specific behaviour intended to ensure the GPU is
+	 * idle by switching to a known 'safe' context. With GuC submission, the
+	 * same idle guarantee is achieved by other means (disabling
+	 * scheduling). Further, switching to a 'safe' context has no effect
+	 * with GuC submission as the scheduler can just switch back again.
+	 *
+	 * FIXME: Move this backend scheduler specific behaviour into the
+	 * scheduler backend.
+	 */
+	if (intel_engine_uses_guc(engine))
+		return true;
+
 	/* GPU is pointing to the void, as good as in the kernel context. */
 	if (intel_gt_is_wedged(engine->gt))
 		return true;
-- 
cgit v1.2.3

From 4f3059dc2dbbc5547684558f18565719c365b30e Mon Sep 17 00:00:00 2001
From: Matthew Brost
Date: Thu, 14 Oct 2021 10:19:45 -0700
Subject: drm/i915: Add logical engine mapping

Add logical engine mapping. This is required for split-frame, as
workloads need to be placed on engines in a logically contiguous
manner.
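
To illustrate the mapping itself (a standalone sketch, not the driver
code): physical instances that survive fusing are packed into
contiguous logical ids, so physical instances {0, 2, 3} become logical
{0, 1, 2}:

  #include <stdio.h>

  /* Pack the physical instances present in a fuse mask into contiguous
   * logical ids; absent instances get -1.
   */
  static void map_logical(unsigned int present_mask, int *logical_ids, int n)
  {
  	int i, logical = 0;

  	for (i = 0; i < n; i++)
  		logical_ids[i] = (present_mask & (1u << i)) ? logical++ : -1;
  }

  int main(void)
  {
  	int ids[4], i;

  	map_logical(0xd, ids, 4);	/* instances 0, 2 and 3 present */
  	for (i = 0; i < 4; i++)
  		printf("physical %d -> logical %d\n", i, ids[i]);
  	return 0;	/* prints 0, -1, 1, 2 */
  }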
v2: (Daniel Vetter)
 - Add kernel doc for new fields
v3: (Tvrtko)
 - Update comment for new logical_mask field
v4: (John Harrison)
 - Update comment for new logical_mask field

Signed-off-by: Matthew Brost
Reviewed-by: John Harrison
Signed-off-by: John Harrison
Link: https://patchwork.freedesktop.org/patch/msgid/20211014172005.27155-6-matthew.brost@intel.com
---
 drivers/gpu/drm/i915/gt/intel_engine_cs.c          | 60 ++++++++++++++++++----
 drivers/gpu/drm/i915/gt/intel_engine_types.h       |  7 +++
 .../gpu/drm/i915/gt/intel_execlists_submission.c   |  1 +
 drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c         |  2 +-
 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c  | 21 +-------
 5 files changed, 62 insertions(+), 29 deletions(-)
(limited to 'drivers/gpu')

diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index 2ae57e4656a3..2eb798ad068b 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -290,7 +290,8 @@ static void nop_irq_handler(struct intel_engine_cs *engine, u16 iir)
 	GEM_DEBUG_WARN_ON(iir);
 }
 
-static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id)
+static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id,
+			      u8 logical_instance)
 {
 	const struct engine_info *info = &intel_engines[id];
 	struct drm_i915_private *i915 = gt->i915;
@@ -335,6 +336,7 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id)
 
 	engine->class = info->class;
 	engine->instance = info->instance;
+	engine->logical_mask = BIT(logical_instance);
 	__sprint_engine_name(engine);
 
 	engine->props.heartbeat_interval_ms =
@@ -588,6 +590,37 @@ static intel_engine_mask_t init_engine_mask(struct intel_gt *gt)
 	return info->engine_mask;
 }
 
+static void populate_logical_ids(struct intel_gt *gt, u8 *logical_ids,
+				 u8 class, const u8 *map, u8 num_instances)
+{
+	int i, j;
+	u8 current_logical_id = 0;
+
+	for (j = 0; j < num_instances; ++j) {
+		for (i = 0; i < ARRAY_SIZE(intel_engines); ++i) {
+			if (!HAS_ENGINE(gt, i) ||
+			    intel_engines[i].class != class)
+				continue;
+
+			if (intel_engines[i].instance == map[j]) {
+				logical_ids[intel_engines[i].instance] =
+					current_logical_id++;
+				break;
+			}
+		}
+	}
+}
+
+static void setup_logical_ids(struct intel_gt *gt, u8 *logical_ids, u8 class)
+{
+	int i;
+	u8 map[MAX_ENGINE_INSTANCE + 1];
+
+	for (i = 0; i < MAX_ENGINE_INSTANCE + 1; ++i)
+		map[i] = i;
+	populate_logical_ids(gt, logical_ids, class, map, ARRAY_SIZE(map));
+}
+
 /**
 * intel_engines_init_mmio() - allocate and prepare the Engine Command Streamers
 * @gt: pointer to struct intel_gt
@@ -599,7 +632,8 @@ int intel_engines_init_mmio(struct intel_gt *gt)
 	struct drm_i915_private *i915 = gt->i915;
 	const unsigned int engine_mask = init_engine_mask(gt);
 	unsigned int mask = 0;
-	unsigned int i;
+	unsigned int i, class;
+	u8 logical_ids[MAX_ENGINE_INSTANCE + 1];
 	int err;
 
 	drm_WARN_ON(&i915->drm, engine_mask == 0);
@@ -609,15 +643,23 @@ int intel_engines_init_mmio(struct intel_gt *gt)
 	if (i915_inject_probe_failure(i915))
 		return -ENODEV;
 
-	for (i = 0; i < ARRAY_SIZE(intel_engines); i++) {
-		if (!HAS_ENGINE(gt, i))
-			continue;
+	for (class = 0; class < MAX_ENGINE_CLASS + 1; ++class) {
+		setup_logical_ids(gt, logical_ids, class);
 
-		err = intel_engine_setup(gt, i);
-		if (err)
-			goto cleanup;
+		for (i = 0; i < ARRAY_SIZE(intel_engines); ++i) {
+			u8 instance = intel_engines[i].instance;
+
+			if (intel_engines[i].class != class ||
+			    !HAS_ENGINE(gt, i))
+				continue;
 
-		mask |= BIT(i);
+			err = intel_engine_setup(gt, i,
+						 logical_ids[instance]);
+			if (err)
+				goto cleanup;
+
+			mask |= BIT(i);
+		}
 	}
 
 	/*

diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index 9167ce52487c..e0f773585c29 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -269,6 +269,13 @@ struct intel_engine_cs {
 	unsigned int guc_id;
 
 	intel_engine_mask_t mask;
+	/**
+	 * @logical_mask: logical mask of engine, reported to user space via
+	 * query IOCTL and used to communicate with the GuC in logical space.
+	 * The logical instance of a physical engine can change based on product
+	 * and fusing.
+	 */
+	intel_engine_mask_t logical_mask;
 
 	u8 class;
 	u8 instance;

diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
index 73a79c2acd3a..43a74b216efb 100644
--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
@@ -3877,6 +3877,7 @@ execlists_create_virtual(struct intel_engine_cs **siblings, unsigned int count)
 
 		ve->siblings[ve->num_siblings++] = sibling;
 		ve->base.mask |= sibling->mask;
+		ve->base.logical_mask |= sibling->logical_mask;
 
 		/*
 		 * All physical engines must be compatible for their emission

diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
index 2c6ea64af7ec..621c893a009f 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
@@ -176,7 +176,7 @@ static void guc_mapping_table_init(struct intel_gt *gt,
 	for_each_engine(engine, gt, id) {
 		u8 guc_class = engine_class_to_guc_class(engine->class);
 
-		system_info->mapping_table[guc_class][engine->instance] =
+		system_info->mapping_table[guc_class][ilog2(engine->logical_mask)] =
 			engine->instance;
 	}
 }

diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index 795cc071d05d..fd6594bc1b96 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -1423,23 +1423,6 @@ static int deregister_context(struct intel_context *ce, u32 guc_id)
 	return __guc_action_deregister_context(guc, guc_id);
 }
 
-static intel_engine_mask_t adjust_engine_mask(u8 class, intel_engine_mask_t mask)
-{
-	switch (class) {
-	case RENDER_CLASS:
-		return mask >> RCS0;
-	case VIDEO_ENHANCEMENT_CLASS:
-		return mask >> VECS0;
-	case VIDEO_DECODE_CLASS:
-		return mask >> VCS0;
-	case COPY_ENGINE_CLASS:
-		return mask >> BCS0;
-	default:
-		MISSING_CASE(class);
-		return 0;
-	}
-}
-
 static void guc_context_policy_init(struct intel_engine_cs *engine,
 				    struct guc_lrc_desc *desc)
 {
@@ -1481,8 +1464,7 @@ static int guc_lrc_desc_pin(struct intel_context *ce, bool loop)
 
 	desc = __get_lrc_desc(guc, desc_idx);
 	desc->engine_class = engine_class_to_guc_class(engine->class);
-	desc->engine_submit_mask = adjust_engine_mask(engine->class,
-						      engine->mask);
+	desc->engine_submit_mask = engine->logical_mask;
 	desc->hw_context_desc = ce->lrc.lrca;
 	desc->priority = ce->guc_state.prio;
 	desc->context_flags = CONTEXT_REGISTRATION_FLAG_KMD;
@@ -3271,6 +3253,7 @@ guc_create_virtual(struct intel_engine_cs **siblings, unsigned int count)
 	}
 
 	ve->base.mask |= sibling->mask;
+	ve->base.logical_mask |= sibling->logical_mask;
 
 	if (n != 0 && ve->base.class != sibling->class) {
 		DRM_DEBUG("invalid mixing of engine class, sibling %d, already %d\n",
-- 
cgit v1.2.3

From 9409eb35942713d0cdd471e5ff99c93929d6a749 Mon Sep 17 00:00:00 2001
From: Matthew Brost
Date: Thu, 14 Oct 2021 10:19:46 -0700
Subject: drm/i915: Expose logical engine instance to user

Expose the logical engine instance to user space via the query engine
info IOCTL. This is required for split-frame workloads, as these need
to be placed on engines in a logically contiguous order. The logical
mapping can change based on fusing. Rather than requiring userspace to
know the fusing, we simply expose the logical mapping with the existing
query engine info IOCTL.

IGT: https://patchwork.freedesktop.org/patch/445637/?series=92854&rev=1
media UMD: https://github.com/intel/media-driver/pull/1252

v2: (Daniel Vetter)
 - Add IGT link, placeholder for media UMD

Cc: Tvrtko Ursulin
Signed-off-by: Matthew Brost
Reviewed-by: John Harrison
Signed-off-by: John Harrison
Link: https://patchwork.freedesktop.org/patch/msgid/20211014172005.27155-7-matthew.brost@intel.com
---
 drivers/gpu/drm/i915/i915_query.c | 2 ++
 1 file changed, 2 insertions(+)
(limited to 'drivers/gpu')

diff --git a/drivers/gpu/drm/i915/i915_query.c b/drivers/gpu/drm/i915/i915_query.c
index 5e2b909827f4..51b368be0fc4 100644
--- a/drivers/gpu/drm/i915/i915_query.c
+++ b/drivers/gpu/drm/i915/i915_query.c
@@ -124,7 +124,9 @@ query_engine_info(struct drm_i915_private *i915,
 	for_each_uabi_engine(engine, i915) {
 		info.engine.engine_class = engine->uabi_class;
 		info.engine.engine_instance = engine->uabi_instance;
+		info.flags = I915_ENGINE_INFO_HAS_LOGICAL_INSTANCE;
 		info.capabilities = engine->uabi_capabilities;
+		info.logical_instance = ilog2(engine->logical_mask);
 
 		if (copy_to_user(info_ptr, &info, sizeof(info)))
 			return -EFAULT;
-- 
cgit v1.2.3

From 3897df4c0187d0f38fff6944c3beab4b6aa92a1b Mon Sep 17 00:00:00 2001
From: Matthew Brost
Date: Thu, 14 Oct 2021 10:19:47 -0700
Subject: drm/i915/guc: Introduce context parent-child relationship

Introduce a context parent-child relationship. Once this relationship
is created, all pinning / unpinning operations are directed to the
parent context. The parent context is responsible for pinning all of
its children and itself.

This is a precursor to the full GuC multi-lrc implementation, but
aligns with how the GuC multi-lrc interface is defined - a single H2G
is used to register / deregister all of the contexts simultaneously.

Subsequent patches in the series will implement the pinning / unpinning
operations for parent / child contexts.
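
As a usage sketch (the caller and its context array are hypothetical):
binding children to a parent simply links them into the parent's list
and bumps number_children:

  /* Hypothetical caller: make ce[0] the parent of ce[1..width-1]. */
  static void bind_parallel_group(struct intel_context **ce, unsigned int width)
  {
  	unsigned int i;

  	for (i = 1; i < width; ++i)
  		intel_context_bind_parent_child(ce[0], ce[i]);

  	/* from here, intel_context_to_parent(ce[i]) returns ce[0] */
  }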
v2: (Daniel Vetter) - Add kernel doc, add wrapper to access parent to ensure safety v3: (John Harrison) - Fix comment explaing GEM_BUG_ON in to_parent() - Make variable names generic (non-GuC specific) v4: (John Harrison) - s/its'/its/g Signed-off-by: Matthew Brost Reviewed-by: John Harrison Signed-off-by: John Harrison Link: https://patchwork.freedesktop.org/patch/msgid/20211014172005.27155-8-matthew.brost@intel.com --- drivers/gpu/drm/i915/gt/intel_context.c | 29 +++++++++++++++++++ drivers/gpu/drm/i915/gt/intel_context.h | 41 +++++++++++++++++++++++++++ drivers/gpu/drm/i915/gt/intel_context_types.h | 21 ++++++++++++++ 3 files changed, 91 insertions(+) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c index 7a7cd2448277..4d95053855c9 100644 --- a/drivers/gpu/drm/i915/gt/intel_context.c +++ b/drivers/gpu/drm/i915/gt/intel_context.c @@ -403,6 +403,8 @@ intel_context_init(struct intel_context *ce, struct intel_engine_cs *engine) INIT_LIST_HEAD(&ce->destroyed_link); + INIT_LIST_HEAD(&ce->parallel.child_list); + /* * Initialize fence to be complete as this is expected to be complete * unless there is a pending schedule disable outstanding. @@ -417,10 +419,17 @@ intel_context_init(struct intel_context *ce, struct intel_engine_cs *engine) void intel_context_fini(struct intel_context *ce) { + struct intel_context *child, *next; + if (ce->timeline) intel_timeline_put(ce->timeline); i915_vm_put(ce->vm); + /* Need to put the creation ref for the children */ + if (intel_context_is_parent(ce)) + for_each_child_safe(ce, child, next) + intel_context_put(child); + mutex_destroy(&ce->pin_mutex); i915_active_fini(&ce->active); i915_sw_fence_fini(&ce->guc_state.blocked); @@ -537,6 +546,26 @@ struct i915_request *intel_context_find_active_request(struct intel_context *ce) return active; } +void intel_context_bind_parent_child(struct intel_context *parent, + struct intel_context *child) +{ + /* + * Callers responsibility to validate that this function is used + * correctly but we use GEM_BUG_ON here ensure that they do. 
+ */ + GEM_BUG_ON(!intel_engine_uses_guc(parent->engine)); + GEM_BUG_ON(intel_context_is_pinned(parent)); + GEM_BUG_ON(intel_context_is_child(parent)); + GEM_BUG_ON(intel_context_is_pinned(child)); + GEM_BUG_ON(intel_context_is_child(child)); + GEM_BUG_ON(intel_context_is_parent(child)); + + parent->parallel.number_children++; + list_add_tail(&child->parallel.child_link, + &parent->parallel.child_list); + child->parallel.parent = parent; +} + #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) #include "selftest_context.c" #endif diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h index c41098950746..b63c10a144af 100644 --- a/drivers/gpu/drm/i915/gt/intel_context.h +++ b/drivers/gpu/drm/i915/gt/intel_context.h @@ -44,6 +44,47 @@ void intel_context_free(struct intel_context *ce); int intel_context_reconfigure_sseu(struct intel_context *ce, const struct intel_sseu sseu); +static inline bool intel_context_is_child(struct intel_context *ce) +{ + return !!ce->parallel.parent; +} + +static inline bool intel_context_is_parent(struct intel_context *ce) +{ + return !!ce->parallel.number_children; +} + +static inline bool intel_context_is_pinned(struct intel_context *ce); + +static inline struct intel_context * +intel_context_to_parent(struct intel_context *ce) +{ + if (intel_context_is_child(ce)) { + /* + * The parent holds ref count to the child so it is always safe + * for the parent to access the child, but the child has a + * pointer to the parent without a ref. To ensure this is safe + * the child should only access the parent pointer while the + * parent is pinned. + */ + GEM_BUG_ON(!intel_context_is_pinned(ce->parallel.parent)); + + return ce->parallel.parent; + } else { + return ce; + } +} + +void intel_context_bind_parent_child(struct intel_context *parent, + struct intel_context *child); + +#define for_each_child(parent, ce)\ + list_for_each_entry(ce, &(parent)->parallel.child_list,\ + parallel.child_link) +#define for_each_child_safe(parent, ce, cn)\ + list_for_each_entry_safe(ce, cn, &(parent)->parallel.child_list,\ + parallel.child_link) + /** * intel_context_lock_pinned - Stablises the 'pinned' status of the HW context * @ce - the context diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h index 4613d027cbc3..76dfca57cb45 100644 --- a/drivers/gpu/drm/i915/gt/intel_context_types.h +++ b/drivers/gpu/drm/i915/gt/intel_context_types.h @@ -220,6 +220,27 @@ struct intel_context { */ struct list_head destroyed_link; + /** @parallel: sub-structure for parallel submission members */ + struct { + union { + /** + * @child_list: parent's list of children + * contexts, no protection as immutable after context + * creation + */ + struct list_head child_list; + /** + * @child_link: child's link into parent's list of + * children + */ + struct list_head child_link; + }; + /** @parent: pointer to parent if child */ + struct intel_context *parent; + /** @number_children: number of children if parent */ + u8 number_children; + } parallel; + #ifdef CONFIG_DRM_I915_SELFTEST /** * @drop_schedule_enable: Force drop of schedule enable G2H for selftest -- cgit v1.2.3 From c2aa552ff09daf78944f44e98d366009b27f1b63 Mon Sep 17 00:00:00 2001 From: Matthew Brost Date: Thu, 14 Oct 2021 10:19:48 -0700 Subject: drm/i915/guc: Add multi-lrc context registration Add multi-lrc context registration H2G. 
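For reference, the dword layout of this registration H2G, paraphrased from __guc_action_register_multi_lrc() in the diff below (illustrative summary, not taken from GuC firmware documentation):

	dw0:    INTEL_GUC_ACTION_REGISTER_CONTEXT_MULTI_LRC
	dw1:    guc_id of the parent context
	dw2:    number of contexts (parent + children)
	dw3..n: GGTT offset of each context's guc_lrc_desc, parent first,
	        with each child's descriptor following at
	        sizeof(struct guc_lrc_desc) strides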
In addition a workqueue and process descriptor are setup during multi-lrc context registration as these data structures are needed for multi-lrc submission. v2: (John Harrison) - Move GuC specific fields into sub-struct - Clean up WQ defines - Add comment explaining math to derive WQ / PD address v3: (John Harrison) - Add PARENT_SCRATCH_SIZE define - Update comment explaining multi-lrc register v4: (John Harrison) - Move PARENT_SCRATCH_SIZE to common file Signed-off-by: Matthew Brost Reviewed-by: John Harrison Signed-off-by: John Harrison Link: https://patchwork.freedesktop.org/patch/msgid/20211014172005.27155-9-matthew.brost@intel.com --- drivers/gpu/drm/i915/gt/intel_context.h | 2 + drivers/gpu/drm/i915/gt/intel_context_types.h | 12 +++ drivers/gpu/drm/i915/gt/intel_lrc.c | 5 + drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h | 1 + drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h | 2 - drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c | 115 +++++++++++++++++++++- 6 files changed, 134 insertions(+), 3 deletions(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h index b63c10a144af..9f0995150a7a 100644 --- a/drivers/gpu/drm/i915/gt/intel_context.h +++ b/drivers/gpu/drm/i915/gt/intel_context.h @@ -44,6 +44,8 @@ void intel_context_free(struct intel_context *ce); int intel_context_reconfigure_sseu(struct intel_context *ce, const struct intel_sseu sseu); +#define PARENT_SCRATCH_SIZE PAGE_SIZE + static inline bool intel_context_is_child(struct intel_context *ce) { return !!ce->parallel.parent; diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h index 76dfca57cb45..48decb5ee954 100644 --- a/drivers/gpu/drm/i915/gt/intel_context_types.h +++ b/drivers/gpu/drm/i915/gt/intel_context_types.h @@ -239,6 +239,18 @@ struct intel_context { struct intel_context *parent; /** @number_children: number of children if parent */ u8 number_children; + /** @guc: GuC specific members for parallel submission */ + struct { + /** @wqi_head: head pointer in work queue */ + u16 wqi_head; + /** @wqi_tail: tail pointer in work queue */ + u16 wqi_tail; + /** + * @parent_page: page in context state (ce->state) used + * by parent for work queue, process descriptor + */ + u8 parent_page; + } guc; } parallel; #ifdef CONFIG_DRM_I915_SELFTEST diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index 3ef9eaf8c50e..56156cf18c41 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -942,6 +942,11 @@ __lrc_alloc_state(struct intel_context *ce, struct intel_engine_cs *engine) context_size += PAGE_SIZE; } + if (intel_context_is_parent(ce) && intel_engine_uses_guc(engine)) { + ce->parallel.guc.parent_page = context_size / PAGE_SIZE; + context_size += PARENT_SCRATCH_SIZE; + } + obj = i915_gem_object_create_lmem(engine->i915, context_size, I915_BO_ALLOC_PM_VOLATILE); if (IS_ERR(obj)) diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h index 8ff582222aff..ba10bd374cee 100644 --- a/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h +++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h @@ -142,6 +142,7 @@ enum intel_guc_action { INTEL_GUC_ACTION_REGISTER_COMMAND_TRANSPORT_BUFFER = 0x4505, INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER = 0x4506, INTEL_GUC_ACTION_DEREGISTER_CONTEXT_DONE = 0x4600, + INTEL_GUC_ACTION_REGISTER_CONTEXT_MULTI_LRC = 0x4601, INTEL_GUC_ACTION_RESET_CLIENT = 
0x5507, INTEL_GUC_ACTION_LIMIT }; diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h index fa4be13c8854..0eeb2a9feeed 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h @@ -52,8 +52,6 @@ #define GUC_DOORBELL_INVALID 256 -#define GUC_WQ_SIZE (PAGE_SIZE * 2) - /* Work queue item header definitions */ #define WQ_STATUS_ACTIVE 1 #define WQ_STATUS_SUSPENDED 2 diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c index fd6594bc1b96..d9f5be00e586 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c @@ -344,6 +344,46 @@ static inline struct i915_priolist *to_priolist(struct rb_node *rb) return rb_entry(rb, struct i915_priolist, node); } +/* + * When using multi-lrc submission a scratch memory area is reserved in the + * parent's context state for the process descriptor and work queue. Currently + * the scratch area is sized to a page. + * + * The layout of this scratch area is below: + * 0 guc_process_desc + * ... unused + * PARENT_SCRATCH_SIZE / 2 work queue start + * ... work queue + * PARENT_SCRATCH_SIZE - 1 work queue end + */ +#define WQ_SIZE (PARENT_SCRATCH_SIZE / 2) +#define WQ_OFFSET (PARENT_SCRATCH_SIZE - WQ_SIZE) +static u32 __get_process_desc_offset(struct intel_context *ce) +{ + GEM_BUG_ON(!ce->parallel.guc.parent_page); + + return ce->parallel.guc.parent_page * PAGE_SIZE; +} + +static u32 __get_wq_offset(struct intel_context *ce) +{ + return __get_process_desc_offset(ce) + WQ_OFFSET; +} + +static struct guc_process_desc * +__get_process_desc(struct intel_context *ce) +{ + /* + * Need to subtract LRC_STATE_OFFSET here as the + * parallel.guc.parent_page is the offset into ce->state while + * ce->lrc_reg_state is ce->state + LRC_STATE_OFFSET.
+ */ + return (struct guc_process_desc *) + (ce->lrc_reg_state + + ((__get_process_desc_offset(ce) - + LRC_STATE_OFFSET) / sizeof(u32))); +} + static struct guc_lrc_desc *__get_lrc_desc(struct intel_guc *guc, u32 index) { struct guc_lrc_desc *base = guc->lrc_desc_pool_vaddr; @@ -1365,6 +1405,30 @@ static void unpin_guc_id(struct intel_guc *guc, struct intel_context *ce) spin_unlock_irqrestore(&guc->submission_state.lock, flags); } +static int __guc_action_register_multi_lrc(struct intel_guc *guc, + struct intel_context *ce, + u32 guc_id, + u32 offset, + bool loop) +{ + struct intel_context *child; + u32 action[4 + MAX_ENGINE_INSTANCE]; + int len = 0; + + GEM_BUG_ON(ce->parallel.number_children > MAX_ENGINE_INSTANCE); + + action[len++] = INTEL_GUC_ACTION_REGISTER_CONTEXT_MULTI_LRC; + action[len++] = guc_id; + action[len++] = ce->parallel.number_children + 1; + action[len++] = offset; + for_each_child(ce, child) { + offset += sizeof(struct guc_lrc_desc); + action[len++] = offset; + } + + return guc_submission_send_busy_loop(guc, action, len, 0, loop); +} + static int __guc_action_register_context(struct intel_guc *guc, u32 guc_id, u32 offset, @@ -1387,9 +1451,15 @@ static int register_context(struct intel_context *ce, bool loop) ce->guc_id.id * sizeof(struct guc_lrc_desc); int ret; + GEM_BUG_ON(intel_context_is_child(ce)); trace_intel_context_register(ce); - ret = __guc_action_register_context(guc, ce->guc_id.id, offset, loop); + if (intel_context_is_parent(ce)) + ret = __guc_action_register_multi_lrc(guc, ce, ce->guc_id.id, + offset, loop); + else + ret = __guc_action_register_context(guc, ce->guc_id.id, offset, + loop); if (likely(!ret)) { unsigned long flags; @@ -1418,6 +1488,7 @@ static int deregister_context(struct intel_context *ce, u32 guc_id) { struct intel_guc *guc = ce_to_guc(ce); + GEM_BUG_ON(intel_context_is_child(ce)); trace_intel_context_deregister(ce); return __guc_action_deregister_context(guc, guc_id); @@ -1445,6 +1516,7 @@ static int guc_lrc_desc_pin(struct intel_context *ce, bool loop) struct guc_lrc_desc *desc; bool context_registered; intel_wakeref_t wakeref; + struct intel_context *child; int ret = 0; GEM_BUG_ON(!engine->mask); @@ -1470,6 +1542,41 @@ static int guc_lrc_desc_pin(struct intel_context *ce, bool loop) desc->context_flags = CONTEXT_REGISTRATION_FLAG_KMD; guc_context_policy_init(engine, desc); + /* + * If context is a parent, we need to register a process descriptor + * describing a work queue and register all child contexts. + */ + if (intel_context_is_parent(ce)) { + struct guc_process_desc *pdesc; + + ce->parallel.guc.wqi_tail = 0; + ce->parallel.guc.wqi_head = 0; + + desc->process_desc = i915_ggtt_offset(ce->state) + + __get_process_desc_offset(ce); + desc->wq_addr = i915_ggtt_offset(ce->state) + + __get_wq_offset(ce); + desc->wq_size = WQ_SIZE; + + pdesc = __get_process_desc(ce); + memset(pdesc, 0, sizeof(*(pdesc))); + pdesc->stage_id = ce->guc_id.id; + pdesc->wq_base_addr = desc->wq_addr; + pdesc->wq_size_bytes = desc->wq_size; + pdesc->wq_status = WQ_STATUS_ACTIVE; + + for_each_child(ce, child) { + desc = __get_lrc_desc(guc, child->guc_id.id); + + desc->engine_class = + engine_class_to_guc_class(engine->class); + desc->hw_context_desc = child->lrc.lrca; + desc->priority = ce->guc_state.prio; + desc->context_flags = CONTEXT_REGISTRATION_FLAG_KMD; + guc_context_policy_init(engine, desc); + } + } + /* * The context_lookup xarray is used to determine if the hardware * context is currently registered. 
There are two cases in which it @@ -2804,6 +2911,12 @@ g2h_context_lookup(struct intel_guc *guc, u32 desc_idx) return NULL; } + if (unlikely(intel_context_is_child(ce))) { + drm_err(&guc_to_gt(guc)->i915->drm, + "Context is child, desc_idx %u", desc_idx); + return NULL; + } + return ce; } -- cgit v1.2.3 From 44d25fec1a5d9615fab25fa90a3e70eef21e5d05 Mon Sep 17 00:00:00 2001 From: Matthew Brost Date: Thu, 14 Oct 2021 10:19:49 -0700 Subject: drm/i915/guc: Ensure GuC schedule operations do not operate on child contexts In GuC parent-child contexts the parent context controls the scheduling, so ensure only the parent performs the scheduling operations. Signed-off-by: Matthew Brost Reviewed-by: John Harrison Signed-off-by: John Harrison Link: https://patchwork.freedesktop.org/patch/msgid/20211014172005.27155-10-matthew.brost@intel.com --- drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c index d9f5be00e586..fbcf2dc2b2de 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c @@ -324,6 +324,12 @@ static inline void decr_context_committed_requests(struct intel_context *ce) GEM_BUG_ON(ce->guc_state.number_committed_requests < 0); } +static struct intel_context * +request_to_scheduling_context(struct i915_request *rq) +{ + return intel_context_to_parent(rq->context); +} + static inline bool context_guc_id_invalid(struct intel_context *ce) { return ce->guc_id.id == GUC_INVALID_LRC_ID; } @@ -1711,6 +1717,7 @@ static void __guc_context_sched_disable(struct intel_guc *guc, GEM_BUG_ON(guc_id == GUC_INVALID_LRC_ID); + GEM_BUG_ON(intel_context_is_child(ce)); trace_intel_context_sched_disable(ce); guc_submission_send_busy_loop(guc, action, ARRAY_SIZE(action), @@ -1936,6 +1943,8 @@ static void guc_context_sched_disable(struct intel_context *ce) intel_wakeref_t wakeref; u16 guc_id; + GEM_BUG_ON(intel_context_is_child(ce)); + spin_lock_irqsave(&ce->guc_state.lock, flags); /* @@ -2304,6 +2313,8 @@ static void guc_signal_context_fence(struct intel_context *ce) { unsigned long flags; + GEM_BUG_ON(intel_context_is_child(ce)); + spin_lock_irqsave(&ce->guc_state.lock, flags); clr_context_wait_for_deregister_to_register(ce); __guc_signal_context_fence(ce); @@ -2334,7 +2345,7 @@ static void guc_context_init(struct intel_context *ce) static int guc_request_alloc(struct i915_request *rq) { - struct intel_context *ce = rq->context; + struct intel_context *ce = request_to_scheduling_context(rq); struct intel_guc *guc = ce_to_guc(ce); unsigned long flags; int ret; -- cgit v1.2.3 From 09c5e3a5e509bb10249c8252dc10c0d8a842e4ae Mon Sep 17 00:00:00 2001 From: Matthew Brost Date: Thu, 14 Oct 2021 10:19:50 -0700 Subject: drm/i915/guc: Assign contexts in parent-child relationship consecutive guc_ids Assign contexts in parent-child relationship consecutive guc_ids. This is accomplished by partitioning guc_id space between ones that need to be consecutive (1/16 of the available guc_ids) and ones that do not (15/16 of the available guc_ids). The consecutive search is implemented via the bitmap API. This is a precursor to the full GuC multi-lrc implementation but aligns to how the GuC multi-lrc interface is defined - guc_ids must be consecutive when using the GuC multi-lrc interface.
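The resulting partition of the guc_id space, paraphrasing the allocation paths in the diff below:

	[0, NUMBER_MULTI_LRC_GUC_ID)
		multi-lrc ids, allocated from a bitmap in contiguous
		power-of-2 blocks of order_base_2(number_children + 1)
	[NUMBER_MULTI_LRC_GUC_ID, GUC_MAX_LRC_DESCRIPTORS)
		single-lrc ids, allocated individually via the ida

where NUMBER_MULTI_LRC_GUC_ID is GUC_MAX_LRC_DESCRIPTORS / 16.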
v2: (Daniel Vetter) - Explicitly state why we assign consecutive guc_ids v3: (John Harrison) - Bring back in spin lock Signed-off-by: Matthew Brost Reviewed-by: John Harrison Signed-off-by: John Harrison Link: https://patchwork.freedesktop.org/patch/msgid/20211014172005.27155-11-matthew.brost@intel.com --- drivers/gpu/drm/i915/gt/uc/intel_guc.h | 6 +- drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c | 104 +++++++++++++++++----- 2 files changed, 86 insertions(+), 24 deletions(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h index 74f071a0b6d5..4ca197f400ba 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h @@ -82,9 +82,13 @@ struct intel_guc { */ spinlock_t lock; /** - * @guc_ids: used to allocate new guc_ids + * @guc_ids: used to allocate new guc_ids, single-lrc */ struct ida guc_ids; + /** + * @guc_ids_bitmap: used to allocate new guc_ids, multi-lrc + */ + unsigned long *guc_ids_bitmap; /** * @guc_id_list: list of intel_context with valid guc_ids but no * refs diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c index fbcf2dc2b2de..7ba4f1d6f69c 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c @@ -128,6 +128,16 @@ guc_create_virtual(struct intel_engine_cs **siblings, unsigned int count); #define GUC_REQUEST_SIZE 64 /* bytes */ +/* + * We reserve 1/16 of the guc_ids for multi-lrc as these need to be contiguous + * per the GuC submission interface. A different allocation algorithm is used + * (bitmap vs. ida) between multi-lrc and single-lrc hence the reason to + * partition the guc_id space. We believe the number of multi-lrc contexts in + * use should be low and 1/16 should be sufficient. Minimum of 32 guc_ids for + * multi-lrc. + */ +#define NUMBER_MULTI_LRC_GUC_ID (GUC_MAX_LRC_DESCRIPTORS / 16) + /* * Below is a set of functions which control the GuC scheduling state which * require a lock. 
@@ -1207,6 +1217,11 @@ int intel_guc_submission_init(struct intel_guc *guc) INIT_WORK(&guc->submission_state.destroyed_worker, destroyed_worker_func); + guc->submission_state.guc_ids_bitmap = + bitmap_zalloc(NUMBER_MULTI_LRC_GUC_ID, GFP_KERNEL); + if (!guc->submission_state.guc_ids_bitmap) + return -ENOMEM; + return 0; } @@ -1218,6 +1233,7 @@ void intel_guc_submission_fini(struct intel_guc *guc) guc_flush_destroyed_contexts(guc); guc_lrc_desc_pool_destroy(guc); i915_sched_engine_put(guc->sched_engine); + bitmap_free(guc->submission_state.guc_ids_bitmap); } static inline void queue_request(struct i915_sched_engine *sched_engine, @@ -1269,18 +1285,43 @@ static void guc_submit_request(struct i915_request *rq) spin_unlock_irqrestore(&sched_engine->lock, flags); } -static int new_guc_id(struct intel_guc *guc) +static int new_guc_id(struct intel_guc *guc, struct intel_context *ce) { - return ida_simple_get(&guc->submission_state.guc_ids, 0, - GUC_MAX_LRC_DESCRIPTORS, GFP_KERNEL | - __GFP_RETRY_MAYFAIL | __GFP_NOWARN); + int ret; + + GEM_BUG_ON(intel_context_is_child(ce)); + + if (intel_context_is_parent(ce)) + ret = bitmap_find_free_region(guc->submission_state.guc_ids_bitmap, + NUMBER_MULTI_LRC_GUC_ID, + order_base_2(ce->parallel.number_children + + 1)); + else + ret = ida_simple_get(&guc->submission_state.guc_ids, + NUMBER_MULTI_LRC_GUC_ID, + GUC_MAX_LRC_DESCRIPTORS, + GFP_KERNEL | __GFP_RETRY_MAYFAIL | + __GFP_NOWARN); + if (unlikely(ret < 0)) + return ret; + + ce->guc_id.id = ret; + return 0; } static void __release_guc_id(struct intel_guc *guc, struct intel_context *ce) { + GEM_BUG_ON(intel_context_is_child(ce)); + if (!context_guc_id_invalid(ce)) { - ida_simple_remove(&guc->submission_state.guc_ids, - ce->guc_id.id); + if (intel_context_is_parent(ce)) + bitmap_release_region(guc->submission_state.guc_ids_bitmap, + ce->guc_id.id, + order_base_2(ce->parallel.number_children + + 1)); + else + ida_simple_remove(&guc->submission_state.guc_ids, + ce->guc_id.id); reset_lrc_desc(guc, ce->guc_id.id); set_context_guc_id_invalid(ce); } @@ -1297,49 +1338,64 @@ static void release_guc_id(struct intel_guc *guc, struct intel_context *ce) spin_unlock_irqrestore(&guc->submission_state.lock, flags); } -static int steal_guc_id(struct intel_guc *guc) +static int steal_guc_id(struct intel_guc *guc, struct intel_context *ce) { - struct intel_context *ce; - int guc_id; + struct intel_context *cn; lockdep_assert_held(&guc->submission_state.lock); + GEM_BUG_ON(intel_context_is_child(ce)); + GEM_BUG_ON(intel_context_is_parent(ce)); if (!list_empty(&guc->submission_state.guc_id_list)) { - ce = list_first_entry(&guc->submission_state.guc_id_list, + cn = list_first_entry(&guc->submission_state.guc_id_list, struct intel_context, guc_id.link); - GEM_BUG_ON(atomic_read(&ce->guc_id.ref)); - GEM_BUG_ON(context_guc_id_invalid(ce)); + GEM_BUG_ON(atomic_read(&cn->guc_id.ref)); + GEM_BUG_ON(context_guc_id_invalid(cn)); + GEM_BUG_ON(intel_context_is_child(cn)); + GEM_BUG_ON(intel_context_is_parent(cn)); - list_del_init(&ce->guc_id.link); - guc_id = ce->guc_id.id; + list_del_init(&cn->guc_id.link); + ce->guc_id = cn->guc_id; spin_lock(&ce->guc_state.lock); - clr_context_registered(ce); + clr_context_registered(cn); spin_unlock(&ce->guc_state.lock); - set_context_guc_id_invalid(ce); - return guc_id; + set_context_guc_id_invalid(cn); + + return 0; } else { return -EAGAIN; } } -static int assign_guc_id(struct intel_guc *guc, u16 *out) +static int assign_guc_id(struct intel_guc *guc, struct intel_context *ce) { int ret; 
lockdep_assert_held(&guc->submission_state.lock); + GEM_BUG_ON(intel_context_is_child(ce)); - ret = new_guc_id(guc); + ret = new_guc_id(guc, ce); if (unlikely(ret < 0)) { - ret = steal_guc_id(guc); + if (intel_context_is_parent(ce)) + return -ENOSPC; + + ret = steal_guc_id(guc, ce); if (ret < 0) return ret; } - *out = ret; + if (intel_context_is_parent(ce)) { + struct intel_context *child; + int i = 1; + + for_each_child(ce, child) + child->guc_id.id = ce->guc_id.id + i++; + } + return 0; } @@ -1357,7 +1413,7 @@ try_again: might_lock(&ce->guc_state.lock); if (context_guc_id_invalid(ce)) { - ret = assign_guc_id(guc, &ce->guc_id.id); + ret = assign_guc_id(guc, ce); if (ret) goto out_unlock; ret = 1; /* Indicates newly assigned guc_id */ @@ -1399,8 +1455,10 @@ static void unpin_guc_id(struct intel_guc *guc, struct intel_context *ce) unsigned long flags; GEM_BUG_ON(atomic_read(&ce->guc_id.ref) < 0); + GEM_BUG_ON(intel_context_is_child(ce)); - if (unlikely(context_guc_id_invalid(ce))) + if (unlikely(context_guc_id_invalid(ce) || + intel_context_is_parent(ce))) return; spin_lock_irqsave(&guc->submission_state.lock, flags); -- cgit v1.2.3 From 99b47aaddfa985681496366e131daa69e282bb2a Mon Sep 17 00:00:00 2001 From: Matthew Brost Date: Thu, 14 Oct 2021 10:19:51 -0700 Subject: drm/i915/guc: Implement parallel context pin / unpin functions Parallel contexts are perma-pinned by the upper layers which makes the backend implementation rather simple. The parent pins the guc_id and children increment the parent's pin count on pin to ensure all the contexts are unpinned before we disable scheduling with the GuC or deregister the context. v2: (Daniel Vetter) - Perma-pin parallel contexts Signed-off-by: Matthew Brost Reviewed-by: John Harrison Signed-off-by: John Harrison Link: https://patchwork.freedesktop.org/patch/msgid/20211014172005.27155-12-matthew.brost@intel.com --- drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c | 70 +++++++++++++++++++++++ 1 file changed, 70 insertions(+) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c index 7ba4f1d6f69c..a412593342db 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c @@ -2584,6 +2584,76 @@ static const struct intel_context_ops virtual_guc_context_ops = { .get_sibling = guc_virtual_get_sibling, }; +/* Future patches will use this function */ +__maybe_unused +static int guc_parent_context_pin(struct intel_context *ce, void *vaddr) +{ + struct intel_engine_cs *engine = guc_virtual_get_sibling(ce->engine, 0); + struct intel_guc *guc = ce_to_guc(ce); + int ret; + + GEM_BUG_ON(!intel_context_is_parent(ce)); + GEM_BUG_ON(!intel_engine_is_virtual(ce->engine)); + + ret = pin_guc_id(guc, ce); + if (unlikely(ret < 0)) + return ret; + + return __guc_context_pin(ce, engine, vaddr); +} + +/* Future patches will use this function */ +__maybe_unused +static int guc_child_context_pin(struct intel_context *ce, void *vaddr) +{ + struct intel_engine_cs *engine = guc_virtual_get_sibling(ce->engine, 0); + + GEM_BUG_ON(!intel_context_is_child(ce)); + GEM_BUG_ON(!intel_engine_is_virtual(ce->engine)); + + __intel_context_pin(ce->parallel.parent); + return __guc_context_pin(ce, engine, vaddr); +} + +/* Future patches will use this function */ +__maybe_unused +static void guc_parent_context_unpin(struct intel_context *ce) +{ + struct intel_guc *guc = ce_to_guc(ce); + + GEM_BUG_ON(context_enabled(ce)); 
GEM_BUG_ON(intel_context_is_barrier(ce)); + GEM_BUG_ON(!intel_context_is_parent(ce)); + GEM_BUG_ON(!intel_engine_is_virtual(ce->engine)); + + unpin_guc_id(guc, ce); + lrc_unpin(ce); +} + +/* Future patches will use this function */ +__maybe_unused +static void guc_child_context_unpin(struct intel_context *ce) +{ + GEM_BUG_ON(context_enabled(ce)); + GEM_BUG_ON(intel_context_is_barrier(ce)); + GEM_BUG_ON(!intel_context_is_child(ce)); + GEM_BUG_ON(!intel_engine_is_virtual(ce->engine)); + + lrc_unpin(ce); +} + +/* Future patches will use this function */ +__maybe_unused +static void guc_child_context_post_unpin(struct intel_context *ce) +{ + GEM_BUG_ON(!intel_context_is_child(ce)); + GEM_BUG_ON(!intel_context_is_pinned(ce->parallel.parent)); + GEM_BUG_ON(!intel_engine_is_virtual(ce->engine)); + + lrc_post_unpin(ce); + intel_context_unpin(ce->parallel.parent); +} + static bool guc_irq_enable_breadcrumbs(struct intel_breadcrumbs *b) { -- cgit v1.2.3 From 6b540bf6f14362a912fd79e0e200bf4fa2b6b547 Mon Sep 17 00:00:00 2001 From: Matthew Brost Date: Thu, 14 Oct 2021 10:19:52 -0700 Subject: drm/i915/guc: Implement multi-lrc submission Implement multi-lrc submission via a single workqueue entry and single H2G. The workqueue entry contains an updated tail value for each context in the multi-lrc submission, and these values are updated simultaneously. As such, the tasklet and bypass path have been updated to coalesce requests into a single submission. v2: (John Harrison) - s/wqe/wqi - Use FIELD_PREP macros - Add GEM_BUG_ONs to ensure length fits within field - Add comment / white space to intel_guc_write_barrier (Kernel test robot) - Make need_tasklet a static function v3: (Docs) - A comment for submission_stall_reason v4: (Kernel test robot) - Initialize return value in bypass tasklet submit function (John Harrison) - Add comment near work queue defs - Add BUILD_BUG_ON to ensure WQ_SIZE is a power of 2 - Update write_barrier comment to talk about work queue v5: (John Harrison) - Fix typo in work queue comment Reviewed-by: John Harrison Signed-off-by: Matthew Brost Signed-off-by: John Harrison Link: https://patchwork.freedesktop.org/patch/msgid/20211014172005.27155-13-matthew.brost@intel.com --- drivers/gpu/drm/i915/gt/uc/intel_guc.c | 29 ++ drivers/gpu/drm/i915/gt/uc/intel_guc.h | 11 + drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c | 24 +- drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h | 30 +- drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c | 323 +++++++++++++++++++--- drivers/gpu/drm/i915/i915_request.h | 8 + 6 files changed, 350 insertions(+), 75 deletions(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c index 8f8182bf7c11..6e228343e8cb 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c @@ -756,3 +756,32 @@ void intel_guc_load_status(struct intel_guc *guc, struct drm_printer *p) } } } + +void intel_guc_write_barrier(struct intel_guc *guc) +{ + struct intel_gt *gt = guc_to_gt(guc); + + if (i915_gem_object_is_lmem(guc->ct.vma->obj)) { + /* + * Ensure intel_uncore_write_fw can be used rather than + * intel_uncore_write. + */ + GEM_BUG_ON(guc->send_regs.fw_domains); + + /* + * This register is used by the i915 and GuC for MMIO based + * communication. Once we are in this code CTBs are the only + * method the i915 uses to communicate with the GuC so it is + * safe to write to this register (a value of 0 is NOP for MMIO + * communication). 
If we ever start mixing CTBs and MMIOs a new + * register will have to be chosen. This function is also used + * to enforce ordering of a work queue item write and an update + * to the process descriptor. When a work queue is being used, + * CTBs are also the only mechanism of communication. + */ + intel_uncore_write_fw(gt->uncore, GEN11_SOFT_SCRATCH(0), 0); + } else { + /* wmb() sufficient for a barrier if in smem */ + wmb(); + } +} diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h index 4ca197f400ba..31cf9fb48c7e 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h @@ -46,6 +46,15 @@ struct intel_guc { * submitted until the stalled request is processed. */ struct i915_request *stalled_request; + /** + * @submission_stall_reason: reason why submission is stalled + */ + enum { + STALL_NONE, + STALL_REGISTER_CONTEXT, + STALL_MOVE_LRC_TAIL, + STALL_ADD_REQUEST, + } submission_stall_reason; /* intel_guc_recv interrupt related state */ /** @irq_lock: protects GuC irq state */ @@ -367,4 +376,6 @@ void intel_guc_submission_cancel_requests(struct intel_guc *guc); void intel_guc_load_status(struct intel_guc *guc, struct drm_printer *p); +void intel_guc_write_barrier(struct intel_guc *guc); + #endif diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c index 0a3504bc0b61..a0cc34be7b56 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c @@ -383,28 +383,6 @@ static u32 ct_get_next_fence(struct intel_guc_ct *ct) return ++ct->requests.last_fence; } -static void write_barrier(struct intel_guc_ct *ct) -{ - struct intel_guc *guc = ct_to_guc(ct); - struct intel_gt *gt = guc_to_gt(guc); - - if (i915_gem_object_is_lmem(guc->ct.vma->obj)) { - GEM_BUG_ON(guc->send_regs.fw_domains); - /* - * This register is used by the i915 and GuC for MMIO based - * communication. Once we are in this code CTBs are the only - * method the i915 uses to communicate with the GuC so it is - * safe to write to this register (a value of 0 is NOP for MMIO - * communication). If we ever start mixing CTBs and MMIOs a new - * register will have to be chosen. - */ - intel_uncore_write_fw(gt->uncore, GEN11_SOFT_SCRATCH(0), 0); - } else { - /* wmb() sufficient for a barrier if in smem */ - wmb(); - } -} - static int ct_write(struct intel_guc_ct *ct, const u32 *action, u32 len /* in dwords */, @@ -474,7 +452,7 @@ static int ct_write(struct intel_guc_ct *ct, * make sure H2G buffer update and LRC tail update (if this triggering a * submission) are visible before updating the descriptor tail */ - write_barrier(ct); + intel_guc_write_barrier(ct_to_guc(ct)); /* update local copies */ ctb->tail = tail; diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h index 0eeb2a9feeed..18da67cfcd92 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h @@ -52,25 +52,27 @@ #define GUC_DOORBELL_INVALID 256 -/* Work queue item header definitions */ +/* + * Work queue item header definitions + * + * Work queue is circular buffer used to submit complex (multi-lrc) submissions + * to the GuC. A work queue item is an entry in the circular buffer. 
+ */ #define WQ_STATUS_ACTIVE 1 #define WQ_STATUS_SUSPENDED 2 #define WQ_STATUS_CMD_ERROR 3 #define WQ_STATUS_ENGINE_ID_NOT_USED 4 #define WQ_STATUS_SUSPENDED_FROM_RESET 5 -#define WQ_TYPE_SHIFT 0 -#define WQ_TYPE_BATCH_BUF (0x1 << WQ_TYPE_SHIFT) -#define WQ_TYPE_PSEUDO (0x2 << WQ_TYPE_SHIFT) -#define WQ_TYPE_INORDER (0x3 << WQ_TYPE_SHIFT) -#define WQ_TYPE_NOOP (0x4 << WQ_TYPE_SHIFT) -#define WQ_TARGET_SHIFT 10 -#define WQ_LEN_SHIFT 16 -#define WQ_NO_WCFLUSH_WAIT (1 << 27) -#define WQ_PRESENT_WORKLOAD (1 << 28) - -#define WQ_RING_TAIL_SHIFT 20 -#define WQ_RING_TAIL_MAX 0x7FF /* 2^11 QWords */ -#define WQ_RING_TAIL_MASK (WQ_RING_TAIL_MAX << WQ_RING_TAIL_SHIFT) +#define WQ_TYPE_BATCH_BUF 0x1 +#define WQ_TYPE_PSEUDO 0x2 +#define WQ_TYPE_INORDER 0x3 +#define WQ_TYPE_NOOP 0x4 +#define WQ_TYPE_MULTI_LRC 0x5 +#define WQ_TYPE_MASK GENMASK(7, 0) +#define WQ_LEN_MASK GENMASK(26, 16) + +#define WQ_GUC_ID_MASK GENMASK(15, 0) +#define WQ_RING_TAIL_MASK GENMASK(28, 18) #define GUC_STAGE_DESC_ATTR_ACTIVE BIT(0) #define GUC_STAGE_DESC_ATTR_PENDING_DB BIT(1) diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c index a412593342db..71ae5eb69849 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c @@ -400,6 +400,29 @@ __get_process_desc(struct intel_context *ce) LRC_STATE_OFFSET) / sizeof(u32))); } +static u32 *get_wq_pointer(struct guc_process_desc *desc, + struct intel_context *ce, + u32 wqi_size) +{ + /* + * Check for space in work queue. Caching a value of head pointer in + * intel_context structure in order to reduce the number of accesses + * to shared GPU memory which may be across a PCIe bus. + */ +#define AVAILABLE_SPACE \ + CIRC_SPACE(ce->parallel.guc.wqi_tail, ce->parallel.guc.wqi_head, WQ_SIZE) + if (wqi_size > AVAILABLE_SPACE) { + ce->parallel.guc.wqi_head = READ_ONCE(desc->head); + + if (wqi_size > AVAILABLE_SPACE) + return NULL; + } +#undef AVAILABLE_SPACE + + return ((u32 *)__get_process_desc(ce)) + + ((WQ_OFFSET + ce->parallel.guc.wqi_tail) / sizeof(u32)); +} + static struct guc_lrc_desc *__get_lrc_desc(struct intel_guc *guc, u32 index) { struct guc_lrc_desc *base = guc->lrc_desc_pool_vaddr; @@ -559,10 +582,10 @@ int intel_guc_wait_for_idle(struct intel_guc *guc, long timeout) static int guc_lrc_desc_pin(struct intel_context *ce, bool loop); -static int guc_add_request(struct intel_guc *guc, struct i915_request *rq) +static int __guc_add_request(struct intel_guc *guc, struct i915_request *rq) { int err = 0; - struct intel_context *ce = rq->context; + struct intel_context *ce = request_to_scheduling_context(rq); u32 action[3]; int len = 0; u32 g2h_len_dw = 0; @@ -583,26 +606,17 @@ static int guc_add_request(struct intel_guc *guc, struct i915_request *rq) GEM_BUG_ON(!atomic_read(&ce->guc_id.ref)); GEM_BUG_ON(context_guc_id_invalid(ce)); - /* - * Corner case where the GuC firmware was blown away and reloaded while - * this context was pinned. - */ - if (unlikely(!lrc_desc_registered(guc, ce->guc_id.id))) { - err = guc_lrc_desc_pin(ce, false); - if (unlikely(err)) - return err; - } - spin_lock(&ce->guc_state.lock); /* * The request / context will be run on the hardware when scheduling - * gets enabled in the unblock. + * gets enabled in the unblock. For multi-lrc we still submit the + * context to move the LRC tails. 
*/ - if (unlikely(context_blocked(ce))) + if (unlikely(context_blocked(ce) && !intel_context_is_parent(ce))) goto out; - enabled = context_enabled(ce); + enabled = context_enabled(ce) || context_blocked(ce); if (!enabled) { action[len++] = INTEL_GUC_ACTION_SCHED_CONTEXT_MODE_SET; @@ -621,6 +635,18 @@ static int guc_add_request(struct intel_guc *guc, struct i915_request *rq) trace_intel_context_sched_enable(ce); atomic_inc(&guc->outstanding_submission_g2h); set_context_enabled(ce); + + /* + * Without multi-lrc KMD does the submission step (moving the + * lrc tail) so enabling scheduling is sufficient to submit the + * context. This isn't the case in multi-lrc submission as the + * GuC needs to move the tails, hence the need for another H2G + * to submit a multi-lrc context after enabling scheduling. + */ + if (intel_context_is_parent(ce)) { + action[0] = INTEL_GUC_ACTION_SCHED_CONTEXT; + err = intel_guc_send_nb(guc, action, len - 1, 0); + } } else if (!enabled) { clr_context_pending_enable(ce); intel_context_put(ce); @@ -633,6 +659,18 @@ out: return err; } +static int guc_add_request(struct intel_guc *guc, struct i915_request *rq) +{ + int ret = __guc_add_request(guc, rq); + + if (unlikely(ret == -EBUSY)) { + guc->stalled_request = rq; + guc->submission_stall_reason = STALL_ADD_REQUEST; + } + + return ret; +} + static inline void guc_set_lrc_tail(struct i915_request *rq) { rq->context->lrc_reg_state[CTX_RING_TAIL] = @@ -644,6 +682,136 @@ static inline int rq_prio(const struct i915_request *rq) return rq->sched.attr.priority; } +static bool is_multi_lrc_rq(struct i915_request *rq) +{ + return intel_context_is_child(rq->context) || + intel_context_is_parent(rq->context); +} + +static bool can_merge_rq(struct i915_request *rq, + struct i915_request *last) +{ + return request_to_scheduling_context(rq) == + request_to_scheduling_context(last); +} + +static u32 wq_space_until_wrap(struct intel_context *ce) +{ + return (WQ_SIZE - ce->parallel.guc.wqi_tail); +} + +static void write_wqi(struct guc_process_desc *desc, + struct intel_context *ce, + u32 wqi_size) +{ + BUILD_BUG_ON(!is_power_of_2(WQ_SIZE)); + + /* + * Ensure WQI are visible before updating tail + */ + intel_guc_write_barrier(ce_to_guc(ce)); + + ce->parallel.guc.wqi_tail = (ce->parallel.guc.wqi_tail + wqi_size) & + (WQ_SIZE - 1); + WRITE_ONCE(desc->tail, ce->parallel.guc.wqi_tail); +} + +static int guc_wq_noop_append(struct intel_context *ce) +{ + struct guc_process_desc *desc = __get_process_desc(ce); + u32 *wqi = get_wq_pointer(desc, ce, wq_space_until_wrap(ce)); + u32 len_dw = wq_space_until_wrap(ce) / sizeof(u32) - 1; + + if (!wqi) + return -EBUSY; + + GEM_BUG_ON(!FIELD_FIT(WQ_LEN_MASK, len_dw)); + + *wqi = FIELD_PREP(WQ_TYPE_MASK, WQ_TYPE_NOOP) | + FIELD_PREP(WQ_LEN_MASK, len_dw); + ce->parallel.guc.wqi_tail = 0; + + return 0; +} + +static int __guc_wq_item_append(struct i915_request *rq) +{ + struct intel_context *ce = request_to_scheduling_context(rq); + struct intel_context *child; + struct guc_process_desc *desc = __get_process_desc(ce); + unsigned int wqi_size = (ce->parallel.number_children + 4) * + sizeof(u32); + u32 *wqi; + u32 len_dw = (wqi_size / sizeof(u32)) - 1; + int ret; + + /* Ensure context is in correct state updating work queue */ + GEM_BUG_ON(!atomic_read(&ce->guc_id.ref)); + GEM_BUG_ON(context_guc_id_invalid(ce)); + GEM_BUG_ON(context_wait_for_deregister_to_register(ce)); + GEM_BUG_ON(!lrc_desc_registered(ce_to_guc(ce), ce->guc_id.id)); + + /* Insert NOOP if this work queue item will wrap the tail pointer. 
*/ + if (wqi_size > wq_space_until_wrap(ce)) { + ret = guc_wq_noop_append(ce); + if (ret) + return ret; + } + + wqi = get_wq_pointer(desc, ce, wqi_size); + if (!wqi) + return -EBUSY; + + GEM_BUG_ON(!FIELD_FIT(WQ_LEN_MASK, len_dw)); + + *wqi++ = FIELD_PREP(WQ_TYPE_MASK, WQ_TYPE_MULTI_LRC) | + FIELD_PREP(WQ_LEN_MASK, len_dw); + *wqi++ = ce->lrc.lrca; + *wqi++ = FIELD_PREP(WQ_GUC_ID_MASK, ce->guc_id.id) | + FIELD_PREP(WQ_RING_TAIL_MASK, ce->ring->tail / sizeof(u64)); + *wqi++ = 0; /* fence_id */ + for_each_child(ce, child) + *wqi++ = child->ring->tail / sizeof(u64); + + write_wqi(desc, ce, wqi_size); + + return 0; +} + +static int guc_wq_item_append(struct intel_guc *guc, + struct i915_request *rq) +{ + struct intel_context *ce = request_to_scheduling_context(rq); + int ret = 0; + + if (likely(!intel_context_is_banned(ce))) { + ret = __guc_wq_item_append(rq); + + if (unlikely(ret == -EBUSY)) { + guc->stalled_request = rq; + guc->submission_stall_reason = STALL_MOVE_LRC_TAIL; + } + } + + return ret; +} + +static bool multi_lrc_submit(struct i915_request *rq) +{ + struct intel_context *ce = request_to_scheduling_context(rq); + + intel_ring_set_tail(rq->ring, rq->tail); + + /* + * We expect the front end (execbuf IOCTL) to set this flag on the last + * request generated from a multi-BB submission. This indicates to the + * backend (GuC interface) that we should submit this context thus + * submitting all the requests generated in parallel. + */ + return test_bit(I915_FENCE_FLAG_SUBMIT_PARALLEL, &rq->fence.flags) || + intel_context_is_banned(ce); +} + static int guc_dequeue_one_context(struct intel_guc *guc) { struct i915_sched_engine * const sched_engine = guc->sched_engine; @@ -657,7 +825,17 @@ static int guc_dequeue_one_context(struct intel_guc *guc) if (guc->stalled_request) { submit = true; last = guc->stalled_request; - goto resubmit; + + switch (guc->submission_stall_reason) { + case STALL_REGISTER_CONTEXT: + goto register_context; + case STALL_MOVE_LRC_TAIL: + goto move_lrc_tail; + case STALL_ADD_REQUEST: + goto add_request; + default: + MISSING_CASE(guc->submission_stall_reason); + } } while ((rb = rb_first_cached(&sched_engine->queue))) { @@ -665,8 +843,8 @@ static int guc_dequeue_one_context(struct intel_guc *guc) struct i915_request *rq, *rn; priolist_for_each_request_consume(rq, rn, p) { - if (last && rq->context != last->context) - goto done; + if (last && !can_merge_rq(rq, last)) + goto register_context; list_del_init(&rq->sched.link); @@ -674,33 +852,84 @@ static int guc_dequeue_one_context(struct intel_guc *guc) trace_i915_request_in(rq, 0); last = rq; - submit = true; + + if (is_multi_lrc_rq(rq)) { + /* + * We need to coalesce all multi-lrc requests in + * a relationship into a single H2G. We are + * guaranteed that all of these requests will be + * submitted sequentially. 
+ */ + if (multi_lrc_submit(rq)) { + submit = true; + goto register_context; + } + } else { + submit = true; + } } rb_erase_cached(&p->node, &sched_engine->queue); i915_priolist_free(p); } -done: + +register_context: if (submit) { - guc_set_lrc_tail(last); -resubmit: + struct intel_context *ce = request_to_scheduling_context(last); + + if (unlikely(!lrc_desc_registered(guc, ce->guc_id.id) && + !intel_context_is_banned(ce))) { + ret = guc_lrc_desc_pin(ce, false); + if (unlikely(ret == -EPIPE)) { + goto deadlk; + } else if (ret == -EBUSY) { + guc->stalled_request = last; + guc->submission_stall_reason = + STALL_REGISTER_CONTEXT; + goto schedule_tasklet; + } else if (ret != 0) { + GEM_WARN_ON(ret); /* Unexpected */ + goto deadlk; + } + } + +move_lrc_tail: + if (is_multi_lrc_rq(last)) { + ret = guc_wq_item_append(guc, last); + if (ret == -EBUSY) { + goto schedule_tasklet; + } else if (ret != 0) { + GEM_WARN_ON(ret); /* Unexpected */ + goto deadlk; + } + } else { + guc_set_lrc_tail(last); + } + +add_request: ret = guc_add_request(guc, last); - if (unlikely(ret == -EPIPE)) + if (unlikely(ret == -EPIPE)) { + goto deadlk; + } else if (ret == -EBUSY) { + goto schedule_tasklet; + } else if (ret != 0) { + GEM_WARN_ON(ret); /* Unexpected */ goto deadlk; - else if (ret == -EBUSY) { - tasklet_schedule(&sched_engine->tasklet); - guc->stalled_request = last; - return false; } } guc->stalled_request = NULL; + guc->submission_stall_reason = STALL_NONE; return submit; deadlk: sched_engine->tasklet.callback = NULL; tasklet_disable_nosync(&sched_engine->tasklet); return false; + +schedule_tasklet: + tasklet_schedule(&sched_engine->tasklet); + return false; } static void guc_submission_tasklet(struct tasklet_struct *t) @@ -1250,16 +1479,22 @@ static inline void queue_request(struct i915_sched_engine *sched_engine, static int guc_bypass_tasklet_submit(struct intel_guc *guc, struct i915_request *rq) { - int ret; + int ret = 0; __i915_request_submit(rq); trace_i915_request_in(rq, 0); - guc_set_lrc_tail(rq); - ret = guc_add_request(guc, rq); - if (ret == -EBUSY) - guc->stalled_request = rq; + if (is_multi_lrc_rq(rq)) { + if (multi_lrc_submit(rq)) { + ret = guc_wq_item_append(guc, rq); + if (!ret) + ret = guc_add_request(guc, rq); + } + } else { + guc_set_lrc_tail(rq); + ret = guc_add_request(guc, rq); + } if (unlikely(ret == -EPIPE)) disable_submission(guc); @@ -1267,6 +1502,16 @@ static int guc_bypass_tasklet_submit(struct intel_guc *guc, return ret; } +static bool need_tasklet(struct intel_guc *guc, struct i915_request *rq) +{ + struct i915_sched_engine *sched_engine = rq->engine->sched_engine; + struct intel_context *ce = request_to_scheduling_context(rq); + + return submission_disabled(guc) || guc->stalled_request || + !i915_sched_engine_is_empty(sched_engine) || + !lrc_desc_registered(guc, ce->guc_id.id); +} + static void guc_submit_request(struct i915_request *rq) { struct i915_sched_engine *sched_engine = rq->engine->sched_engine; @@ -1276,8 +1521,7 @@ static void guc_submit_request(struct i915_request *rq) /* Will be called from irq-context when using foreign fences. 
*/ spin_lock_irqsave(&sched_engine->lock, flags); - if (submission_disabled(guc) || guc->stalled_request || - !i915_sched_engine_is_empty(sched_engine)) + if (need_tasklet(guc, rq)) queue_request(sched_engine, rq, rq_prio(rq)); else if (guc_bypass_tasklet_submit(guc, rq) == -EBUSY) tasklet_hi_schedule(&sched_engine->tasklet); @@ -2259,9 +2503,10 @@ static inline bool new_guc_prio_higher(u8 old_guc_prio, u8 new_guc_prio) static void add_to_context(struct i915_request *rq) { - struct intel_context *ce = rq->context; + struct intel_context *ce = request_to_scheduling_context(rq); u8 new_guc_prio = map_i915_prio_to_guc_prio(rq_prio(rq)); + GEM_BUG_ON(intel_context_is_child(ce)); GEM_BUG_ON(rq->guc_prio == GUC_PRIO_FINI); spin_lock(&ce->guc_state.lock); @@ -2294,7 +2539,9 @@ static void guc_prio_fini(struct i915_request *rq, struct intel_context *ce) static void remove_from_context(struct i915_request *rq) { - struct intel_context *ce = rq->context; + struct intel_context *ce = request_to_scheduling_context(rq); + + GEM_BUG_ON(intel_context_is_child(ce)); spin_lock_irq(&ce->guc_state.lock); @@ -2713,7 +2960,7 @@ static void guc_init_breadcrumbs(struct intel_engine_cs *engine) static void guc_bump_inflight_request_prio(struct i915_request *rq, int prio) { - struct intel_context *ce = rq->context; + struct intel_context *ce = request_to_scheduling_context(rq); u8 new_guc_prio = map_i915_prio_to_guc_prio(prio); /* Short circuit function */ @@ -2736,7 +2983,7 @@ static void guc_bump_inflight_request_prio(struct i915_request *rq, static void guc_retire_inflight_request_prio(struct i915_request *rq) { - struct intel_context *ce = rq->context; + struct intel_context *ce = request_to_scheduling_context(rq); spin_lock(&ce->guc_state.lock); guc_prio_fini(rq, ce); diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h index 7bd9ed20623e..8950785e55d6 100644 --- a/drivers/gpu/drm/i915/i915_request.h +++ b/drivers/gpu/drm/i915/i915_request.h @@ -139,6 +139,14 @@ enum { * the GPU. Here we track such boost requests on a per-request basis. */ I915_FENCE_FLAG_BOOST, + + /* + * I915_FENCE_FLAG_SUBMIT_PARALLEL - request with a context in a + * parent-child relationship (parallel submission, multi-lrc) should + * trigger a submission to the GuC rather than just moving the context + * tail. + */ + I915_FENCE_FLAG_SUBMIT_PARALLEL, }; /** -- cgit v1.2.3 From bc955204919ea8152b7443e7d48a48cc18dea448 Mon Sep 17 00:00:00 2001 From: Matthew Brost Date: Thu, 14 Oct 2021 10:19:53 -0700 Subject: drm/i915/guc: Insert submit fences between requests in parent-child relationship The GuC must receive requests in the order submitted for contexts in a parent-child relationship to function correctly. To ensure this, insert a submit fence between the current request and last request submitted for requests / contexts in a parent child relationship. This is conceptually similar to a single timeline. 
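Distilled, the ordering rule implemented by __i915_request_ensure_parallel_ordering() in the diff below is roughly (a sketch, not the exact code):

	parent = request_to_parent(rq);
	prev = parent->parallel.last_rq;
	if (prev && !__i915_request_is_complete(prev))
		/* submit fence: rq may not be submitted until prev has been */
		i915_sw_fence_await_sw_fence(&rq->submit, &prev->submit,
					     &rq->submitq);
	parent->parallel.last_rq = i915_request_get(rq); /* ref on prev is put */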
Signed-off-by: Matthew Brost Cc: John Harrison Reviewed-by: John Harrison Signed-off-by: John Harrison Link: https://patchwork.freedesktop.org/patch/msgid/20211014172005.27155-14-matthew.brost@intel.com --- drivers/gpu/drm/i915/gt/intel_context.h | 5 + drivers/gpu/drm/i915/gt/intel_context_types.h | 6 ++ drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c | 5 +- drivers/gpu/drm/i915/i915_request.c | 120 +++++++++++++++++----- 4 files changed, 108 insertions(+), 28 deletions(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h index 9f0995150a7a..edf12caaade3 100644 --- a/drivers/gpu/drm/i915/gt/intel_context.h +++ b/drivers/gpu/drm/i915/gt/intel_context.h @@ -77,6 +77,11 @@ intel_context_to_parent(struct intel_context *ce) } } +static inline bool intel_context_is_parallel(struct intel_context *ce) +{ + return intel_context_is_child(ce) || intel_context_is_parent(ce); +} + void intel_context_bind_parent_child(struct intel_context *parent, struct intel_context *child); diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h index 48decb5ee954..8309d1141d0a 100644 --- a/drivers/gpu/drm/i915/gt/intel_context_types.h +++ b/drivers/gpu/drm/i915/gt/intel_context_types.h @@ -237,6 +237,12 @@ struct intel_context { }; /** @parent: pointer to parent if child */ struct intel_context *parent; + /** + * @last_rq: last request submitted on a parallel context, used + * to insert submit fences between requests in the parallel + * context + */ + struct i915_request *last_rq; /** @number_children: number of children if parent */ u8 number_children; /** @guc: GuC specific members for parallel submission */ diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c index 71ae5eb69849..ebb64fb50396 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c @@ -684,8 +684,7 @@ static inline int rq_prio(const struct i915_request *rq) static bool is_multi_lrc_rq(struct i915_request *rq) { - return intel_context_is_child(rq->context) || - intel_context_is_parent(rq->context); + return intel_context_is_parallel(rq->context); } static bool can_merge_rq(struct i915_request *rq, @@ -2873,6 +2872,8 @@ static void guc_parent_context_unpin(struct intel_context *ce) GEM_BUG_ON(!intel_context_is_parent(ce)); GEM_BUG_ON(!intel_engine_is_virtual(ce->engine)); + if (ce->parallel.last_rq) + i915_request_put(ce->parallel.last_rq); unpin_guc_id(guc, ce); lrc_unpin(ce); } diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c index ed64fa9defdf..d29e46a001b4 100644 --- a/drivers/gpu/drm/i915/i915_request.c +++ b/drivers/gpu/drm/i915/i915_request.c @@ -1549,36 +1549,62 @@ i915_request_await_object(struct i915_request *to, return ret; } +static inline bool is_parallel_rq(struct i915_request *rq) +{ + return intel_context_is_parallel(rq->context); +} + +static inline struct intel_context *request_to_parent(struct i915_request *rq) +{ + return intel_context_to_parent(rq->context); +} + static struct i915_request * -__i915_request_add_to_timeline(struct i915_request *rq) +__i915_request_ensure_parallel_ordering(struct i915_request *rq, + struct intel_timeline *timeline) { - struct intel_timeline *timeline = i915_request_timeline(rq); struct i915_request *prev; - /* - * Dependency tracking and request ordering along the timeline - * is special cased so that we can 
eliminate redundant ordering - * operations while building the request (we know that the timeline - * itself is ordered, and here we guarantee it). - * - * As we know we will need to emit tracking along the timeline, - * we embed the hooks into our request struct -- at the cost of - * having to have specialised no-allocation interfaces (which will - * be beneficial elsewhere). - * - * A second benefit to open-coding i915_request_await_request is - * that we can apply a slight variant of the rules specialised - * for timelines that jump between engines (such as virtual engines). - * If we consider the case of virtual engine, we must emit a dma-fence - * to prevent scheduling of the second request until the first is - * complete (to maximise our greedy late load balancing) and this - * precludes optimising to use semaphores serialisation of a single - * timeline across engines. - */ + GEM_BUG_ON(!is_parallel_rq(rq)); + + prev = request_to_parent(rq)->parallel.last_rq; + if (prev) { + if (!__i915_request_is_complete(prev)) { + i915_sw_fence_await_sw_fence(&rq->submit, + &prev->submit, + &rq->submitq); + + if (rq->engine->sched_engine->schedule) + __i915_sched_node_add_dependency(&rq->sched, + &prev->sched, + &rq->dep, + 0); + } + i915_request_put(prev); + } + + request_to_parent(rq)->parallel.last_rq = i915_request_get(rq); + + return to_request(__i915_active_fence_set(&timeline->last_request, + &rq->fence)); +} + +static struct i915_request * +__i915_request_ensure_ordering(struct i915_request *rq, + struct intel_timeline *timeline) +{ + struct i915_request *prev; + + GEM_BUG_ON(is_parallel_rq(rq)); + prev = to_request(__i915_active_fence_set(&timeline->last_request, &rq->fence)); + if (prev && !__i915_request_is_complete(prev)) { bool uses_guc = intel_engine_uses_guc(rq->engine); + bool pow2 = is_power_of_2(READ_ONCE(prev->engine)->mask | + rq->engine->mask); + bool same_context = prev->context == rq->context; /* * The requests are supposed to be kept in order. However, @@ -1586,13 +1612,11 @@ __i915_request_add_to_timeline(struct i915_request *rq) * is used as a barrier for external modification to this * context. */ - GEM_BUG_ON(prev->context == rq->context && + GEM_BUG_ON(same_context && i915_seqno_passed(prev->fence.seqno, rq->fence.seqno)); - if ((!uses_guc && - is_power_of_2(READ_ONCE(prev->engine)->mask | rq->engine->mask)) || - (uses_guc && prev->context == rq->context)) + if ((same_context && uses_guc) || (!uses_guc && pow2)) i915_sw_fence_await_sw_fence(&rq->submit, &prev->submit, &rq->submitq); @@ -1607,6 +1631,50 @@ __i915_request_add_to_timeline(struct i915_request *rq) 0); } + return prev; +} + +static struct i915_request * +__i915_request_add_to_timeline(struct i915_request *rq) +{ + struct intel_timeline *timeline = i915_request_timeline(rq); + struct i915_request *prev; + + /* + * Dependency tracking and request ordering along the timeline + * is special cased so that we can eliminate redundant ordering + * operations while building the request (we know that the timeline + * itself is ordered, and here we guarantee it). + * + * As we know we will need to emit tracking along the timeline, + * we embed the hooks into our request struct -- at the cost of + * having to have specialised no-allocation interfaces (which will + * be beneficial elsewhere). + * + * A second benefit to open-coding i915_request_await_request is + * that we can apply a slight variant of the rules specialised + * for timelines that jump between engines (such as virtual engines). 
+ * If we consider the case of virtual engine, we must emit a dma-fence + * to prevent scheduling of the second request until the first is + * complete (to maximise our greedy late load balancing) and this + * precludes optimising to use semaphores serialisation of a single + * timeline across engines. + * + * We do not order parallel submission requests on the timeline as each + * parallel submission context has its own timeline and the ordering + * rules for parallel requests are that they must be submitted in the + * order received from the execbuf IOCTL. So rather than using the + * timeline we store a pointer to last request submitted in the + * relationship in the gem context and insert a submission fence + * between that request and request passed into this function or + * alternatively we use completion fence if gem context has a single + * timeline and this is the first submission of an execbuf IOCTL. + */ + if (likely(!is_parallel_rq(rq))) + prev = __i915_request_ensure_ordering(rq, timeline); + else + prev = __i915_request_ensure_parallel_ordering(rq, timeline); + /* * Make sure that no request gazumped us - if it was allocated after * our i915_request_alloc() and called __i915_request_add() before -- cgit v1.2.3 From 872758dbdb93324ba60d58e70ea2ee04cc7cbad8 Mon Sep 17 00:00:00 2001 From: Matthew Brost Date: Thu, 14 Oct 2021 10:19:54 -0700 Subject: drm/i915/guc: Implement multi-lrc reset Update context and full GPU reset to work with multi-lrc. The idea is the parent context tracks all the active requests inflight for itself and its children. The parent context owns the reset, replaying / canceling requests as needed. v2: (John Harrison) - Simply loop in find active request - Add comments to find active request / reset loop v3: (John Harrison) - s/its'/its/g - Fix comment when searching for active request - Reorder if state in __guc_reset_context v4: (Kernel test robot) - Delete unused is_multi_lrc function Reviewed-by: John Harrison Signed-off-by: Matthew Brost Signed-off-by: John Harrison Link: https://patchwork.freedesktop.org/patch/msgid/20211014172005.27155-15-matthew.brost@intel.com --- drivers/gpu/drm/i915/gt/intel_context.c | 15 ++++-- drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c | 64 ++++++++++++++++------- 2 files changed, 58 insertions(+), 21 deletions(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c index 4d95053855c9..709c433a2dcc 100644 --- a/drivers/gpu/drm/i915/gt/intel_context.c +++ b/drivers/gpu/drm/i915/gt/intel_context.c @@ -528,20 +528,29 @@ retry: struct i915_request *intel_context_find_active_request(struct intel_context *ce) { + struct intel_context *parent = intel_context_to_parent(ce); struct i915_request *rq, *active = NULL; unsigned long flags; GEM_BUG_ON(!intel_engine_uses_guc(ce->engine)); - spin_lock_irqsave(&ce->guc_state.lock, flags); - list_for_each_entry_reverse(rq, &ce->guc_state.requests, + /* + * We search the parent list to find an active request on the submitted + * context. The parent list contains the requests for all the contexts + * in the relationship so we have to do a compare of each request's + * context. 
+ */ + spin_lock_irqsave(&parent->guc_state.lock, flags); + list_for_each_entry_reverse(rq, &parent->guc_state.requests, sched.link) { + if (rq->context != ce) + continue; if (i915_request_completed(rq)) break; active = rq; } - spin_unlock_irqrestore(&ce->guc_state.lock, flags); + spin_unlock_irqrestore(&parent->guc_state.lock, flags); return active; } diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c index ebb64fb50396..112b5e6fe39d 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c @@ -1217,10 +1217,15 @@ __unwind_incomplete_requests(struct intel_context *ce) static void __guc_reset_context(struct intel_context *ce, bool stalled) { + bool local_stalled; struct i915_request *rq; unsigned long flags; u32 head; + int i, number_children = ce->parallel.number_children; bool skip = false; + struct intel_context *parent = ce; + + GEM_BUG_ON(intel_context_is_child(ce)); intel_context_get(ce); @@ -1246,25 +1251,38 @@ static void __guc_reset_context(struct intel_context *ce, bool stalled) if (unlikely(skip)) goto out_put; - rq = intel_context_find_active_request(ce); - if (!rq) { - head = ce->ring->tail; - stalled = false; - goto out_replay; - } + /* + * For each context in the relationship find the hanging request + * resetting each context / request as needed + */ + for (i = 0; i < number_children + 1; ++i) { + if (!intel_context_is_pinned(ce)) + goto next_context; + + local_stalled = false; + rq = intel_context_find_active_request(ce); + if (!rq) { + head = ce->ring->tail; + goto out_replay; + } - if (!i915_request_started(rq)) - stalled = false; + if (i915_request_started(rq)) + local_stalled = true; - GEM_BUG_ON(i915_active_is_idle(&ce->active)); - head = intel_ring_wrap(ce->ring, rq->head); - __i915_request_reset(rq, stalled); + GEM_BUG_ON(i915_active_is_idle(&ce->active)); + head = intel_ring_wrap(ce->ring, rq->head); + __i915_request_reset(rq, local_stalled && stalled); out_replay: - guc_reset_state(ce, head, stalled); - __unwind_incomplete_requests(ce); + guc_reset_state(ce, head, local_stalled && stalled); +next_context: + if (i != number_children) + ce = list_next_entry(ce, parallel.child_link); + } + + __unwind_incomplete_requests(parent); out_put: - intel_context_put(ce); + intel_context_put(parent); } void intel_guc_submission_reset(struct intel_guc *guc, bool stalled) @@ -1285,7 +1303,8 @@ void intel_guc_submission_reset(struct intel_guc *guc, bool stalled) xa_unlock(&guc->context_lookup); - if (intel_context_is_pinned(ce)) + if (intel_context_is_pinned(ce) && + !intel_context_is_child(ce)) __guc_reset_context(ce, stalled); intel_context_put(ce); @@ -1377,7 +1396,8 @@ void intel_guc_submission_cancel_requests(struct intel_guc *guc) xa_unlock(&guc->context_lookup); - if (intel_context_is_pinned(ce)) + if (intel_context_is_pinned(ce) && + !intel_context_is_child(ce)) guc_cancel_context_requests(ce); intel_context_put(ce); @@ -2070,6 +2090,8 @@ static struct i915_sw_fence *guc_context_block(struct intel_context *ce) u16 guc_id; bool enabled; + GEM_BUG_ON(intel_context_is_child(ce)); + spin_lock_irqsave(&ce->guc_state.lock, flags); incr_context_blocked(ce); @@ -2124,6 +2146,7 @@ static void guc_context_unblock(struct intel_context *ce) bool enable; GEM_BUG_ON(context_enabled(ce)); + GEM_BUG_ON(intel_context_is_child(ce)); spin_lock_irqsave(&ce->guc_state.lock, flags); @@ -2150,11 +2173,14 @@ static void guc_context_unblock(struct intel_context *ce) static 
void guc_context_cancel_request(struct intel_context *ce, struct i915_request *rq) { + struct intel_context *block_context = + request_to_scheduling_context(rq); + if (i915_sw_fence_signaled(&rq->submit)) { struct i915_sw_fence *fence; intel_context_get(ce); - fence = guc_context_block(ce); + fence = guc_context_block(block_context); i915_sw_fence_wait(fence); if (!i915_request_completed(rq)) { __i915_request_skip(rq); @@ -2168,7 +2194,7 @@ static void guc_context_cancel_request(struct intel_context *ce, */ flush_work(&ce_to_guc(ce)->ct.requests.worker); - guc_context_unblock(ce); + guc_context_unblock(block_context); intel_context_put(ce); } } @@ -2194,6 +2220,8 @@ static void guc_context_ban(struct intel_context *ce, struct i915_request *rq) intel_wakeref_t wakeref; unsigned long flags; + GEM_BUG_ON(intel_context_is_child(ce)); + guc_flush_submissions(guc); spin_lock_irqsave(&ce->guc_state.lock, flags); -- cgit v1.2.3 From d38a9294491dcc38b0508c865b59604f66fb08c4 Mon Sep 17 00:00:00 2001 From: Matthew Brost Date: Thu, 14 Oct 2021 10:19:55 -0700 Subject: drm/i915/guc: Update debugfs for GuC multi-lrc Display the workqueue status in debugfs for GuC contexts that are in parent-child relationship. v2: (John Harrison) - Output number children in debugfs Signed-off-by: Matthew Brost Reviewed-by: John Harrison Signed-off-by: John Harrison Link: https://patchwork.freedesktop.org/patch/msgid/20211014172005.27155-16-matthew.brost@intel.com --- drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c | 52 +++++++++++++++++------ 1 file changed, 38 insertions(+), 14 deletions(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c index 112b5e6fe39d..938dc34e8d3a 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c @@ -3702,6 +3702,25 @@ static inline void guc_log_context_priority(struct drm_printer *p, drm_printf(p, "\n"); } +static inline void guc_log_context(struct drm_printer *p, + struct intel_context *ce) +{ + drm_printf(p, "GuC lrc descriptor %u:\n", ce->guc_id.id); + drm_printf(p, "\tHW Context Desc: 0x%08x\n", ce->lrc.lrca); + drm_printf(p, "\t\tLRC Head: Internal %u, Memory %u\n", + ce->ring->head, + ce->lrc_reg_state[CTX_RING_HEAD]); + drm_printf(p, "\t\tLRC Tail: Internal %u, Memory %u\n", + ce->ring->tail, + ce->lrc_reg_state[CTX_RING_TAIL]); + drm_printf(p, "\t\tContext Pin Count: %u\n", + atomic_read(&ce->pin_count)); + drm_printf(p, "\t\tGuC ID Ref Count: %u\n", + atomic_read(&ce->guc_id.ref)); + drm_printf(p, "\t\tSchedule State: 0x%x\n\n", + ce->guc_state.sched_state); +} + void intel_guc_submission_print_context_info(struct intel_guc *guc, struct drm_printer *p) { @@ -3711,22 +3730,27 @@ void intel_guc_submission_print_context_info(struct intel_guc *guc, xa_lock_irqsave(&guc->context_lookup, flags); xa_for_each(&guc->context_lookup, index, ce) { - drm_printf(p, "GuC lrc descriptor %u:\n", ce->guc_id.id); - drm_printf(p, "\tHW Context Desc: 0x%08x\n", ce->lrc.lrca); - drm_printf(p, "\t\tLRC Head: Internal %u, Memory %u\n", - ce->ring->head, - ce->lrc_reg_state[CTX_RING_HEAD]); - drm_printf(p, "\t\tLRC Tail: Internal %u, Memory %u\n", - ce->ring->tail, - ce->lrc_reg_state[CTX_RING_TAIL]); - drm_printf(p, "\t\tContext Pin Count: %u\n", - atomic_read(&ce->pin_count)); - drm_printf(p, "\t\tGuC ID Ref Count: %u\n", - atomic_read(&ce->guc_id.ref)); - drm_printf(p, "\t\tSchedule State: 0x%x\n\n", - ce->guc_state.sched_state); + 
GEM_BUG_ON(intel_context_is_child(ce));
+		guc_log_context(p, ce);
 		guc_log_context_priority(p, ce);
+
+		if (intel_context_is_parent(ce)) {
+			struct guc_process_desc *desc = __get_process_desc(ce);
+			struct intel_context *child;
+
+			drm_printf(p, "\t\tNumber children: %u\n",
+				   ce->parallel.number_children);
+			drm_printf(p, "\t\tWQI Head: %u\n",
+				   READ_ONCE(desc->head));
+			drm_printf(p, "\t\tWQI Tail: %u\n",
+				   READ_ONCE(desc->tail));
+			drm_printf(p, "\t\tWQI Status: %u\n\n",
+				   READ_ONCE(desc->wq_status));
+
+			for_each_child(ce, child)
+				guc_log_context(p, child);
+		}
 	}
 	xa_unlock_irqrestore(&guc->context_lookup, flags);
 }
--
cgit v1.2.3

From e5e32171a2cf1e434d4f88e12467f3e47d0ec618 Mon Sep 17 00:00:00 2001
From: Matthew Brost
Date: Thu, 14 Oct 2021 10:19:56 -0700
Subject: drm/i915/guc: Connect UAPI to GuC multi-lrc interface

Introduce a 'set parallel submit' extension to connect the UAPI to the
GuC multi-lrc interface. The kernel doc in the new uAPI should explain
it all.

IGT: https://patchwork.freedesktop.org/patch/447008/?series=93071&rev=1
media UMD: https://github.com/intel/media-driver/pull/1252

v2:
 (Daniel Vetter)
  - Add IGT link and placeholder for media UMD link
v3:
 (Kernel test robot)
  - Fix warning in unpin engines call
 (John Harrison)
  - Reword a bunch of the kernel doc
v4:
 (John Harrison)
  - Add comment why perma-pin is done after setting gem context
  - Update some comments / docs for proto contexts
v5:
 (John Harrison)
  - Rework perma-pin comment
  - Add BUG_ON if context is pinned when setting gem context

Cc: Tvrtko Ursulin
Signed-off-by: Matthew Brost
Reviewed-by: John Harrison
Signed-off-by: John Harrison
Link: https://patchwork.freedesktop.org/patch/msgid/20211014172005.27155-17-matthew.brost@intel.com
---
 drivers/gpu/drm/i915/gem/i915_gem_context.c        | 230 ++++++++++++++++++++-
 drivers/gpu/drm/i915/gem/i915_gem_context_types.h  |  16 +-
 drivers/gpu/drm/i915/gt/intel_context_types.h      |   9 +-
 drivers/gpu/drm/i915/gt/intel_engine.h             |  12 +-
 drivers/gpu/drm/i915/gt/intel_engine_cs.c          |   6 +-
 .../gpu/drm/i915/gt/intel_execlists_submission.c   |   6 +-
 drivers/gpu/drm/i915/gt/selftest_execlists.c       |  12 +-
 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c  | 114 ++++++++--
 8 files changed, 374 insertions(+), 31 deletions(-)

(limited to 'drivers/gpu')

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index d225d3dd0b40..9a00f11fef46 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -556,9 +556,150 @@ set_proto_ctx_engines_bond(struct i915_user_extension __user *base, void *data)
 	return 0;
 }

+static int
+set_proto_ctx_engines_parallel_submit(struct i915_user_extension __user *base,
+				      void *data)
+{
+	struct i915_context_engines_parallel_submit __user *ext =
+		container_of_user(base, typeof(*ext), base);
+	const struct set_proto_ctx_engines *set = data;
+	struct drm_i915_private *i915 = set->i915;
+	u64 flags;
+	int err = 0, n, i, j;
+	u16 slot, width, num_siblings;
+	struct intel_engine_cs **siblings = NULL;
+	intel_engine_mask_t prev_mask;
+
+	/* Disabling for now */
+	return -ENODEV;
+
+	/* FIXME: This is NIY for execlists */
+	if (!(intel_uc_uses_guc_submission(&i915->gt.uc)))
+		return -ENODEV;
+
+	if (get_user(slot, &ext->engine_index))
+		return -EFAULT;
+
+	if (get_user(width, &ext->width))
+		return -EFAULT;
+
+	if (get_user(num_siblings, &ext->num_siblings))
+		return -EFAULT;
+
+	if (slot >= set->num_engines) {
+		drm_dbg(&i915->drm, "Invalid placement value, %d >= %d\n",
+			slot, set->num_engines);
+		return 
-EINVAL; + } + + if (set->engines[slot].type != I915_GEM_ENGINE_TYPE_INVALID) { + drm_dbg(&i915->drm, + "Invalid placement[%d], already occupied\n", slot); + return -EINVAL; + } + + if (get_user(flags, &ext->flags)) + return -EFAULT; + + if (flags) { + drm_dbg(&i915->drm, "Unknown flags 0x%02llx", flags); + return -EINVAL; + } + + for (n = 0; n < ARRAY_SIZE(ext->mbz64); n++) { + err = check_user_mbz(&ext->mbz64[n]); + if (err) + return err; + } + + if (width < 2) { + drm_dbg(&i915->drm, "Width (%d) < 2\n", width); + return -EINVAL; + } + + if (num_siblings < 1) { + drm_dbg(&i915->drm, "Number siblings (%d) < 1\n", + num_siblings); + return -EINVAL; + } + + siblings = kmalloc_array(num_siblings * width, + sizeof(*siblings), + GFP_KERNEL); + if (!siblings) + return -ENOMEM; + + /* Create contexts / engines */ + for (i = 0; i < width; ++i) { + intel_engine_mask_t current_mask = 0; + struct i915_engine_class_instance prev_engine; + + for (j = 0; j < num_siblings; ++j) { + struct i915_engine_class_instance ci; + + n = i * num_siblings + j; + if (copy_from_user(&ci, &ext->engines[n], sizeof(ci))) { + err = -EFAULT; + goto out_err; + } + + siblings[n] = + intel_engine_lookup_user(i915, ci.engine_class, + ci.engine_instance); + if (!siblings[n]) { + drm_dbg(&i915->drm, + "Invalid sibling[%d]: { class:%d, inst:%d }\n", + n, ci.engine_class, ci.engine_instance); + err = -EINVAL; + goto out_err; + } + + if (n) { + if (prev_engine.engine_class != + ci.engine_class) { + drm_dbg(&i915->drm, + "Mismatched class %d, %d\n", + prev_engine.engine_class, + ci.engine_class); + err = -EINVAL; + goto out_err; + } + } + + prev_engine = ci; + current_mask |= siblings[n]->logical_mask; + } + + if (i > 0) { + if (current_mask != prev_mask << 1) { + drm_dbg(&i915->drm, + "Non contiguous logical mask 0x%x, 0x%x\n", + prev_mask, current_mask); + err = -EINVAL; + goto out_err; + } + } + prev_mask = current_mask; + } + + set->engines[slot].type = I915_GEM_ENGINE_TYPE_PARALLEL; + set->engines[slot].num_siblings = num_siblings; + set->engines[slot].width = width; + set->engines[slot].siblings = siblings; + + return 0; + +out_err: + kfree(siblings); + + return err; +} + static const i915_user_extension_fn set_proto_ctx_engines_extensions[] = { [I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE] = set_proto_ctx_engines_balance, [I915_CONTEXT_ENGINES_EXT_BOND] = set_proto_ctx_engines_bond, + [I915_CONTEXT_ENGINES_EXT_PARALLEL_SUBMIT] = + set_proto_ctx_engines_parallel_submit, }; static int set_proto_ctx_engines(struct drm_i915_file_private *fpriv, @@ -794,6 +935,7 @@ static int intel_context_set_gem(struct intel_context *ce, GEM_BUG_ON(rcu_access_pointer(ce->gem_context)); RCU_INIT_POINTER(ce->gem_context, ctx); + GEM_BUG_ON(intel_context_is_pinned(ce)); ce->ring_size = SZ_16K; i915_vm_put(ce->vm); @@ -818,6 +960,25 @@ static int intel_context_set_gem(struct intel_context *ce, return ret; } +static void __unpin_engines(struct i915_gem_engines *e, unsigned int count) +{ + while (count--) { + struct intel_context *ce = e->engines[count], *child; + + if (!ce || !test_bit(CONTEXT_PERMA_PIN, &ce->flags)) + continue; + + for_each_child(ce, child) + intel_context_unpin(child); + intel_context_unpin(ce); + } +} + +static void unpin_engines(struct i915_gem_engines *e) +{ + __unpin_engines(e, e->num_engines); +} + static void __free_engines(struct i915_gem_engines *e, unsigned int count) { while (count--) { @@ -933,6 +1094,40 @@ free_engines: return err; } +static int perma_pin_contexts(struct intel_context *ce) +{ + struct intel_context *child; + 
int i = 0, j = 0, ret; + + GEM_BUG_ON(!intel_context_is_parent(ce)); + + ret = intel_context_pin(ce); + if (unlikely(ret)) + return ret; + + for_each_child(ce, child) { + ret = intel_context_pin(child); + if (unlikely(ret)) + goto unwind; + ++i; + } + + set_bit(CONTEXT_PERMA_PIN, &ce->flags); + + return 0; + +unwind: + intel_context_unpin(ce); + for_each_child(ce, child) { + if (j++ < i) + intel_context_unpin(child); + else + break; + } + + return ret; +} + static struct i915_gem_engines *user_engines(struct i915_gem_context *ctx, unsigned int num_engines, struct i915_gem_proto_engine *pe) @@ -946,7 +1141,7 @@ static struct i915_gem_engines *user_engines(struct i915_gem_context *ctx, e->num_engines = num_engines; for (n = 0; n < num_engines; n++) { - struct intel_context *ce; + struct intel_context *ce, *child; int ret; switch (pe[n].type) { @@ -956,7 +1151,13 @@ static struct i915_gem_engines *user_engines(struct i915_gem_context *ctx, case I915_GEM_ENGINE_TYPE_BALANCED: ce = intel_engine_create_virtual(pe[n].siblings, - pe[n].num_siblings); + pe[n].num_siblings, 0); + break; + + case I915_GEM_ENGINE_TYPE_PARALLEL: + ce = intel_engine_create_parallel(pe[n].siblings, + pe[n].num_siblings, + pe[n].width); break; case I915_GEM_ENGINE_TYPE_INVALID: @@ -977,6 +1178,30 @@ static struct i915_gem_engines *user_engines(struct i915_gem_context *ctx, err = ERR_PTR(ret); goto free_engines; } + for_each_child(ce, child) { + ret = intel_context_set_gem(child, ctx, pe->sseu); + if (ret) { + err = ERR_PTR(ret); + goto free_engines; + } + } + + /* + * XXX: Must be done after calling intel_context_set_gem as that + * function changes the ring size. The ring is allocated when + * the context is pinned. If the ring size is changed after + * allocation we have a mismatch of the ring size and will cause + * the context to hang. Presumably with a bit of reordering we + * could move the perma-pin step to the backend function + * intel_engine_create_parallel. + */ + if (pe[n].type == I915_GEM_ENGINE_TYPE_PARALLEL) { + ret = perma_pin_contexts(ce); + if (ret) { + err = ERR_PTR(ret); + goto free_engines; + } + } } return e; @@ -1219,6 +1444,7 @@ static void context_close(struct i915_gem_context *ctx) /* Flush any concurrent set_engines() */ mutex_lock(&ctx->engines_mutex); + unpin_engines(__context_engines_static(ctx)); engines_idle_release(ctx, rcu_replace_pointer(ctx->engines, NULL, 1)); i915_gem_context_set_closed(ctx); mutex_unlock(&ctx->engines_mutex); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h index a627b09c4680..282cdb8a5c5a 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h @@ -78,13 +78,16 @@ enum i915_gem_engine_type { /** @I915_GEM_ENGINE_TYPE_BALANCED: A load-balanced engine set */ I915_GEM_ENGINE_TYPE_BALANCED, + + /** @I915_GEM_ENGINE_TYPE_PARALLEL: A parallel engine set */ + I915_GEM_ENGINE_TYPE_PARALLEL, }; /** * struct i915_gem_proto_engine - prototype engine * * This struct describes an engine that a context may contain. Engines - * have three types: + * have four types: * * - I915_GEM_ENGINE_TYPE_INVALID: Invalid engines can be created but they * show up as a NULL in i915_gem_engines::engines[i] and any attempt to @@ -97,6 +100,10 @@ enum i915_gem_engine_type { * * - I915_GEM_ENGINE_TYPE_BALANCED: A load-balanced engine set, described * i915_gem_proto_engine::num_siblings and i915_gem_proto_engine::siblings. 
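
For readers of the uAPI parsing code earlier in this patch
(set_proto_ctx_engines_parallel_submit), the flattened width x
num_siblings layout can be seen with a tiny stand-alone C program; the
engine names and counts here are made up for illustration:

#include <stdio.h>

int main(void)
{
	const int width = 2;		/* batches submitted together */
	const int num_siblings = 2;	/* placement choices per batch */
	const char *engines[] = {
		"vcs0", "vcs1",		/* candidates for batch 0 */
		"vcs2", "vcs3",		/* candidates for batch 1 */
	};
	int i, j;

	/* same n = i * num_siblings + j indexing as the parsing loop */
	for (i = 0; i < width; ++i)
		for (j = 0; j < num_siblings; ++j)
			printf("batch %d sibling %d -> %s\n",
			       i, j, engines[i * num_siblings + j]);
	return 0;
}
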
+ * + * - I915_GEM_ENGINE_TYPE_PARALLEL: A parallel submission engine set, described + * i915_gem_proto_engine::width, i915_gem_proto_engine::num_siblings, and + * i915_gem_proto_engine::siblings. */ struct i915_gem_proto_engine { /** @type: Type of this engine */ @@ -105,10 +112,13 @@ struct i915_gem_proto_engine { /** @engine: Engine, for physical */ struct intel_engine_cs *engine; - /** @num_siblings: Number of balanced siblings */ + /** @num_siblings: Number of balanced or parallel siblings */ unsigned int num_siblings; - /** @siblings: Balanced siblings */ + /** @width: Width of each sibling */ + unsigned int width; + + /** @siblings: Balanced siblings or num_siblings * width for parallel */ struct intel_engine_cs **siblings; /** @sseu: Client-set SSEU parameters */ diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h index 8309d1141d0a..1d880303a7e4 100644 --- a/drivers/gpu/drm/i915/gt/intel_context_types.h +++ b/drivers/gpu/drm/i915/gt/intel_context_types.h @@ -55,9 +55,13 @@ struct intel_context_ops { void (*reset)(struct intel_context *ce); void (*destroy)(struct kref *kref); - /* virtual engine/context interface */ + /* virtual/parallel engine/context interface */ struct intel_context *(*create_virtual)(struct intel_engine_cs **engine, - unsigned int count); + unsigned int count, + unsigned long flags); + struct intel_context *(*create_parallel)(struct intel_engine_cs **engines, + unsigned int num_siblings, + unsigned int width); struct intel_engine_cs *(*get_sibling)(struct intel_engine_cs *engine, unsigned int sibling); }; @@ -113,6 +117,7 @@ struct intel_context { #define CONTEXT_NOPREEMPT 8 #define CONTEXT_LRCA_DIRTY 9 #define CONTEXT_GUC_INIT 10 +#define CONTEXT_PERMA_PIN 11 struct { u64 timeout_us; diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h index d5ac49c0691e..08559ace0ada 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine.h +++ b/drivers/gpu/drm/i915/gt/intel_engine.h @@ -282,9 +282,19 @@ intel_engine_has_preempt_reset(const struct intel_engine_cs *engine) return intel_engine_has_preemption(engine); } +#define FORCE_VIRTUAL BIT(0) struct intel_context * intel_engine_create_virtual(struct intel_engine_cs **siblings, - unsigned int count); + unsigned int count, unsigned long flags); + +static inline struct intel_context * +intel_engine_create_parallel(struct intel_engine_cs **engines, + unsigned int num_engines, + unsigned int width) +{ + GEM_BUG_ON(!engines[0]->cops->create_parallel); + return engines[0]->cops->create_parallel(engines, num_engines, width); +} static inline bool intel_virtual_engine_has_heartbeat(const struct intel_engine_cs *engine) diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c index 2eb798ad068b..ff6753ccb129 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c @@ -1953,16 +1953,16 @@ ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine, ktime_t *now) struct intel_context * intel_engine_create_virtual(struct intel_engine_cs **siblings, - unsigned int count) + unsigned int count, unsigned long flags) { if (count == 0) return ERR_PTR(-EINVAL); - if (count == 1) + if (count == 1 && !(flags & FORCE_VIRTUAL)) return intel_context_create(siblings[0]); GEM_BUG_ON(!siblings[0]->cops->create_virtual); - return siblings[0]->cops->create_virtual(siblings, count); + return siblings[0]->cops->create_virtual(siblings, count, flags); } struct i915_request 
* diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c index 43a74b216efb..bedb80057046 100644 --- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c +++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c @@ -201,7 +201,8 @@ static struct virtual_engine *to_virtual_engine(struct intel_engine_cs *engine) } static struct intel_context * -execlists_create_virtual(struct intel_engine_cs **siblings, unsigned int count); +execlists_create_virtual(struct intel_engine_cs **siblings, unsigned int count, + unsigned long flags); static struct i915_request * __active_request(const struct intel_timeline * const tl, @@ -3784,7 +3785,8 @@ unlock: } static struct intel_context * -execlists_create_virtual(struct intel_engine_cs **siblings, unsigned int count) +execlists_create_virtual(struct intel_engine_cs **siblings, unsigned int count, + unsigned long flags) { struct virtual_engine *ve; unsigned int n; diff --git a/drivers/gpu/drm/i915/gt/selftest_execlists.c b/drivers/gpu/drm/i915/gt/selftest_execlists.c index 25a8c4f62b0d..b367ecfa42de 100644 --- a/drivers/gpu/drm/i915/gt/selftest_execlists.c +++ b/drivers/gpu/drm/i915/gt/selftest_execlists.c @@ -3733,7 +3733,7 @@ static int nop_virtual_engine(struct intel_gt *gt, GEM_BUG_ON(!nctx || nctx > ARRAY_SIZE(ve)); for (n = 0; n < nctx; n++) { - ve[n] = intel_engine_create_virtual(siblings, nsibling); + ve[n] = intel_engine_create_virtual(siblings, nsibling, 0); if (IS_ERR(ve[n])) { err = PTR_ERR(ve[n]); nctx = n; @@ -3929,7 +3929,7 @@ static int mask_virtual_engine(struct intel_gt *gt, * restrict it to our desired engine within the virtual engine. */ - ve = intel_engine_create_virtual(siblings, nsibling); + ve = intel_engine_create_virtual(siblings, nsibling, 0); if (IS_ERR(ve)) { err = PTR_ERR(ve); goto out_close; @@ -4060,7 +4060,7 @@ static int slicein_virtual_engine(struct intel_gt *gt, i915_request_add(rq); } - ce = intel_engine_create_virtual(siblings, nsibling); + ce = intel_engine_create_virtual(siblings, nsibling, 0); if (IS_ERR(ce)) { err = PTR_ERR(ce); goto out; @@ -4112,7 +4112,7 @@ static int sliceout_virtual_engine(struct intel_gt *gt, /* XXX We do not handle oversubscription and fairness with normal rq */ for (n = 0; n < nsibling; n++) { - ce = intel_engine_create_virtual(siblings, nsibling); + ce = intel_engine_create_virtual(siblings, nsibling, 0); if (IS_ERR(ce)) { err = PTR_ERR(ce); goto out; @@ -4214,7 +4214,7 @@ static int preserved_virtual_engine(struct intel_gt *gt, if (err) goto out_scratch; - ve = intel_engine_create_virtual(siblings, nsibling); + ve = intel_engine_create_virtual(siblings, nsibling, 0); if (IS_ERR(ve)) { err = PTR_ERR(ve); goto out_scratch; @@ -4354,7 +4354,7 @@ static int reset_virtual_engine(struct intel_gt *gt, if (igt_spinner_init(&spin, gt)) return -ENOMEM; - ve = intel_engine_create_virtual(siblings, nsibling); + ve = intel_engine_create_virtual(siblings, nsibling, 0); if (IS_ERR(ve)) { err = PTR_ERR(ve); goto out_spin; diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c index 938dc34e8d3a..7c12364a017a 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c @@ -124,7 +124,13 @@ struct guc_virtual_engine { }; static struct intel_context * -guc_create_virtual(struct intel_engine_cs **siblings, unsigned int count); +guc_create_virtual(struct intel_engine_cs **siblings, unsigned int count, + unsigned long flags); + 
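
guc_create_parallel(), forward-declared just below and defined later in
this patch, turns each batch slot into a (forced) virtual context and
binds slots 1..width-1 as children of slot 0. A rough user-space model
of that construction and its unwind-on-error path (hypothetical types
and ownership rules, not the driver API):

#include <stdlib.h>

struct ctx {
	struct ctx *parent;	/* NULL on the parent itself */
	struct ctx *next;	/* sibling link on the parent's list */
};

static void put_parallel(struct ctx *parent)
{
	while (parent) {
		struct ctx *next = parent->next;

		free(parent);
		parent = next;
	}
}

static struct ctx *create_parallel(int width)
{
	struct ctx *parent = NULL, **tail = NULL;
	int i;

	for (i = 0; i < width; ++i) {
		struct ctx *ce = calloc(1, sizeof(*ce));

		if (!ce)
			goto unwind;

		if (i == 0) {
			parent = ce;	/* slot 0 owns the relationship */
			tail = &ce->next;
		} else {
			ce->parent = parent;
			*tail = ce;	/* append as the last child */
			tail = &ce->next;
		}
	}
	return parent;

unwind:
	put_parallel(parent);	/* dropping the parent frees the children */
	return NULL;
}

As in the driver, a single put of the parent is enough to tear down the
partially built relationship on error.
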
+static struct intel_context * +guc_create_parallel(struct intel_engine_cs **engines, + unsigned int num_siblings, + unsigned int width); #define GUC_REQUEST_SIZE 64 /* bytes */ @@ -2609,6 +2615,7 @@ static const struct intel_context_ops guc_context_ops = { .destroy = guc_context_destroy, .create_virtual = guc_create_virtual, + .create_parallel = guc_create_parallel, }; static void submit_work_cb(struct irq_work *wrk) @@ -2858,8 +2865,6 @@ static const struct intel_context_ops virtual_guc_context_ops = { .get_sibling = guc_virtual_get_sibling, }; -/* Future patches will use this function */ -__maybe_unused static int guc_parent_context_pin(struct intel_context *ce, void *vaddr) { struct intel_engine_cs *engine = guc_virtual_get_sibling(ce->engine, 0); @@ -2876,8 +2881,6 @@ static int guc_parent_context_pin(struct intel_context *ce, void *vaddr) return __guc_context_pin(ce, engine, vaddr); } -/* Future patches will use this function */ -__maybe_unused static int guc_child_context_pin(struct intel_context *ce, void *vaddr) { struct intel_engine_cs *engine = guc_virtual_get_sibling(ce->engine, 0); @@ -2889,8 +2892,6 @@ static int guc_child_context_pin(struct intel_context *ce, void *vaddr) return __guc_context_pin(ce, engine, vaddr); } -/* Future patches will use this function */ -__maybe_unused static void guc_parent_context_unpin(struct intel_context *ce) { struct intel_guc *guc = ce_to_guc(ce); @@ -2906,8 +2907,6 @@ static void guc_parent_context_unpin(struct intel_context *ce) lrc_unpin(ce); } -/* Future patches will use this function */ -__maybe_unused static void guc_child_context_unpin(struct intel_context *ce) { GEM_BUG_ON(context_enabled(ce)); @@ -2918,8 +2917,6 @@ static void guc_child_context_unpin(struct intel_context *ce) lrc_unpin(ce); } -/* Future patches will use this function */ -__maybe_unused static void guc_child_context_post_unpin(struct intel_context *ce) { GEM_BUG_ON(!intel_context_is_child(ce)); @@ -2930,6 +2927,98 @@ static void guc_child_context_post_unpin(struct intel_context *ce) intel_context_unpin(ce->parallel.parent); } +static void guc_child_context_destroy(struct kref *kref) +{ + struct intel_context *ce = container_of(kref, typeof(*ce), ref); + + __guc_context_destroy(ce); +} + +static const struct intel_context_ops virtual_parent_context_ops = { + .alloc = guc_virtual_context_alloc, + + .pre_pin = guc_context_pre_pin, + .pin = guc_parent_context_pin, + .unpin = guc_parent_context_unpin, + .post_unpin = guc_context_post_unpin, + + .ban = guc_context_ban, + + .cancel_request = guc_context_cancel_request, + + .enter = guc_virtual_context_enter, + .exit = guc_virtual_context_exit, + + .sched_disable = guc_context_sched_disable, + + .destroy = guc_context_destroy, + + .get_sibling = guc_virtual_get_sibling, +}; + +static const struct intel_context_ops virtual_child_context_ops = { + .alloc = guc_virtual_context_alloc, + + .pre_pin = guc_context_pre_pin, + .pin = guc_child_context_pin, + .unpin = guc_child_context_unpin, + .post_unpin = guc_child_context_post_unpin, + + .cancel_request = guc_context_cancel_request, + + .enter = guc_virtual_context_enter, + .exit = guc_virtual_context_exit, + + .destroy = guc_child_context_destroy, + + .get_sibling = guc_virtual_get_sibling, +}; + +static struct intel_context * +guc_create_parallel(struct intel_engine_cs **engines, + unsigned int num_siblings, + unsigned int width) +{ + struct intel_engine_cs **siblings = NULL; + struct intel_context *parent = NULL, *ce, *err; + int i, j; + + siblings = kmalloc_array(num_siblings, 
+				 sizeof(*siblings),
+				 GFP_KERNEL);
+	if (!siblings)
+		return ERR_PTR(-ENOMEM);
+
+	for (i = 0; i < width; ++i) {
+		for (j = 0; j < num_siblings; ++j)
+			siblings[j] = engines[i * num_siblings + j];
+
+		ce = intel_engine_create_virtual(siblings, num_siblings,
+						 FORCE_VIRTUAL);
+		if (!ce) {
+			err = ERR_PTR(-ENOMEM);
+			goto unwind;
+		}
+
+		if (i == 0) {
+			parent = ce;
+			parent->ops = &virtual_parent_context_ops;
+		} else {
+			ce->ops = &virtual_child_context_ops;
+			intel_context_bind_parent_child(parent, ce);
+		}
+	}
+
+	kfree(siblings);
+	return parent;
+
+unwind:
+	if (parent)
+		intel_context_put(parent);
+	kfree(siblings);
+	return err;
+}
+
 static bool
 guc_irq_enable_breadcrumbs(struct intel_breadcrumbs *b)
 {
@@ -3756,7 +3845,8 @@ void intel_guc_submission_print_context_info(struct intel_guc *guc,
 }

 static struct intel_context *
-guc_create_virtual(struct intel_engine_cs **siblings, unsigned int count)
+guc_create_virtual(struct intel_engine_cs **siblings, unsigned int count,
+		   unsigned long flags)
 {
 	struct guc_virtual_engine *ve;
 	struct intel_guc *guc;
--
cgit v1.2.3

From f9d72092cb4902af8b5d4e647589deb248cfb44d Mon Sep 17 00:00:00 2001
From: Matthew Brost
Date: Thu, 14 Oct 2021 10:19:58 -0700
Subject: drm/i915/guc: Add basic GuC multi-lrc selftest

Add a very basic (single submission) multi-lrc selftest.

Signed-off-by: Matthew Brost
Reviewed-by: John Harrison
Signed-off-by: John Harrison
Link: https://patchwork.freedesktop.org/patch/msgid/20211014172005.27155-19-matthew.brost@intel.com
---
 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c  |   1 +
 .../gpu/drm/i915/gt/uc/selftest_guc_multi_lrc.c    | 179 +++++++++++++++++++++
 .../gpu/drm/i915/selftests/i915_live_selftests.h   |   1 +
 3 files changed, 181 insertions(+)
 create mode 100644 drivers/gpu/drm/i915/gt/uc/selftest_guc_multi_lrc.c

(limited to 'drivers/gpu')

diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index 7c12364a017a..57eb5f8bc8bb 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -3954,4 +3954,5 @@ bool intel_guc_virtual_engine_has_heartbeat(const struct intel_engine_cs *ve)

 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
 #include "selftest_guc.c"
+#include "selftest_guc_multi_lrc.c"
 #endif
diff --git a/drivers/gpu/drm/i915/gt/uc/selftest_guc_multi_lrc.c b/drivers/gpu/drm/i915/gt/uc/selftest_guc_multi_lrc.c
new file mode 100644
index 000000000000..50953c8e8b53
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/selftest_guc_multi_lrc.c
@@ -0,0 +1,179 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "selftests/igt_spinner.h"
+#include "selftests/igt_reset.h"
+#include "selftests/intel_scheduler_helpers.h"
+#include "gt/intel_engine_heartbeat.h"
+#include "gem/selftests/mock_context.h"
+
+static void logical_sort(struct intel_engine_cs **engines, int num_engines)
+{
+	struct intel_engine_cs *sorted[MAX_ENGINE_INSTANCE + 1];
+	int i, j;
+
+	for (i = 0; i < num_engines; ++i)
+		for (j = 0; j < MAX_ENGINE_INSTANCE + 1; ++j) {
+			if (engines[j]->logical_mask & BIT(i)) {
+				sorted[i] = engines[j];
+				break;
+			}
+		}
+
+	memcpy(*engines, *sorted,
+	       sizeof(struct intel_engine_cs *) * num_engines);
+}
+
+static struct intel_context *
+multi_lrc_create_parent(struct intel_gt *gt, u8 class,
+			unsigned long flags)
+{
+	struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+	int i = 0;
+
+	for_each_engine(engine, gt, id) 
{ + if (engine->class != class) + continue; + + siblings[i++] = engine; + } + + if (i <= 1) + return ERR_PTR(0); + + logical_sort(siblings, i); + + return intel_engine_create_parallel(siblings, 1, i); +} + +static void multi_lrc_context_unpin(struct intel_context *ce) +{ + struct intel_context *child; + + GEM_BUG_ON(!intel_context_is_parent(ce)); + + for_each_child(ce, child) + intel_context_unpin(child); + intel_context_unpin(ce); +} + +static void multi_lrc_context_put(struct intel_context *ce) +{ + GEM_BUG_ON(!intel_context_is_parent(ce)); + + /* + * Only the parent gets the creation ref put in the uAPI, the parent + * itself is responsible for creation ref put on the children. + */ + intel_context_put(ce); +} + +static struct i915_request * +multi_lrc_nop_request(struct intel_context *ce) +{ + struct intel_context *child; + struct i915_request *rq, *child_rq; + int i = 0; + + GEM_BUG_ON(!intel_context_is_parent(ce)); + + rq = intel_context_create_request(ce); + if (IS_ERR(rq)) + return rq; + + i915_request_get(rq); + i915_request_add(rq); + + for_each_child(ce, child) { + child_rq = intel_context_create_request(child); + if (IS_ERR(child_rq)) + goto child_error; + + if (++i == ce->parallel.number_children) + set_bit(I915_FENCE_FLAG_SUBMIT_PARALLEL, + &child_rq->fence.flags); + i915_request_add(child_rq); + } + + return rq; + +child_error: + i915_request_put(rq); + + return ERR_PTR(-ENOMEM); +} + +static int __intel_guc_multi_lrc_basic(struct intel_gt *gt, unsigned int class) +{ + struct intel_context *parent; + struct i915_request *rq; + int ret; + + parent = multi_lrc_create_parent(gt, class, 0); + if (IS_ERR(parent)) { + pr_err("Failed creating contexts: %ld", PTR_ERR(parent)); + return PTR_ERR(parent); + } else if (!parent) { + pr_debug("Not enough engines in class: %d", class); + return 0; + } + + rq = multi_lrc_nop_request(parent); + if (IS_ERR(rq)) { + ret = PTR_ERR(rq); + pr_err("Failed creating requests: %d", ret); + goto out; + } + + ret = intel_selftest_wait_for_rq(rq); + if (ret) + pr_err("Failed waiting on request: %d", ret); + + i915_request_put(rq); + + if (ret >= 0) { + ret = intel_gt_wait_for_idle(gt, HZ * 5); + if (ret < 0) + pr_err("GT failed to idle: %d\n", ret); + } + +out: + multi_lrc_context_unpin(parent); + multi_lrc_context_put(parent); + return ret; +} + +static int intel_guc_multi_lrc_basic(void *arg) +{ + struct intel_gt *gt = arg; + unsigned int class; + int ret; + + for (class = 0; class < MAX_ENGINE_CLASS + 1; ++class) { + ret = __intel_guc_multi_lrc_basic(gt, class); + if (ret) + return ret; + } + + return 0; +} + +int intel_guc_multi_lrc_live_selftests(struct drm_i915_private *i915) +{ + static const struct i915_subtest tests[] = { + SUBTEST(intel_guc_multi_lrc_basic), + }; + struct intel_gt *gt = &i915->gt; + + if (intel_gt_is_wedged(gt)) + return 0; + + if (!intel_uc_uses_guc_submission(>->uc)) + return 0; + + return intel_gt_live_subtests(tests, gt); +} diff --git a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h index 3cf6758931f9..bdd290f2bf3c 100644 --- a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h +++ b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h @@ -48,5 +48,6 @@ selftest(ring_submission, intel_ring_submission_live_selftests) selftest(perf, i915_perf_live_selftests) selftest(slpc, intel_slpc_live_selftests) selftest(guc, intel_guc_live_selftests) +selftest(guc_multi_lrc, intel_guc_multi_lrc_live_selftests) /* Here be dragons: keep last to run last! 
*/
selftest(late_gt_pm, intel_gt_pm_late_selftests)
--
cgit v1.2.3

From 5851387a422c2949cb19b52efd9616ff8b18bddd Mon Sep 17 00:00:00 2001
From: Matthew Brost
Date: Thu, 14 Oct 2021 10:19:59 -0700
Subject: drm/i915/guc: Implement no mid batch preemption for multi-lrc

For some users of multi-lrc, e.g. split frame, it isn't safe to preempt
mid BB. To safely enable preemption at the BB boundary, a handshake
between parent and child is needed, syncing the set of BBs at the
beginning and end of each batch. This is implemented via custom
emit_bb_start & emit_fini_breadcrumb functions and enabled by default if
a context is configured by the set parallel extension.

Lastly, this patch updates the process descriptor to the correct size as
the memory used in the handshake is directly after the process
descriptor.

v2:
 (John Harrison)
  - Fix wording of a few comments
  - Add structure for parent page layout
v3:
 (John Harrison)
  - Add a structure for sync semaphore
  - Use offsetof to calc address
  - Update commit message
v4:
 (John Harrison)
  - Fix typos in comment explaining memory map of scratch page

Signed-off-by: Matthew Brost
Reviewed-by: John Harrison
Signed-off-by: John Harrison
Link: https://patchwork.freedesktop.org/patch/msgid/20211014172005.27155-20-matthew.brost@intel.com
---
 drivers/gpu/drm/i915/gt/intel_context.c           |   2 +-
 drivers/gpu/drm/i915/gt/intel_context_types.h     |   2 +
 drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h       |   2 +-
 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c | 333 +++++++++++++++++++++-
 4 files changed, 326 insertions(+), 13 deletions(-)

(limited to 'drivers/gpu')

diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c
index 709c433a2dcc..488acd39ff67 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.c
+++ b/drivers/gpu/drm/i915/gt/intel_context.c
@@ -569,7 +569,7 @@ void intel_context_bind_parent_child(struct intel_context *parent,
 	GEM_BUG_ON(intel_context_is_child(child));
 	GEM_BUG_ON(intel_context_is_parent(child));

-	parent->parallel.number_children++;
+	parent->parallel.child_index = parent->parallel.number_children++;
 	list_add_tail(&child->parallel.child_link,
 		      &parent->parallel.child_list);
 	child->parallel.parent = parent;
diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h
index 1d880303a7e4..95a5b94b4ece 100644
--- a/drivers/gpu/drm/i915/gt/intel_context_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_context_types.h
@@ -250,6 +250,8 @@ struct intel_context {
 			struct i915_request *last_rq;
 			/** @number_children: number of children if parent */
 			u8 number_children;
+			/** @child_index: index into child_list if child */
+			u8 child_index;
 			/** @guc: GuC specific members for parallel submission */
 			struct {
 				/** @wqi_head: head pointer in work queue */
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
index 18da67cfcd92..722933e26347 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
@@ -186,7 +186,7 @@ struct guc_process_desc {
 	u32 wq_status;
 	u32 engine_presence;
 	u32 priority;
-	u32 reserved[30];
+	u32 reserved[36];
 } __packed;

 #define CONTEXT_REGISTRATION_FLAG_KMD BIT(0)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index 57eb5f8bc8bb..50f0f4eba03b 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -11,6 +11,7 @@
 #include "gt/intel_context.h"
 #include 
"gt/intel_engine_pm.h" #include "gt/intel_engine_heartbeat.h" +#include "gt/intel_gpu_commands.h" #include "gt/intel_gt.h" #include "gt/intel_gt_irq.h" #include "gt/intel_gt_pm.h" @@ -368,11 +369,16 @@ static inline struct i915_priolist *to_priolist(struct rb_node *rb) /* * When using multi-lrc submission a scratch memory area is reserved in the - * parent's context state for the process descriptor and work queue. Currently - * the scratch area is sized to a page. + * parent's context state for the process descriptor, work queue, and handshake + * between the parent + children contexts to insert safe preemption points + * between each of the BBs. Currently the scratch area is sized to a page. * * The layout of this scratch area is below: * 0 guc_process_desc + * + sizeof(struct guc_process_desc) child go + * + CACHELINE_BYTES child join[0] + * ... + * + CACHELINE_BYTES child join[n - 1] * ... unused * PARENT_SCRATCH_SIZE / 2 work queue start * ... work queue @@ -380,7 +386,25 @@ static inline struct i915_priolist *to_priolist(struct rb_node *rb) */ #define WQ_SIZE (PARENT_SCRATCH_SIZE / 2) #define WQ_OFFSET (PARENT_SCRATCH_SIZE - WQ_SIZE) -static u32 __get_process_desc_offset(struct intel_context *ce) + +struct sync_semaphore { + u32 semaphore; + u8 unused[CACHELINE_BYTES - sizeof(u32)]; +}; + +struct parent_scratch { + struct guc_process_desc pdesc; + + struct sync_semaphore go; + struct sync_semaphore join[MAX_ENGINE_INSTANCE + 1]; + + u8 unused[WQ_OFFSET - sizeof(struct guc_process_desc) - + sizeof(struct sync_semaphore) * (MAX_ENGINE_INSTANCE + 2)]; + + u32 wq[WQ_SIZE / sizeof(u32)]; +}; + +static u32 __get_parent_scratch_offset(struct intel_context *ce) { GEM_BUG_ON(!ce->parallel.guc.parent_page); @@ -389,23 +413,36 @@ static u32 __get_process_desc_offset(struct intel_context *ce) static u32 __get_wq_offset(struct intel_context *ce) { - return __get_process_desc_offset(ce) + WQ_OFFSET; + BUILD_BUG_ON(offsetof(struct parent_scratch, wq) != WQ_OFFSET); + + return __get_parent_scratch_offset(ce) + WQ_OFFSET; } -static struct guc_process_desc * -__get_process_desc(struct intel_context *ce) +static struct parent_scratch * +__get_parent_scratch(struct intel_context *ce) { + BUILD_BUG_ON(sizeof(struct parent_scratch) != PARENT_SCRATCH_SIZE); + BUILD_BUG_ON(sizeof(struct sync_semaphore) != CACHELINE_BYTES); + /* * Need to subtract LRC_STATE_OFFSET here as the * parallel.guc.parent_page is the offset into ce->state while * ce->lrc_reg_reg is ce->state + LRC_STATE_OFFSET. 
*/ - return (struct guc_process_desc *) + return (struct parent_scratch *) (ce->lrc_reg_state + - ((__get_process_desc_offset(ce) - + ((__get_parent_scratch_offset(ce) - LRC_STATE_OFFSET) / sizeof(u32))); } +static struct guc_process_desc * +__get_process_desc(struct intel_context *ce) +{ + struct parent_scratch *ps = __get_parent_scratch(ce); + + return &ps->pdesc; +} + static u32 *get_wq_pointer(struct guc_process_desc *desc, struct intel_context *ce, u32 wqi_size) @@ -425,8 +462,7 @@ static u32 *get_wq_pointer(struct guc_process_desc *desc, } #undef AVAILABLE_SPACE - return ((u32 *)__get_process_desc(ce)) + - ((WQ_OFFSET + ce->parallel.guc.wqi_tail) / sizeof(u32)); + return &__get_parent_scratch(ce)->wq[ce->parallel.guc.wqi_tail / sizeof(u32)]; } static struct guc_lrc_desc *__get_lrc_desc(struct intel_guc *guc, u32 index) @@ -1827,6 +1863,27 @@ static int deregister_context(struct intel_context *ce, u32 guc_id) return __guc_action_deregister_context(guc, guc_id); } +static inline void clear_children_join_go_memory(struct intel_context *ce) +{ + struct parent_scratch *ps = __get_parent_scratch(ce); + int i; + + ps->go.semaphore = 0; + for (i = 0; i < ce->parallel.number_children + 1; ++i) + ps->join[i].semaphore = 0; +} + +static inline u32 get_children_go_value(struct intel_context *ce) +{ + return __get_parent_scratch(ce)->go.semaphore; +} + +static inline u32 get_children_join_value(struct intel_context *ce, + u8 child_index) +{ + return __get_parent_scratch(ce)->join[child_index].semaphore; +} + static void guc_context_policy_init(struct intel_engine_cs *engine, struct guc_lrc_desc *desc) { @@ -1886,7 +1943,7 @@ static int guc_lrc_desc_pin(struct intel_context *ce, bool loop) ce->parallel.guc.wqi_head = 0; desc->process_desc = i915_ggtt_offset(ce->state) + - __get_process_desc_offset(ce); + __get_parent_scratch_offset(ce); desc->wq_addr = i915_ggtt_offset(ce->state) + __get_wq_offset(ce); desc->wq_size = WQ_SIZE; @@ -1908,6 +1965,8 @@ static int guc_lrc_desc_pin(struct intel_context *ce, bool loop) desc->context_flags = CONTEXT_REGISTRATION_FLAG_KMD; guc_context_policy_init(engine, desc); } + + clear_children_join_go_memory(ce); } /* @@ -2974,6 +3033,31 @@ static const struct intel_context_ops virtual_child_context_ops = { .get_sibling = guc_virtual_get_sibling, }; +/* + * The below override of the breadcrumbs is enabled when the user configures a + * context for parallel submission (multi-lrc, parent-child). + * + * The overridden breadcrumbs implements an algorithm which allows the GuC to + * safely preempt all the hw contexts configured for parallel submission + * between each BB. The contract between the i915 and GuC is if the parent + * context can be preempted, all the children can be preempted, and the GuC will + * always try to preempt the parent before the children. A handshake between the + * parent / children breadcrumbs ensures the i915 holds up its end of the deal + * creating a window to preempt between each set of BBs. 
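
The handshake contract described in the comment above can be modelled in
user space with two semaphores standing in for the MI_SEMAPHORE_WAIT
polls and GGTT writes that the emit functions below generate. A toy
pthread sketch (illustrative only; the GPU version uses ring-emitted
commands and arbitration control, not threads):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define N_CHILDREN 2

static atomic_int join_sem[N_CHILDREN];	/* child i -> parent: "in place" */
static atomic_int go_sem;		/* parent -> all children: "go" */

static void *child(void *arg)
{
	int i = (int)(long)arg;

	atomic_store(&join_sem[i], 1);	/* signal the parent */
	while (!atomic_load(&go_sem))	/* poll, like MI_SEMAPHORE_WAIT */
		;
	/* preemption would be off from here: run the batch */
	printf("child %d: batch running\n", i);
	return NULL;
}

int main(void)
{
	pthread_t t[N_CHILDREN];
	long i;

	for (i = 0; i < N_CHILDREN; i++)
		pthread_create(&t[i], NULL, child, (void *)i);

	for (i = 0; i < N_CHILDREN; i++)	/* wait on every child */
		while (!atomic_load(&join_sem[i]))
			;
	/* all children in place: "disable preemption", release them all */
	atomic_store(&go_sem, 1);
	printf("parent: batch running\n");

	for (i = 0; i < N_CHILDREN; i++)
		pthread_join(t[i], NULL);
	return 0;
}

Because nobody proceeds until everyone has joined, the only points at
which all contexts are preemptible together are before the join and
after the mirrored fini-breadcrumb handshake, which is exactly the
window the comment refers to.
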
+ */ +static int emit_bb_start_parent_no_preempt_mid_batch(struct i915_request *rq, + u64 offset, u32 len, + const unsigned int flags); +static int emit_bb_start_child_no_preempt_mid_batch(struct i915_request *rq, + u64 offset, u32 len, + const unsigned int flags); +static u32 * +emit_fini_breadcrumb_parent_no_preempt_mid_batch(struct i915_request *rq, + u32 *cs); +static u32 * +emit_fini_breadcrumb_child_no_preempt_mid_batch(struct i915_request *rq, + u32 *cs); + static struct intel_context * guc_create_parallel(struct intel_engine_cs **engines, unsigned int num_siblings, @@ -3009,6 +3093,20 @@ guc_create_parallel(struct intel_engine_cs **engines, } } + parent->engine->emit_bb_start = + emit_bb_start_parent_no_preempt_mid_batch; + parent->engine->emit_fini_breadcrumb = + emit_fini_breadcrumb_parent_no_preempt_mid_batch; + parent->engine->emit_fini_breadcrumb_dw = + 12 + 4 * parent->parallel.number_children; + for_each_child(parent, ce) { + ce->engine->emit_bb_start = + emit_bb_start_child_no_preempt_mid_batch; + ce->engine->emit_fini_breadcrumb = + emit_fini_breadcrumb_child_no_preempt_mid_batch; + ce->engine->emit_fini_breadcrumb_dw = 16; + } + kfree(siblings); return parent; @@ -3837,6 +3935,17 @@ void intel_guc_submission_print_context_info(struct intel_guc *guc, drm_printf(p, "\t\tWQI Status: %u\n\n", READ_ONCE(desc->wq_status)); + if (ce->engine->emit_bb_start == + emit_bb_start_parent_no_preempt_mid_batch) { + u8 i; + + drm_printf(p, "\t\tChildren Go: %u\n\n", + get_children_go_value(ce)); + for (i = 0; i < ce->parallel.number_children; ++i) + drm_printf(p, "\t\tChildren Join: %u\n", + get_children_join_value(ce, i)); + } + for_each_child(ce, child) guc_log_context(p, child); } @@ -3844,6 +3953,208 @@ void intel_guc_submission_print_context_info(struct intel_guc *guc, xa_unlock_irqrestore(&guc->context_lookup, flags); } +static inline u32 get_children_go_addr(struct intel_context *ce) +{ + GEM_BUG_ON(!intel_context_is_parent(ce)); + + return i915_ggtt_offset(ce->state) + + __get_parent_scratch_offset(ce) + + offsetof(struct parent_scratch, go.semaphore); +} + +static inline u32 get_children_join_addr(struct intel_context *ce, + u8 child_index) +{ + GEM_BUG_ON(!intel_context_is_parent(ce)); + + return i915_ggtt_offset(ce->state) + + __get_parent_scratch_offset(ce) + + offsetof(struct parent_scratch, join[child_index].semaphore); +} + +#define PARENT_GO_BB 1 +#define PARENT_GO_FINI_BREADCRUMB 0 +#define CHILD_GO_BB 1 +#define CHILD_GO_FINI_BREADCRUMB 0 +static int emit_bb_start_parent_no_preempt_mid_batch(struct i915_request *rq, + u64 offset, u32 len, + const unsigned int flags) +{ + struct intel_context *ce = rq->context; + u32 *cs; + u8 i; + + GEM_BUG_ON(!intel_context_is_parent(ce)); + + cs = intel_ring_begin(rq, 10 + 4 * ce->parallel.number_children); + if (IS_ERR(cs)) + return PTR_ERR(cs); + + /* Wait on children */ + for (i = 0; i < ce->parallel.number_children; ++i) { + *cs++ = (MI_SEMAPHORE_WAIT | + MI_SEMAPHORE_GLOBAL_GTT | + MI_SEMAPHORE_POLL | + MI_SEMAPHORE_SAD_EQ_SDD); + *cs++ = PARENT_GO_BB; + *cs++ = get_children_join_addr(ce, i); + *cs++ = 0; + } + + /* Turn off preemption */ + *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE; + *cs++ = MI_NOOP; + + /* Tell children go */ + cs = gen8_emit_ggtt_write(cs, + CHILD_GO_BB, + get_children_go_addr(ce), + 0); + + /* Jump to batch */ + *cs++ = MI_BATCH_BUFFER_START_GEN8 | + (flags & I915_DISPATCH_SECURE ? 
0 : BIT(8)); + *cs++ = lower_32_bits(offset); + *cs++ = upper_32_bits(offset); + *cs++ = MI_NOOP; + + intel_ring_advance(rq, cs); + + return 0; +} + +static int emit_bb_start_child_no_preempt_mid_batch(struct i915_request *rq, + u64 offset, u32 len, + const unsigned int flags) +{ + struct intel_context *ce = rq->context; + struct intel_context *parent = intel_context_to_parent(ce); + u32 *cs; + + GEM_BUG_ON(!intel_context_is_child(ce)); + + cs = intel_ring_begin(rq, 12); + if (IS_ERR(cs)) + return PTR_ERR(cs); + + /* Signal parent */ + cs = gen8_emit_ggtt_write(cs, + PARENT_GO_BB, + get_children_join_addr(parent, + ce->parallel.child_index), + 0); + + /* Wait on parent for go */ + *cs++ = (MI_SEMAPHORE_WAIT | + MI_SEMAPHORE_GLOBAL_GTT | + MI_SEMAPHORE_POLL | + MI_SEMAPHORE_SAD_EQ_SDD); + *cs++ = CHILD_GO_BB; + *cs++ = get_children_go_addr(parent); + *cs++ = 0; + + /* Turn off preemption */ + *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE; + + /* Jump to batch */ + *cs++ = MI_BATCH_BUFFER_START_GEN8 | + (flags & I915_DISPATCH_SECURE ? 0 : BIT(8)); + *cs++ = lower_32_bits(offset); + *cs++ = upper_32_bits(offset); + + intel_ring_advance(rq, cs); + + return 0; +} + +static u32 * +emit_fini_breadcrumb_parent_no_preempt_mid_batch(struct i915_request *rq, + u32 *cs) +{ + struct intel_context *ce = rq->context; + u8 i; + + GEM_BUG_ON(!intel_context_is_parent(ce)); + + /* Wait on children */ + for (i = 0; i < ce->parallel.number_children; ++i) { + *cs++ = (MI_SEMAPHORE_WAIT | + MI_SEMAPHORE_GLOBAL_GTT | + MI_SEMAPHORE_POLL | + MI_SEMAPHORE_SAD_EQ_SDD); + *cs++ = PARENT_GO_FINI_BREADCRUMB; + *cs++ = get_children_join_addr(ce, i); + *cs++ = 0; + } + + /* Turn on preemption */ + *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE; + *cs++ = MI_NOOP; + + /* Tell children go */ + cs = gen8_emit_ggtt_write(cs, + CHILD_GO_FINI_BREADCRUMB, + get_children_go_addr(ce), + 0); + + /* Emit fini breadcrumb */ + cs = gen8_emit_ggtt_write(cs, + rq->fence.seqno, + i915_request_active_timeline(rq)->hwsp_offset, + 0); + + /* User interrupt */ + *cs++ = MI_USER_INTERRUPT; + *cs++ = MI_NOOP; + + rq->tail = intel_ring_offset(rq, cs); + + return cs; +} + +static u32 * +emit_fini_breadcrumb_child_no_preempt_mid_batch(struct i915_request *rq, u32 *cs) +{ + struct intel_context *ce = rq->context; + struct intel_context *parent = intel_context_to_parent(ce); + + GEM_BUG_ON(!intel_context_is_child(ce)); + + /* Turn on preemption */ + *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE; + *cs++ = MI_NOOP; + + /* Signal parent */ + cs = gen8_emit_ggtt_write(cs, + PARENT_GO_FINI_BREADCRUMB, + get_children_join_addr(parent, + ce->parallel.child_index), + 0); + + /* Wait parent on for go */ + *cs++ = (MI_SEMAPHORE_WAIT | + MI_SEMAPHORE_GLOBAL_GTT | + MI_SEMAPHORE_POLL | + MI_SEMAPHORE_SAD_EQ_SDD); + *cs++ = CHILD_GO_FINI_BREADCRUMB; + *cs++ = get_children_go_addr(parent); + *cs++ = 0; + + /* Emit fini breadcrumb */ + cs = gen8_emit_ggtt_write(cs, + rq->fence.seqno, + i915_request_active_timeline(rq)->hwsp_offset, + 0); + + /* User interrupt */ + *cs++ = MI_USER_INTERRUPT; + *cs++ = MI_NOOP; + + rq->tail = intel_ring_offset(rq, cs); + + return cs; +} + static struct intel_context * guc_create_virtual(struct intel_engine_cs **siblings, unsigned int count, unsigned long flags) -- cgit v1.2.3 From 544460c33821b44c2f0c643121303c3dc3f66ef1 Mon Sep 17 00:00:00 2001 From: Matthew Brost Date: Thu, 14 Oct 2021 10:20:00 -0700 Subject: drm/i915: Multi-BB execbuf Allow multiple batch buffers to be submitted in a single execbuf IOCTL after a context has been configured with the 
'set_parallel' extension. The number of batches is implicit, based on
the context's configuration. This is implemented with a series of
loops. First a loop is used to find all the batches, a loop to pin all
the HW contexts, a loop to create all the requests, a loop to submit
(emit BB start, etc...) all the requests, a loop to tie the requests to
the VMAs they touch, and finally a loop to commit the requests to the
backend.

A composite fence is also created for the generated requests to return
to the user and to stick in dma resv slots.

No behavior of the existing IOCTL should change aside from throttling
when the ring for a context is full. In this situation, i915 will now
wait while holding the object locks. This change was made because the
code is much simpler if it waits while holding the locks, and we
believe there isn't a huge benefit to dropping them. If this proves
false we can restructure the code to drop the locks during the wait.

IGT: https://patchwork.freedesktop.org/patch/447008/?series=93071&rev=1
media UMD: https://github.com/intel/media-driver/pull/1252

v2:
 (Matthew Brost)
  - Return proper error value if i915_request_create fails
v3:
 (John Harrison)
  - Add comment explaining create / add order loops + locking
  - Update commit message explaining difference in IOCTL behavior
  - Line wrap some comments
  - eb_add_request returns void
  - Return -EINVAL rather than triggering BUG_ON if cmd parser used
 (Checkpatch)
  - Check eb->batch_len[*current_batch]
v4:
 (CI)
  - Set batch len if passed in via execbuf args
  - Call __i915_request_skip after __i915_request_commit
 (Kernel test robot)
  - Initialize rq to NULL in eb_pin_timeline
v5:
 (John Harrison)
  - Fix typo in comments near bb order loops

Signed-off-by: Matthew Brost
Reviewed-by: John Harrison
Signed-off-by: John Harrison
Link: https://patchwork.freedesktop.org/patch/msgid/20211014172005.27155-21-matthew.brost@intel.com
---
 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c    | 783 +++++++++++++++-------
 drivers/gpu/drm/i915/gt/intel_context.h           |   8 +-
 drivers/gpu/drm/i915/gt/intel_context_types.h     |  10 +
 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c |   2 +
 drivers/gpu/drm/i915/i915_request.h               |   9 +
 drivers/gpu/drm/i915/i915_vma.c                   |  21 +-
 drivers/gpu/drm/i915/i915_vma.h                   |  13 +-
 7 files changed, 595 insertions(+), 251 deletions(-)

(limited to 'drivers/gpu')

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index 8b3a25bd93e6..1cb241298d9b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -246,17 +246,25 @@ struct i915_execbuffer {
 	struct drm_i915_gem_exec_object2 *exec; /** ioctl execobj[] */
 	struct eb_vma *vma;

-	struct intel_engine_cs *engine; /** engine to queue the request to */
+	struct intel_gt *gt; /* gt for the execbuf */
 	struct intel_context *context; /* logical state for the request */
 	struct i915_gem_context *gem_context; /** caller's context */

-	struct i915_request *request; /** our request to build */
-	struct eb_vma *batch; /** identity of the batch obj/vma */
+	/** our requests to build */
+	struct i915_request *requests[MAX_ENGINE_INSTANCE + 1];
+	/** identity of the batch obj/vma */
+	struct eb_vma *batches[MAX_ENGINE_INSTANCE + 1];
 	struct i915_vma *trampoline; /** trampoline used for chaining */

+	/** used for excl fence in dma_resv objects when > 1 BB submitted */
+	struct dma_fence *composite_fence;
+
 	/** actual size of execobj[] as we may extend it for the cmdparser */
 	unsigned int buffer_count;

+	/* 
number of batches in execbuf IOCTL */ + unsigned int num_batches; + /** list of vma not yet bound during reservation phase */ struct list_head unbound; @@ -283,7 +291,8 @@ struct i915_execbuffer { u64 invalid_flags; /** Set of execobj.flags that are invalid */ - u64 batch_len; /** Length of batch within object */ + /** Length of batch within object */ + u64 batch_len[MAX_ENGINE_INSTANCE + 1]; u32 batch_start_offset; /** Location within object of batch */ u32 batch_flags; /** Flags composed for emit_bb_start() */ struct intel_gt_buffer_pool_node *batch_pool; /** pool node for batch buffer */ @@ -301,14 +310,13 @@ struct i915_execbuffer { }; static int eb_parse(struct i915_execbuffer *eb); -static struct i915_request *eb_pin_engine(struct i915_execbuffer *eb, - bool throttle); +static int eb_pin_engine(struct i915_execbuffer *eb, bool throttle); static void eb_unpin_engine(struct i915_execbuffer *eb); static inline bool eb_use_cmdparser(const struct i915_execbuffer *eb) { - return intel_engine_requires_cmd_parser(eb->engine) || - (intel_engine_using_cmd_parser(eb->engine) && + return intel_engine_requires_cmd_parser(eb->context->engine) || + (intel_engine_using_cmd_parser(eb->context->engine) && eb->args->batch_len); } @@ -535,11 +543,21 @@ eb_validate_vma(struct i915_execbuffer *eb, return 0; } -static void +static inline bool +is_batch_buffer(struct i915_execbuffer *eb, unsigned int buffer_idx) +{ + return eb->args->flags & I915_EXEC_BATCH_FIRST ? + buffer_idx < eb->num_batches : + buffer_idx >= eb->args->buffer_count - eb->num_batches; +} + +static int eb_add_vma(struct i915_execbuffer *eb, - unsigned int i, unsigned batch_idx, + unsigned int *current_batch, + unsigned int i, struct i915_vma *vma) { + struct drm_i915_private *i915 = eb->i915; struct drm_i915_gem_exec_object2 *entry = &eb->exec[i]; struct eb_vma *ev = &eb->vma[i]; @@ -566,15 +584,43 @@ eb_add_vma(struct i915_execbuffer *eb, * Note that actual hangs have only been observed on gen7, but for * paranoia do it everywhere. */ - if (i == batch_idx) { + if (is_batch_buffer(eb, i)) { if (entry->relocation_count && !(ev->flags & EXEC_OBJECT_PINNED)) ev->flags |= __EXEC_OBJECT_NEEDS_BIAS; if (eb->reloc_cache.has_fence) ev->flags |= EXEC_OBJECT_NEEDS_FENCE; - eb->batch = ev; + eb->batches[*current_batch] = ev; + + if (unlikely(ev->flags & EXEC_OBJECT_WRITE)) { + drm_dbg(&i915->drm, + "Attempting to use self-modifying batch buffer\n"); + return -EINVAL; + } + + if (range_overflows_t(u64, + eb->batch_start_offset, + eb->args->batch_len, + ev->vma->size)) { + drm_dbg(&i915->drm, "Attempting to use out-of-bounds batch\n"); + return -EINVAL; + } + + if (eb->args->batch_len == 0) + eb->batch_len[*current_batch] = ev->vma->size - + eb->batch_start_offset; + else + eb->batch_len[*current_batch] = eb->args->batch_len; + if (unlikely(eb->batch_len[*current_batch] == 0)) { /* impossible! 
*/ + drm_dbg(&i915->drm, "Invalid batch length\n"); + return -EINVAL; + } + + ++*current_batch; } + + return 0; } static inline int use_cpu_reloc(const struct reloc_cache *cache, @@ -718,14 +764,6 @@ static int eb_reserve(struct i915_execbuffer *eb) } while (1); } -static unsigned int eb_batch_index(const struct i915_execbuffer *eb) -{ - if (eb->args->flags & I915_EXEC_BATCH_FIRST) - return 0; - else - return eb->buffer_count - 1; -} - static int eb_select_context(struct i915_execbuffer *eb) { struct i915_gem_context *ctx; @@ -846,9 +884,7 @@ static struct i915_vma *eb_lookup_vma(struct i915_execbuffer *eb, u32 handle) static int eb_lookup_vmas(struct i915_execbuffer *eb) { - struct drm_i915_private *i915 = eb->i915; - unsigned int batch = eb_batch_index(eb); - unsigned int i; + unsigned int i, current_batch = 0; int err = 0; INIT_LIST_HEAD(&eb->relocs); @@ -868,7 +904,9 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb) goto err; } - eb_add_vma(eb, i, batch, vma); + err = eb_add_vma(eb, &current_batch, i, vma); + if (err) + return err; if (i915_gem_object_is_userptr(vma->obj)) { err = i915_gem_object_userptr_submit_init(vma->obj); @@ -891,26 +929,6 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb) } } - if (unlikely(eb->batch->flags & EXEC_OBJECT_WRITE)) { - drm_dbg(&i915->drm, - "Attempting to use self-modifying batch buffer\n"); - return -EINVAL; - } - - if (range_overflows_t(u64, - eb->batch_start_offset, eb->batch_len, - eb->batch->vma->size)) { - drm_dbg(&i915->drm, "Attempting to use out-of-bounds batch\n"); - return -EINVAL; - } - - if (eb->batch_len == 0) - eb->batch_len = eb->batch->vma->size - eb->batch_start_offset; - if (unlikely(eb->batch_len == 0)) { /* impossible! */ - drm_dbg(&i915->drm, "Invalid batch length\n"); - return -EINVAL; - } - return 0; err: @@ -1643,8 +1661,7 @@ static int eb_reinit_userptr(struct i915_execbuffer *eb) return 0; } -static noinline int eb_relocate_parse_slow(struct i915_execbuffer *eb, - struct i915_request *rq) +static noinline int eb_relocate_parse_slow(struct i915_execbuffer *eb) { bool have_copy = false; struct eb_vma *ev; @@ -1660,21 +1677,6 @@ repeat: eb_release_vmas(eb, false); i915_gem_ww_ctx_fini(&eb->ww); - if (rq) { - /* nonblocking is always false */ - if (i915_request_wait(rq, I915_WAIT_INTERRUPTIBLE, - MAX_SCHEDULE_TIMEOUT) < 0) { - i915_request_put(rq); - rq = NULL; - - err = -EINTR; - goto err_relock; - } - - i915_request_put(rq); - rq = NULL; - } - /* * We take 3 passes through the slowpath. 
* @@ -1701,28 +1703,21 @@ repeat: if (!err) err = eb_reinit_userptr(eb); -err_relock: i915_gem_ww_ctx_init(&eb->ww, true); if (err) goto out; /* reacquire the objects */ repeat_validate: - rq = eb_pin_engine(eb, false); - if (IS_ERR(rq)) { - err = PTR_ERR(rq); - rq = NULL; + err = eb_pin_engine(eb, false); + if (err) goto err; - } - - /* We didn't throttle, should be NULL */ - GEM_WARN_ON(rq); err = eb_validate_vmas(eb); if (err) goto err; - GEM_BUG_ON(!eb->batch); + GEM_BUG_ON(!eb->batches[0]); list_for_each_entry(ev, &eb->relocs, reloc_link) { if (!have_copy) { @@ -1786,46 +1781,23 @@ out: } } - if (rq) - i915_request_put(rq); - return err; } static int eb_relocate_parse(struct i915_execbuffer *eb) { int err; - struct i915_request *rq = NULL; bool throttle = true; retry: - rq = eb_pin_engine(eb, throttle); - if (IS_ERR(rq)) { - err = PTR_ERR(rq); - rq = NULL; + err = eb_pin_engine(eb, throttle); + if (err) { if (err != -EDEADLK) return err; goto err; } - if (rq) { - bool nonblock = eb->file->filp->f_flags & O_NONBLOCK; - - /* Need to drop all locks now for throttling, take slowpath */ - err = i915_request_wait(rq, I915_WAIT_INTERRUPTIBLE, 0); - if (err == -ETIME) { - if (nonblock) { - err = -EWOULDBLOCK; - i915_request_put(rq); - goto err; - } - goto slow; - } - i915_request_put(rq); - rq = NULL; - } - /* only throttle once, even if we didn't need to throttle */ throttle = false; @@ -1865,7 +1837,7 @@ err: return err; slow: - err = eb_relocate_parse_slow(eb, rq); + err = eb_relocate_parse_slow(eb); if (err) /* * If the user expects the execobject.offset and @@ -1879,11 +1851,40 @@ slow: return err; } +/* + * Using two helper loops for the order in which requests / batches are created + * and added to the backend. Requests are created in order from the parent to + * the last child. Requests are added in the reverse order, from the last child + * to the parent. This is done for locking reasons as the timeline lock is acquired + * during request creation and released when the request is added to the + * backend. To make lockdep happy (see intel_context_timeline_lock) this must be + * the ordering. 
+ */ +#define for_each_batch_create_order(_eb, _i) \ + for ((_i) = 0; (_i) < (_eb)->num_batches; ++(_i)) +#define for_each_batch_add_order(_eb, _i) \ + BUILD_BUG_ON(!typecheck(int, _i)); \ + for ((_i) = (_eb)->num_batches - 1; (_i) >= 0; --(_i)) + +static struct i915_request * +eb_find_first_request_added(struct i915_execbuffer *eb) +{ + int i; + + for_each_batch_add_order(eb, i) + if (eb->requests[i]) + return eb->requests[i]; + + GEM_BUG_ON("Request not found"); + + return NULL; +} + static int eb_move_to_gpu(struct i915_execbuffer *eb) { const unsigned int count = eb->buffer_count; unsigned int i = count; - int err = 0; + int err = 0, j; while (i--) { struct eb_vma *ev = &eb->vma[i]; @@ -1896,11 +1897,17 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb) if (flags & EXEC_OBJECT_CAPTURE) { struct i915_capture_list *capture; - capture = kmalloc(sizeof(*capture), GFP_KERNEL); - if (capture) { - capture->next = eb->request->capture_list; - capture->vma = vma; - eb->request->capture_list = capture; + for_each_batch_create_order(eb, j) { + if (!eb->requests[j]) + break; + + capture = kmalloc(sizeof(*capture), GFP_KERNEL); + if (capture) { + capture->next = + eb->requests[j]->capture_list; + capture->vma = vma; + eb->requests[j]->capture_list = capture; + } } } @@ -1921,14 +1928,26 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb) flags &= ~EXEC_OBJECT_ASYNC; } + /* We only need to await on the first request */ if (err == 0 && !(flags & EXEC_OBJECT_ASYNC)) { err = i915_request_await_object - (eb->request, obj, flags & EXEC_OBJECT_WRITE); + (eb_find_first_request_added(eb), obj, + flags & EXEC_OBJECT_WRITE); } - if (err == 0) - err = i915_vma_move_to_active(vma, eb->request, - flags | __EXEC_OBJECT_NO_RESERVE); + for_each_batch_add_order(eb, j) { + if (err) + break; + if (!eb->requests[j]) + continue; + + err = _i915_vma_move_to_active(vma, eb->requests[j], + j ? NULL : + eb->composite_fence ? + eb->composite_fence : + &eb->requests[j]->fence, + flags | __EXEC_OBJECT_NO_RESERVE); + } } #ifdef CONFIG_MMU_NOTIFIER @@ -1959,11 +1978,16 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb) goto err_skip; /* Unconditionally flush any chipset caches (for streaming writes). 
*/ - intel_gt_chipset_flush(eb->engine->gt); + intel_gt_chipset_flush(eb->gt); return 0; err_skip: - i915_request_set_error_once(eb->request, err); + for_each_batch_create_order(eb, j) { + if (!eb->requests[j]) + break; + + i915_request_set_error_once(eb->requests[j], err); + } return err; } @@ -2058,14 +2082,17 @@ static int eb_parse(struct i915_execbuffer *eb) int err; if (!eb_use_cmdparser(eb)) { - batch = eb_dispatch_secure(eb, eb->batch->vma); + batch = eb_dispatch_secure(eb, eb->batches[0]->vma); if (IS_ERR(batch)) return PTR_ERR(batch); goto secure_batch; } - len = eb->batch_len; + if (intel_context_is_parallel(eb->context)) + return -EINVAL; + + len = eb->batch_len[0]; if (!CMDPARSER_USES_GGTT(eb->i915)) { /* * ppGTT backed shadow buffers must be mapped RO, to prevent @@ -2079,11 +2106,11 @@ static int eb_parse(struct i915_execbuffer *eb) } else { len += I915_CMD_PARSER_TRAMPOLINE_SIZE; } - if (unlikely(len < eb->batch_len)) /* last paranoid check of overflow */ + if (unlikely(len < eb->batch_len[0])) /* last paranoid check of overflow */ return -EINVAL; if (!pool) { - pool = intel_gt_get_buffer_pool(eb->engine->gt, len, + pool = intel_gt_get_buffer_pool(eb->gt, len, I915_MAP_WB); if (IS_ERR(pool)) return PTR_ERR(pool); @@ -2108,7 +2135,7 @@ static int eb_parse(struct i915_execbuffer *eb) trampoline = shadow; shadow = shadow_batch_pin(eb, pool->obj, - &eb->engine->gt->ggtt->vm, + &eb->gt->ggtt->vm, PIN_GLOBAL); if (IS_ERR(shadow)) { err = PTR_ERR(shadow); @@ -2130,26 +2157,29 @@ static int eb_parse(struct i915_execbuffer *eb) if (err) goto err_trampoline; - err = intel_engine_cmd_parser(eb->engine, - eb->batch->vma, + err = intel_engine_cmd_parser(eb->context->engine, + eb->batches[0]->vma, eb->batch_start_offset, - eb->batch_len, + eb->batch_len[0], shadow, trampoline); if (err) goto err_unpin_batch; - eb->batch = &eb->vma[eb->buffer_count++]; - eb->batch->vma = i915_vma_get(shadow); - eb->batch->flags = __EXEC_OBJECT_HAS_PIN; + eb->batches[0] = &eb->vma[eb->buffer_count++]; + eb->batches[0]->vma = i915_vma_get(shadow); + eb->batches[0]->flags = __EXEC_OBJECT_HAS_PIN; eb->trampoline = trampoline; eb->batch_start_offset = 0; secure_batch: if (batch) { - eb->batch = &eb->vma[eb->buffer_count++]; - eb->batch->flags = __EXEC_OBJECT_HAS_PIN; - eb->batch->vma = i915_vma_get(batch); + if (intel_context_is_parallel(eb->context)) + return -EINVAL; + + eb->batches[0] = &eb->vma[eb->buffer_count++]; + eb->batches[0]->flags = __EXEC_OBJECT_HAS_PIN; + eb->batches[0]->vma = i915_vma_get(batch); } return 0; @@ -2165,19 +2195,18 @@ err: return err; } -static int eb_submit(struct i915_execbuffer *eb, struct i915_vma *batch) +static int eb_request_submit(struct i915_execbuffer *eb, + struct i915_request *rq, + struct i915_vma *batch, + u64 batch_len) { int err; - if (intel_context_nopreempt(eb->context)) - __set_bit(I915_FENCE_FLAG_NOPREEMPT, &eb->request->fence.flags); - - err = eb_move_to_gpu(eb); - if (err) - return err; + if (intel_context_nopreempt(rq->context)) + __set_bit(I915_FENCE_FLAG_NOPREEMPT, &rq->fence.flags); if (eb->args->flags & I915_EXEC_GEN7_SOL_RESET) { - err = i915_reset_gen7_sol_offsets(eb->request); + err = i915_reset_gen7_sol_offsets(rq); if (err) return err; } @@ -2188,26 +2217,26 @@ static int eb_submit(struct i915_execbuffer *eb, struct i915_vma *batch) * allows us to determine if the batch is still waiting on the GPU * or actually running by checking the breadcrumb. 
*/ - if (eb->engine->emit_init_breadcrumb) { - err = eb->engine->emit_init_breadcrumb(eb->request); + if (rq->context->engine->emit_init_breadcrumb) { + err = rq->context->engine->emit_init_breadcrumb(rq); if (err) return err; } - err = eb->engine->emit_bb_start(eb->request, - batch->node.start + - eb->batch_start_offset, - eb->batch_len, - eb->batch_flags); + err = rq->context->engine->emit_bb_start(rq, + batch->node.start + + eb->batch_start_offset, + batch_len, + eb->batch_flags); if (err) return err; if (eb->trampoline) { + GEM_BUG_ON(intel_context_is_parallel(rq->context)); GEM_BUG_ON(eb->batch_start_offset); - err = eb->engine->emit_bb_start(eb->request, - eb->trampoline->node.start + - eb->batch_len, - 0, 0); + err = rq->context->engine->emit_bb_start(rq, + eb->trampoline->node.start + + batch_len, 0, 0); if (err) return err; } @@ -2215,6 +2244,27 @@ static int eb_submit(struct i915_execbuffer *eb, struct i915_vma *batch) return 0; } +static int eb_submit(struct i915_execbuffer *eb) +{ + unsigned int i; + int err; + + err = eb_move_to_gpu(eb); + + for_each_batch_create_order(eb, i) { + if (!eb->requests[i]) + break; + + trace_i915_request_queue(eb->requests[i], eb->batch_flags); + if (!err) + err = eb_request_submit(eb, eb->requests[i], + eb->batches[i]->vma, + eb->batch_len[i]); + } + + return err; +} + static int num_vcs_engines(const struct drm_i915_private *i915) { return hweight_long(VDBOX_MASK(&i915->gt)); @@ -2280,26 +2330,11 @@ static struct i915_request *eb_throttle(struct i915_execbuffer *eb, struct intel return i915_request_get(rq); } -static struct i915_request *eb_pin_engine(struct i915_execbuffer *eb, bool throttle) +static int eb_pin_timeline(struct i915_execbuffer *eb, struct intel_context *ce, + bool throttle) { - struct intel_context *ce = eb->context; struct intel_timeline *tl; struct i915_request *rq = NULL; - int err; - - GEM_BUG_ON(eb->args->flags & __EXEC_ENGINE_PINNED); - - if (unlikely(intel_context_is_banned(ce))) - return ERR_PTR(-EIO); - - /* - * Pinning the contexts may generate requests in order to acquire - * GGTT space, so do this first before we reserve a seqno for - * ourselves. - */ - err = intel_context_pin_ww(ce, &eb->ww); - if (err) - return ERR_PTR(err); /* * Take a local wakeref for preparing to dispatch the execbuf as @@ -2310,33 +2345,108 @@ static struct i915_request *eb_pin_engine(struct i915_execbuffer *eb, bool throt * taken on the engine, and the parent device. */ tl = intel_context_timeline_lock(ce); - if (IS_ERR(tl)) { - intel_context_unpin(ce); - return ERR_CAST(tl); - } + if (IS_ERR(tl)) + return PTR_ERR(tl); intel_context_enter(ce); if (throttle) rq = eb_throttle(eb, ce); intel_context_timeline_unlock(tl); + if (rq) { + bool nonblock = eb->file->filp->f_flags & O_NONBLOCK; + long timeout = nonblock ? 
0 : MAX_SCHEDULE_TIMEOUT; + + if (i915_request_wait(rq, I915_WAIT_INTERRUPTIBLE, + timeout) < 0) { + i915_request_put(rq); + + tl = intel_context_timeline_lock(ce); + intel_context_exit(ce); + intel_context_timeline_unlock(tl); + + if (nonblock) + return -EWOULDBLOCK; + else + return -EINTR; + } + i915_request_put(rq); + } + + return 0; +} + +static int eb_pin_engine(struct i915_execbuffer *eb, bool throttle) +{ + struct intel_context *ce = eb->context, *child; + int err; + int i = 0, j = 0; + + GEM_BUG_ON(eb->args->flags & __EXEC_ENGINE_PINNED); + + if (unlikely(intel_context_is_banned(ce))) + return -EIO; + + /* + * Pinning the contexts may generate requests in order to acquire + * GGTT space, so do this first before we reserve a seqno for + * ourselves. + */ + err = intel_context_pin_ww(ce, &eb->ww); + if (err) + return err; + for_each_child(ce, child) { + err = intel_context_pin_ww(child, &eb->ww); + GEM_BUG_ON(err); /* perma-pinned should incr a counter */ + } + + for_each_child(ce, child) { + err = eb_pin_timeline(eb, child, throttle); + if (err) + goto unwind; + ++i; + } + err = eb_pin_timeline(eb, ce, throttle); + if (err) + goto unwind; + eb->args->flags |= __EXEC_ENGINE_PINNED; - return rq; + return 0; + +unwind: + for_each_child(ce, child) { + if (j++ < i) { + mutex_lock(&child->timeline->mutex); + intel_context_exit(child); + mutex_unlock(&child->timeline->mutex); + } + } + for_each_child(ce, child) + intel_context_unpin(child); + intel_context_unpin(ce); + return err; } static void eb_unpin_engine(struct i915_execbuffer *eb) { - struct intel_context *ce = eb->context; - struct intel_timeline *tl = ce->timeline; + struct intel_context *ce = eb->context, *child; if (!(eb->args->flags & __EXEC_ENGINE_PINNED)) return; eb->args->flags &= ~__EXEC_ENGINE_PINNED; - mutex_lock(&tl->mutex); + for_each_child(ce, child) { + mutex_lock(&child->timeline->mutex); + intel_context_exit(child); + mutex_unlock(&child->timeline->mutex); + + intel_context_unpin(child); + } + + mutex_lock(&ce->timeline->mutex); intel_context_exit(ce); - mutex_unlock(&tl->mutex); + mutex_unlock(&ce->timeline->mutex); intel_context_unpin(ce); } @@ -2387,7 +2497,7 @@ eb_select_legacy_ring(struct i915_execbuffer *eb) static int eb_select_engine(struct i915_execbuffer *eb) { - struct intel_context *ce; + struct intel_context *ce, *child; unsigned int idx; int err; @@ -2400,6 +2510,20 @@ eb_select_engine(struct i915_execbuffer *eb) if (IS_ERR(ce)) return PTR_ERR(ce); + if (intel_context_is_parallel(ce)) { + if (eb->buffer_count < ce->parallel.number_children + 1) { + intel_context_put(ce); + return -EINVAL; + } + if (eb->batch_start_offset || eb->args->batch_len) { + intel_context_put(ce); + return -EINVAL; + } + } + eb->num_batches = ce->parallel.number_children + 1; + + for_each_child(ce, child) + intel_context_get(child); intel_gt_pm_get(ce->engine->gt); if (!test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) { @@ -2407,6 +2531,13 @@ eb_select_engine(struct i915_execbuffer *eb) if (err) goto err; } + for_each_child(ce, child) { + if (!test_bit(CONTEXT_ALLOC_BIT, &child->flags)) { + err = intel_context_alloc_state(child); + if (err) + goto err; + } + } /* * ABI: Before userspace accesses the GPU (e.g. 
execbuffer), report @@ -2417,7 +2548,7 @@ eb_select_engine(struct i915_execbuffer *eb) goto err; eb->context = ce; - eb->engine = ce->engine; + eb->gt = ce->engine->gt; /* * Make sure engine pool stays alive even if we call intel_context_put @@ -2428,6 +2559,8 @@ eb_select_engine(struct i915_execbuffer *eb) err: intel_gt_pm_put(ce->engine->gt); + for_each_child(ce, child) + intel_context_put(child); intel_context_put(ce); return err; } @@ -2435,7 +2568,11 @@ err: static void eb_put_engine(struct i915_execbuffer *eb) { - intel_gt_pm_put(eb->engine->gt); + struct intel_context *child; + + intel_gt_pm_put(eb->gt); + for_each_child(eb->context, child) + intel_context_put(child); intel_context_put(eb->context); } @@ -2658,7 +2795,8 @@ static void put_fence_array(struct eb_fence *fences, int num_fences) } static int -await_fence_array(struct i915_execbuffer *eb) +await_fence_array(struct i915_execbuffer *eb, + struct i915_request *rq) { unsigned int n; int err; @@ -2672,8 +2810,7 @@ await_fence_array(struct i915_execbuffer *eb) if (!eb->fences[n].dma_fence) continue; - err = i915_request_await_dma_fence(eb->request, - eb->fences[n].dma_fence); + err = i915_request_await_dma_fence(rq, eb->fences[n].dma_fence); if (err < 0) return err; } @@ -2681,9 +2818,9 @@ await_fence_array(struct i915_execbuffer *eb) return 0; } -static void signal_fence_array(const struct i915_execbuffer *eb) +static void signal_fence_array(const struct i915_execbuffer *eb, + struct dma_fence * const fence) { - struct dma_fence * const fence = &eb->request->fence; unsigned int n; for (n = 0; n < eb->num_fences; n++) { @@ -2731,9 +2868,9 @@ static void retire_requests(struct intel_timeline *tl, struct i915_request *end) break; } -static int eb_request_add(struct i915_execbuffer *eb, int err) +static int eb_request_add(struct i915_execbuffer *eb, struct i915_request *rq, + int err, bool last_parallel) { - struct i915_request *rq = eb->request; struct intel_timeline * const tl = i915_request_timeline(rq); struct i915_sched_attr attr = {}; struct i915_request *prev; @@ -2755,6 +2892,17 @@ static int eb_request_add(struct i915_execbuffer *eb, int err) err = -ENOENT; /* override any transient errors */ } + if (intel_context_is_parallel(eb->context)) { + if (err) { + __i915_request_skip(rq); + set_bit(I915_FENCE_FLAG_SKIP_PARALLEL, + &rq->fence.flags); + } + if (last_parallel) + set_bit(I915_FENCE_FLAG_SUBMIT_PARALLEL, + &rq->fence.flags); + } + __i915_request_queue(rq, &attr); /* Try to clean up the client's timeline after submitting the request */ @@ -2766,6 +2914,25 @@ static int eb_request_add(struct i915_execbuffer *eb, int err) return err; } +static int eb_requests_add(struct i915_execbuffer *eb, int err) +{ + int i; + + /* + * We iterate in reverse order of creation to release timeline mutexes in + * same order. 
+ */ + for_each_batch_add_order(eb, i) { + struct i915_request *rq = eb->requests[i]; + + if (!rq) + continue; + err |= eb_request_add(eb, rq, err, i == 0); + } + + return err; +} + static const i915_user_extension_fn execbuf_extensions[] = { [DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES] = parse_timeline_fences, }; @@ -2792,6 +2959,182 @@ parse_execbuf2_extensions(struct drm_i915_gem_execbuffer2 *args, eb); } +static void eb_requests_get(struct i915_execbuffer *eb) +{ + unsigned int i; + + for_each_batch_create_order(eb, i) { + if (!eb->requests[i]) + break; + + i915_request_get(eb->requests[i]); + } +} + +static void eb_requests_put(struct i915_execbuffer *eb) +{ + unsigned int i; + + for_each_batch_create_order(eb, i) { + if (!eb->requests[i]) + break; + + i915_request_put(eb->requests[i]); + } +} + +static struct sync_file * +eb_composite_fence_create(struct i915_execbuffer *eb, int out_fence_fd) +{ + struct sync_file *out_fence = NULL; + struct dma_fence_array *fence_array; + struct dma_fence **fences; + unsigned int i; + + GEM_BUG_ON(!intel_context_is_parent(eb->context)); + + fences = kmalloc_array(eb->num_batches, sizeof(*fences), GFP_KERNEL); + if (!fences) + return ERR_PTR(-ENOMEM); + + for_each_batch_create_order(eb, i) + fences[i] = &eb->requests[i]->fence; + + fence_array = dma_fence_array_create(eb->num_batches, + fences, + eb->context->parallel.fence_context, + eb->context->parallel.seqno, + false); + if (!fence_array) { + kfree(fences); + return ERR_PTR(-ENOMEM); + } + + /* Move ownership to the dma_fence_array created above */ + for_each_batch_create_order(eb, i) + dma_fence_get(fences[i]); + + if (out_fence_fd != -1) { + out_fence = sync_file_create(&fence_array->base); + /* sync_file now owns fence_array, drop creation ref */ + dma_fence_put(&fence_array->base); + if (!out_fence) + return ERR_PTR(-ENOMEM); + } + + eb->composite_fence = &fence_array->base; + + return out_fence; +} + +static struct sync_file * +eb_fences_add(struct i915_execbuffer *eb, struct i915_request *rq, + struct dma_fence *in_fence, int out_fence_fd) +{ + struct sync_file *out_fence = NULL; + int err; + + if (unlikely(eb->gem_context->syncobj)) { + struct dma_fence *fence; + + fence = drm_syncobj_fence_get(eb->gem_context->syncobj); + err = i915_request_await_dma_fence(rq, fence); + dma_fence_put(fence); + if (err) + return ERR_PTR(err); + } + + if (in_fence) { + if (eb->args->flags & I915_EXEC_FENCE_SUBMIT) + err = i915_request_await_execution(rq, in_fence); + else + err = i915_request_await_dma_fence(rq, in_fence); + if (err < 0) + return ERR_PTR(err); + } + + if (eb->fences) { + err = await_fence_array(eb, rq); + if (err) + return ERR_PTR(err); + } + + if (intel_context_is_parallel(eb->context)) { + out_fence = eb_composite_fence_create(eb, out_fence_fd); + if (IS_ERR(out_fence)) + return ERR_PTR(-ENOMEM); + } else if (out_fence_fd != -1) { + out_fence = sync_file_create(&rq->fence); + if (!out_fence) + return ERR_PTR(-ENOMEM); + } + + return out_fence; +} + +static struct intel_context * +eb_find_context(struct i915_execbuffer *eb, unsigned int context_number) +{ + struct intel_context *child; + + if (likely(context_number == 0)) + return eb->context; + + for_each_child(eb->context, child) + if (!--context_number) + return child; + + GEM_BUG_ON("Context not found"); + + return NULL; +} + +static struct sync_file * +eb_requests_create(struct i915_execbuffer *eb, struct dma_fence *in_fence, + int out_fence_fd) +{ + struct sync_file *out_fence = NULL; + unsigned int i; + + 
for_each_batch_create_order(eb, i) { + /* Allocate a request for this batch buffer nice and early. */ + eb->requests[i] = i915_request_create(eb_find_context(eb, i)); + if (IS_ERR(eb->requests[i])) { + out_fence = ERR_PTR(PTR_ERR(eb->requests[i])); + eb->requests[i] = NULL; + return out_fence; + } + + /* + * Only the first request added (committed to the backend) has to + * take the in fences into account as all subsequent requests + * will have fences inserted in between them. + */ + if (i + 1 == eb->num_batches) { + out_fence = eb_fences_add(eb, eb->requests[i], + in_fence, out_fence_fd); + if (IS_ERR(out_fence)) + return out_fence; + } + + /* + * Whilst this request exists, batch_obj will be on the + * active_list, and so will hold the active reference. Only when + * this request is retired will the batch_obj be moved onto + * the inactive_list and lose its active reference. Hence we do + * not need to explicitly hold another reference here. + */ + eb->requests[i]->batch = eb->batches[i]->vma; + if (eb->batch_pool) { + GEM_BUG_ON(intel_context_is_parallel(eb->context)); + intel_gt_buffer_pool_mark_active(eb->batch_pool, + eb->requests[i]); + } + } + + return out_fence; +} + static int i915_gem_do_execbuffer(struct drm_device *dev, struct drm_file *file, @@ -2802,7 +3145,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, struct i915_execbuffer eb; struct dma_fence *in_fence = NULL; struct sync_file *out_fence = NULL; - struct i915_vma *batch; int out_fence_fd = -1; int err; @@ -2826,12 +3168,15 @@ i915_gem_do_execbuffer(struct drm_device *dev, eb.buffer_count = args->buffer_count; eb.batch_start_offset = args->batch_start_offset; - eb.batch_len = args->batch_len; eb.trampoline = NULL; eb.fences = NULL; eb.num_fences = 0; + memset(eb.requests, 0, sizeof(struct i915_request *) * + ARRAY_SIZE(eb.requests)); + eb.composite_fence = NULL; + eb.batch_flags = 0; if (args->flags & I915_EXEC_SECURE) { if (GRAPHICS_VER(i915) >= 11) @@ -2915,70 +3260,25 @@ i915_gem_do_execbuffer(struct drm_device *dev, ww_acquire_done(&eb.ww.ctx); - batch = eb.batch->vma; - - /* Allocate a request for this batch buffer nice and early. */ - eb.request = i915_request_create(eb.context); - if (IS_ERR(eb.request)) { - err = PTR_ERR(eb.request); - goto err_vma; - } - - if (unlikely(eb.gem_context->syncobj)) { - struct dma_fence *fence; - - fence = drm_syncobj_fence_get(eb.gem_context->syncobj); - err = i915_request_await_dma_fence(eb.request, fence); - dma_fence_put(fence); - if (err) - goto err_ext; - } - - if (in_fence) { - if (args->flags & I915_EXEC_FENCE_SUBMIT) - err = i915_request_await_execution(eb.request, - in_fence); - else - err = i915_request_await_dma_fence(eb.request, - in_fence); - if (err < 0) - goto err_request; - } - - if (eb.fences) { - err = await_fence_array(&eb); - if (err) + out_fence = eb_requests_create(&eb, in_fence, out_fence_fd); + if (IS_ERR(out_fence)) { + err = PTR_ERR(out_fence); + if (eb.requests[0]) goto err_request; + else + goto err_vma; } - if (out_fence_fd != -1) { - out_fence = sync_file_create(&eb.request->fence); - if (!out_fence) { - err = -ENOMEM; - goto err_request; - } - } - - /* - * Whilst this request exists, batch_obj will be on the - * active_list, and so will hold the active reference. Only when this - * request is retired will the the batch_obj be moved onto the - * inactive_list and lose its active reference. Hence we do not need - * to explicitly hold another reference here. 
- */ - eb.request->batch = batch; - if (eb.batch_pool) - intel_gt_buffer_pool_mark_active(eb.batch_pool, eb.request); - - trace_i915_request_queue(eb.request, eb.batch_flags); - err = eb_submit(&eb, batch); + err = eb_submit(&eb); err_request: - i915_request_get(eb.request); - err = eb_request_add(&eb, err); + eb_requests_get(&eb); + err = eb_requests_add(&eb, err); if (eb.fences) - signal_fence_array(&eb); + signal_fence_array(&eb, eb.composite_fence ? + eb.composite_fence : + &eb.requests[0]->fence); if (out_fence) { if (err == 0) { @@ -2993,10 +3293,15 @@ err_request: if (unlikely(eb.gem_context->syncobj)) { drm_syncobj_replace_fence(eb.gem_context->syncobj, - &eb.request->fence); + eb.composite_fence ? + eb.composite_fence : + &eb.requests[0]->fence); } - i915_request_put(eb.request); + if (!out_fence && eb.composite_fence) + dma_fence_put(eb.composite_fence); + + eb_requests_put(&eb); err_vma: eb_release_vmas(&eb, true); diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h index edf12caaade3..246c37d72cd7 100644 --- a/drivers/gpu/drm/i915/gt/intel_context.h +++ b/drivers/gpu/drm/i915/gt/intel_context.h @@ -241,7 +241,13 @@ intel_context_timeline_lock(struct intel_context *ce) struct intel_timeline *tl = ce->timeline; int err; - err = mutex_lock_interruptible(&tl->mutex); + if (intel_context_is_parent(ce)) + err = mutex_lock_interruptible_nested(&tl->mutex, 0); + else if (intel_context_is_child(ce)) + err = mutex_lock_interruptible_nested(&tl->mutex, + ce->parallel.child_index + 1); + else + err = mutex_lock_interruptible(&tl->mutex); if (err) return ERR_PTR(err); diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h index 95a5b94b4ece..9e0177dc5484 100644 --- a/drivers/gpu/drm/i915/gt/intel_context_types.h +++ b/drivers/gpu/drm/i915/gt/intel_context_types.h @@ -248,6 +248,16 @@ struct intel_context { * context */ struct i915_request *last_rq; + /** + * @fence_context: fence context for the composite fence when doing + * parallel submission + */ + u64 fence_context; + /** + * @seqno: seqno for composite fence when doing parallel + * submission + */ + u32 seqno; /** @number_children: number of children if parent */ u8 number_children; /** @child_index: index into child_list if child */ diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c index 50f0f4eba03b..361fab2cae99 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c @@ -3093,6 +3093,8 @@ guc_create_parallel(struct intel_engine_cs **engines, } } + parent->parallel.fence_context = dma_fence_context_alloc(1); + parent->engine->emit_bb_start = emit_bb_start_parent_no_preempt_mid_batch; parent->engine->emit_fini_breadcrumb = diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h index 8950785e55d6..24db8459376b 100644 --- a/drivers/gpu/drm/i915/i915_request.h +++ b/drivers/gpu/drm/i915/i915_request.h @@ -147,6 +147,15 @@ enum { * tail. */ I915_FENCE_FLAG_SUBMIT_PARALLEL, + + /* + * I915_FENCE_FLAG_SKIP_PARALLEL - request with a context in a + * parent-child relationship (parallel submission, multi-lrc) that + * hit an error while generating requests in the execbuf IOCTL. + * Indicates this request should be skipped as another request in + * submission / relationship encountered an error. 
+ */ + I915_FENCE_FLAG_SKIP_PARALLEL, }; /** diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c index 4b7fc4647e46..90546fa58fc1 100644 --- a/drivers/gpu/drm/i915/i915_vma.c +++ b/drivers/gpu/drm/i915/i915_vma.c @@ -1234,9 +1234,10 @@ int __i915_vma_move_to_active(struct i915_vma *vma, struct i915_request *rq) return i915_active_add_request(&vma->active, rq); } -int i915_vma_move_to_active(struct i915_vma *vma, - struct i915_request *rq, - unsigned int flags) +int _i915_vma_move_to_active(struct i915_vma *vma, + struct i915_request *rq, + struct dma_fence *fence, + unsigned int flags) { struct drm_i915_gem_object *obj = vma->obj; int err; @@ -1257,9 +1258,11 @@ int i915_vma_move_to_active(struct i915_vma *vma, intel_frontbuffer_put(front); } - dma_resv_add_excl_fence(vma->resv, &rq->fence); - obj->write_domain = I915_GEM_DOMAIN_RENDER; - obj->read_domains = 0; + if (fence) { + dma_resv_add_excl_fence(vma->resv, fence); + obj->write_domain = I915_GEM_DOMAIN_RENDER; + obj->read_domains = 0; + } } else { if (!(flags & __EXEC_OBJECT_NO_RESERVE)) { err = dma_resv_reserve_shared(vma->resv, 1); @@ -1267,8 +1270,10 @@ int i915_vma_move_to_active(struct i915_vma *vma, return err; } - dma_resv_add_shared_fence(vma->resv, &rq->fence); - obj->write_domain = 0; + if (fence) { + dma_resv_add_shared_fence(vma->resv, fence); + obj->write_domain = 0; + } } if (flags & EXEC_OBJECT_NEEDS_FENCE && vma->fence) diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h index ed69f66c7ab0..648dbe744c96 100644 --- a/drivers/gpu/drm/i915/i915_vma.h +++ b/drivers/gpu/drm/i915/i915_vma.h @@ -57,9 +57,16 @@ static inline bool i915_vma_is_active(const struct i915_vma *vma) int __must_check __i915_vma_move_to_active(struct i915_vma *vma, struct i915_request *rq); -int __must_check i915_vma_move_to_active(struct i915_vma *vma, - struct i915_request *rq, - unsigned int flags); +int __must_check _i915_vma_move_to_active(struct i915_vma *vma, + struct i915_request *rq, + struct dma_fence *fence, + unsigned int flags); +static inline int __must_check +i915_vma_move_to_active(struct i915_vma *vma, struct i915_request *rq, + unsigned int flags) +{ + return _i915_vma_move_to_active(vma, rq, &rq->fence, flags); +} #define __i915_vma_flags(v) ((unsigned long *)&(v)->flags.counter) -- cgit v1.2.3 From 28c7023332ceb95c19d9f19914a63f88d15ab427 Mon Sep 17 00:00:00 2001 From: Matthew Brost Date: Thu, 14 Oct 2021 10:20:01 -0700 Subject: drm/i915/guc: Handle errors in multi-lrc requests If an error occurs in the front end when multi-lrc requests are getting generated we need to skip these in the backend but we still need to emit the breadcrumbs seqno. An issue arises because with multi-lrc breadcrumbs there is a handshake between the parent and children to make forward progress. If all the requests are not present this handshake doesn't work. To work around this, if a multi-lrc request has an error we skip the handshake but still emit the breadcrumbs seqno. 
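For illustration, the parent side of the handshake being skipped looks roughly like the sketch below. get_children_go_addr() is the helper used in the patch; the remaining names and the exact command-stream encoding are simplified assumptions, not the verbatim emission code:

/*
 * Sketch only (assumed encoding): the parent spins on a per-child
 * "join" dword until every child has written it, then writes the "go"
 * dword that all children are spinning on. If any child's request was
 * squashed, its join write never happens and the waits below would
 * never complete - hence the handshake must be skipped on errors.
 */
static u32 *sketch_parent_handshake(struct intel_context *ce, u32 *cs)
{
	u8 i;

	/* Wait for every child to join */
	for (i = 0; i < ce->parallel.number_children; ++i) {
		*cs++ = MI_SEMAPHORE_WAIT |
			MI_SEMAPHORE_GLOBAL_GTT |
			MI_SEMAPHORE_POLL |
			MI_SEMAPHORE_SAD_EQ_SDD;
		*cs++ = PARENT_GO_FINI_BREADCRUMB;	/* value a child writes */
		*cs++ = get_children_join_addr(ce, i);	/* assumed helper */
		*cs++ = 0;
	}

	/* Release every child to emit its own fini breadcrumb */
	*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
	*cs++ = get_children_go_addr(ce);
	*cs++ = 0;
	*cs++ = CHILD_GO_FINI_BREADCRUMB;

	return cs;
}
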
v2: (John Harrison) - Add comment explaining the skipping of the handshake logic - Fix typos in the commit message v3: (John Harrison) - Fix up some comments about the math to NOP the ring Signed-off-by: Matthew Brost Reviewed-by: John Harrison Signed-off-by: John Harrison Link: https://patchwork.freedesktop.org/patch/msgid/20211014172005.27155-22-matthew.brost@intel.com --- drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c | 69 ++++++++++++++++++++++- 1 file changed, 66 insertions(+), 3 deletions(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c index 361fab2cae99..d7710debcd47 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c @@ -4070,8 +4070,8 @@ static int emit_bb_start_child_no_preempt_mid_batch(struct i915_request *rq, } static u32 * -emit_fini_breadcrumb_parent_no_preempt_mid_batch(struct i915_request *rq, - u32 *cs) +__emit_fini_breadcrumb_parent_no_preempt_mid_batch(struct i915_request *rq, + u32 *cs) { struct intel_context *ce = rq->context; u8 i; @@ -4099,6 +4099,45 @@ emit_fini_breadcrumb_parent_no_preempt_mid_batch(struct i915_request *rq, get_children_go_addr(ce), 0); + return cs; +} + +/* + * If this is true, a submission of multi-lrc requests had an error and the + * requests need to be skipped. The front end (execbuf IOCTL) should've called + * i915_request_skip which squashes the BB but we still need to emit the fini + * breadcrumb seqno write. At this point we don't know how many of the + * requests in the multi-lrc submission were generated so we can't do the + * handshake between the parent and children (e.g. if 4 requests should be + * generated but the 2nd hit an error only 1 would be seen by the GuC backend). + * Simply skip the handshake, but still emit the breadcrumb seqno, if an error + * has occurred on any of the requests in submission / relationship. + */ +static inline bool skip_handshake(struct i915_request *rq) +{ + return test_bit(I915_FENCE_FLAG_SKIP_PARALLEL, &rq->fence.flags); +} + +static u32 * +emit_fini_breadcrumb_parent_no_preempt_mid_batch(struct i915_request *rq, + u32 *cs) +{ + struct intel_context *ce = rq->context; + + GEM_BUG_ON(!intel_context_is_parent(ce)); + + if (unlikely(skip_handshake(rq))) { + /* + * NOP everything in __emit_fini_breadcrumb_parent_no_preempt_mid_batch, + * the -6 comes from the length of the emits below. 
+ */ + memset(cs, 0, sizeof(u32) * + (ce->engine->emit_fini_breadcrumb_dw - 6)); + cs += ce->engine->emit_fini_breadcrumb_dw - 6; + } else { + cs = __emit_fini_breadcrumb_parent_no_preempt_mid_batch(rq, cs); + } + /* Emit fini breadcrumb */ cs = gen8_emit_ggtt_write(cs, rq->fence.seqno, @@ -4115,7 +4154,8 @@ emit_fini_breadcrumb_parent_no_preempt_mid_batch(struct i915_request *rq, } static u32 * -emit_fini_breadcrumb_child_no_preempt_mid_batch(struct i915_request *rq, u32 *cs) +__emit_fini_breadcrumb_child_no_preempt_mid_batch(struct i915_request *rq, + u32 *cs) { struct intel_context *ce = rq->context; struct intel_context *parent = intel_context_to_parent(ce); @@ -4142,6 +4182,29 @@ emit_fini_breadcrumb_child_no_preempt_mid_batch(struct i915_request *rq, u32 *cs *cs++ = get_children_go_addr(parent); *cs++ = 0; + return cs; +} + +static u32 * +emit_fini_breadcrumb_child_no_preempt_mid_batch(struct i915_request *rq, + u32 *cs) +{ + struct intel_context *ce = rq->context; + + GEM_BUG_ON(!intel_context_is_child(ce)); + + if (unlikely(skip_handshake(rq))) { + /* + * NOP everything in __emit_fini_breadcrumb_child_no_preempt_mid_batch, + * the -6 comes from the length of the emits below. + */ + memset(cs, 0, sizeof(u32) * + (ce->engine->emit_fini_breadcrumb_dw - 6)); + cs += ce->engine->emit_fini_breadcrumb_dw - 6; + } else { + cs = __emit_fini_breadcrumb_child_no_preempt_mid_batch(rq, cs); + } + /* Emit fini breadcrumb */ cs = gen8_emit_ggtt_write(cs, rq->fence.seqno, -- cgit v1.2.3 From afc76f307e60c865c436e3828a7756e0c358fe0d Mon Sep 17 00:00:00 2001 From: Matthew Brost Date: Thu, 14 Oct 2021 10:20:02 -0700 Subject: drm/i915: Make request conflict tracking understand parallel submits If an object in the excl or shared slot is a composite fence from a parallel submit and the current request in the conflict tracking is from the same parallel context there is no need to enforce ordering as the ordering is already implicit. Make the request conflict tracking understand this by comparing a parallel submit's parent context and skipping conflict insertion if the values match. v2: (John Harrison) - Reword commit message Signed-off-by: Matthew Brost Reviewed-by: John Harrison Signed-off-by: John Harrison Link: https://patchwork.freedesktop.org/patch/msgid/20211014172005.27155-23-matthew.brost@intel.com --- drivers/gpu/drm/i915/i915_request.c | 43 +++++++++++++++++++++++++------------ 1 file changed, 29 insertions(+), 14 deletions(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c index d29e46a001b4..2c3cd6e635b5 100644 --- a/drivers/gpu/drm/i915/i915_request.c +++ b/drivers/gpu/drm/i915/i915_request.c @@ -1335,6 +1335,25 @@ i915_request_await_external(struct i915_request *rq, struct dma_fence *fence) return err; } +static inline bool is_parallel_rq(struct i915_request *rq) +{ + return intel_context_is_parallel(rq->context); +} + +static inline struct intel_context *request_to_parent(struct i915_request *rq) +{ + return intel_context_to_parent(rq->context); +} + +static bool is_same_parallel_context(struct i915_request *to, + struct i915_request *from) +{ + if (is_parallel_rq(to)) + return request_to_parent(to) == request_to_parent(from); + + return false; +} + int i915_request_await_execution(struct i915_request *rq, struct dma_fence *fence) @@ -1366,11 +1385,14 @@ i915_request_await_execution(struct i915_request *rq, * want to run our callback in all cases. 
*/ - if (dma_fence_is_i915(fence)) + if (dma_fence_is_i915(fence)) { + if (is_same_parallel_context(rq, to_request(fence))) + continue; ret = __i915_request_await_execution(rq, to_request(fence)); - else + } else { ret = i915_request_await_external(rq, fence); + } if (ret < 0) return ret; } while (--nchild); @@ -1471,10 +1493,13 @@ i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence) fence)) continue; - if (dma_fence_is_i915(fence)) + if (dma_fence_is_i915(fence)) { + if (is_same_parallel_context(rq, to_request(fence))) + continue; ret = i915_request_await_request(rq, to_request(fence)); - else + } else { ret = i915_request_await_external(rq, fence); + } if (ret < 0) return ret; @@ -1549,16 +1574,6 @@ i915_request_await_object(struct i915_request *to, return ret; } -static inline bool is_parallel_rq(struct i915_request *rq) -{ - return intel_context_is_parallel(rq->context); -} - -static inline struct intel_context *request_to_parent(struct i915_request *rq) -{ - return intel_context_to_parent(rq->context); -} - static struct i915_request * __i915_request_ensure_parallel_ordering(struct i915_request *rq, struct intel_timeline *timeline) -- cgit v1.2.3 From 7647f0096ee87376c96a47357373e02694baa3ec Mon Sep 17 00:00:00 2001 From: Matthew Brost Date: Thu, 14 Oct 2021 10:20:03 -0700 Subject: drm/i915: Update I915_GEM_BUSY IOCTL to understand composite fences Parallel submission creates composite fences (dma_fence_array) for excl / shared slots in objects. The I915_GEM_BUSY IOCTL checks these slots to determine the busyness of the object. Prior to this patch it only checked if the fence in the slot was an i915_request. Update the check to understand composite fences and correctly report the busyness. v2: (Tvrtko) - Remove duplicate BUILD_BUG_ON Reviewed-by: Daniele Ceraolo Spurio Signed-off-by: Matthew Brost Signed-off-by: John Harrison Link: https://patchwork.freedesktop.org/patch/msgid/20211014172005.27155-24-matthew.brost@intel.com --- drivers/gpu/drm/i915/gem/i915_gem_busy.c | 57 ++++++++++++++++++++------ drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c | 5 ++- drivers/gpu/drm/i915/i915_request.h | 6 +++ 3 files changed, 55 insertions(+), 13 deletions(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/i915/gem/i915_gem_busy.c b/drivers/gpu/drm/i915/gem/i915_gem_busy.c index 6234e17259c1..7358bebef15c 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_busy.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_busy.c @@ -4,6 +4,8 @@ * Copyright © 2014-2016 Intel Corporation */ +#include + #include "gt/intel_engine.h" #include "i915_gem_ioctls.h" @@ -36,7 +38,7 @@ static __always_inline u32 __busy_write_id(u16 id) } static __always_inline unsigned int -__busy_set_if_active(const struct dma_fence *fence, u32 (*flag)(u16 id)) +__busy_set_if_active(struct dma_fence *fence, u32 (*flag)(u16 id)) { const struct i915_request *rq; @@ -46,29 +48,60 @@ __busy_set_if_active(struct dma_fence *fence, u32 (*flag)(u16 id)) * to eventually flush us, but to minimise latency just ask the * hardware. * - * Note we only report on the status of native fences. + * Note we only report on the status of native fences and we currently + * have two native fences: + * + * 1. A composite fence (dma_fence_array) constructed of i915 requests + * created during a parallel submission. In this case we deconstruct the + * composite fence into individual i915 requests and check the status of + * each request. + * + * 2. A single i915 request. 
*/ - if (!dma_fence_is_i915(fence)) + if (dma_fence_is_array(fence)) { + struct dma_fence_array *array = to_dma_fence_array(fence); + struct dma_fence **child = array->fences; + unsigned int nchild = array->num_fences; + + do { + struct dma_fence *current_fence = *child++; + + /* Not an i915 fence, can't be busy per above */ + if (!dma_fence_is_i915(current_fence) || + !test_bit(I915_FENCE_FLAG_COMPOSITE, + ¤t_fence->flags)) { + return 0; + } + + rq = to_request(current_fence); + if (!i915_request_completed(rq)) + return flag(rq->engine->uabi_class); + } while (--nchild); + + /* All requests in array complete, not busy */ return 0; + } else { + if (!dma_fence_is_i915(fence)) + return 0; - /* opencode to_request() in order to avoid const warnings */ - rq = container_of(fence, const struct i915_request, fence); - if (i915_request_completed(rq)) - return 0; + rq = to_request(fence); + if (i915_request_completed(rq)) + return 0; - /* Beware type-expansion follies! */ - BUILD_BUG_ON(!typecheck(u16, rq->engine->uabi_class)); - return flag(rq->engine->uabi_class); + /* Beware type-expansion follies! */ + BUILD_BUG_ON(!typecheck(u16, rq->engine->uabi_class)); + return flag(rq->engine->uabi_class); + } } static __always_inline unsigned int -busy_check_reader(const struct dma_fence *fence) +busy_check_reader(struct dma_fence *fence) { return __busy_set_if_active(fence, __busy_read_flag); } static __always_inline unsigned int -busy_check_writer(const struct dma_fence *fence) +busy_check_writer(struct dma_fence *fence) { if (!fence) return 0; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c index 1cb241298d9b..f20a3a5b3c84 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c @@ -2997,8 +2997,11 @@ eb_composite_fence_create(struct i915_execbuffer *eb, int out_fence_fd) if (!fences) return ERR_PTR(-ENOMEM); - for_each_batch_create_order(eb, i) + for_each_batch_create_order(eb, i) { fences[i] = &eb->requests[i]->fence; + __set_bit(I915_FENCE_FLAG_COMPOSITE, + &eb->requests[i]->fence.flags); + } fence_array = dma_fence_array_create(eb->num_batches, fences, diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h index 24db8459376b..dc359242d1ae 100644 --- a/drivers/gpu/drm/i915/i915_request.h +++ b/drivers/gpu/drm/i915/i915_request.h @@ -156,6 +156,12 @@ enum { * submission / relationship encoutered an error. */ I915_FENCE_FLAG_SKIP_PARALLEL, + + /* + * I915_FENCE_FLAG_COMPOSITE - Indicates fence is part of a composite + * fence (dma_fence_array) and i915 generated for parallel submission. + */ + I915_FENCE_FLAG_COMPOSITE, }; /** -- cgit v1.2.3 From 4eb61ddc1b67dcb450d3ddbcfef8dfe2c4279a45 Mon Sep 17 00:00:00 2001 From: Matthew Brost Date: Thu, 14 Oct 2021 10:20:04 -0700 Subject: drm/i915: Enable multi-bb execbuf Enable multi-bb execbuf by enabling the set_parallel extension. 
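To sketch what this unblocks from userspace (struct and flag names per the parallel-submit uAPI introduced with this series; the engine layout, context-setup plumbing and error handling are illustrative assumptions):

/* Hypothetical user of a 2-wide parallel context: a single execbuf
 * carries two batch buffers, one per hardware context. GNU-style
 * trailing array; the extension is chained into the context's
 * set-engines (I915_CONTEXT_PARAM_ENGINES) list, plumbing omitted. */
struct {
	struct i915_context_engines_parallel_submit p;
	struct i915_engine_class_instance engines[2];
} parallel = {
	.p.base.name = I915_CONTEXT_ENGINES_EXT_PARALLEL_SUBMIT,
	.p.engine_index = 0,	/* slot in the context's engine map */
	.p.width = 2,		/* BBs (and requests) per submission */
	.p.num_siblings = 1,	/* placement choices per slot */
	.engines = {
		{ I915_ENGINE_CLASS_VIDEO, 0 },
		{ I915_ENGINE_CLASS_VIDEO, 1 },
	},
};

struct drm_i915_gem_execbuffer2 execbuf = {
	.buffers_ptr = (uintptr_t)objs,
	.buffer_count = 4,	/* last 2 entries are the batches */
	.flags = 0,		/* engine index 0 = the parallel slot */
	.rsvd1 = ctx_id,	/* context created with the extension */
};
ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);

Without I915_EXEC_BATCH_FIRST the num_batches batches must be the last entries of the buffer list (see is_batch_buffer() earlier in the series), and batch_start_offset / batch_len must be zero for parallel contexts, as enforced in eb_select_engine().
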
Signed-off-by: Matthew Brost Reviewed-by: John Harrison Signed-off-by: John Harrison Link: https://patchwork.freedesktop.org/patch/msgid/20211014172005.27155-25-matthew.brost@intel.com --- drivers/gpu/drm/i915/gem/i915_gem_context.c | 3 --- 1 file changed, 3 deletions(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c index 9a00f11fef46..fb33d0322960 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_context.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c @@ -570,9 +570,6 @@ set_proto_ctx_engines_parallel_submit(struct i915_user_extension __user *base, struct intel_engine_cs **siblings = NULL; intel_engine_mask_t prev_mask; - /* Disabling for now */ - return -ENODEV; - /* FIXME: This is NIY for execlists */ if (!(intel_uc_uses_guc_submission(&i915->gt.uc))) return -ENODEV; -- cgit v1.2.3 From ef7ec41f17cbc0861891ccc0634d06a0c8dcbf09 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Thu, 14 Oct 2021 12:09:38 +0300 Subject: drm/i915: Replace the unconditional clflush with drm_clflush_virt_range() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Not all machines have clflush, so don't go assuming they do. Not really sure why the clflush is even here since hwsp is supposed to get snooped I thought. Although in my case we're talking about an i830 machine where render/blitter snooping is definitely busted. But it might work for the hwsp perhaps. Haven't really reverse engineered that one fully. Cc: stable@vger.kernel.org Cc: Chris Wilson Cc: Mika Kuoppala Fixes: b436a5f8b6c8 ("drm/i915/gt: Track all timelines created using the HWSP") Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20211014090941.12159-2-ville.syrjala@linux.intel.com Reviewed-by: Dave Airlie --- drivers/gpu/drm/i915/gt/intel_ring_submission.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c index 593524195707..586dca1731ce 100644 --- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c +++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c @@ -292,7 +292,7 @@ static void xcs_sanitize(struct intel_engine_cs *engine) sanitize_hwsp(engine); /* And scrub the dirty cachelines for the HWSP */ - clflush_cache_range(engine->status_page.addr, PAGE_SIZE); + drm_clflush_virt_range(engine->status_page.addr, PAGE_SIZE); intel_engine_reset_pinned_contexts(engine); } -- cgit v1.2.3 From af7b6d234eefa30c461cc16912bafb32b9e6141c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Thu, 14 Oct 2021 12:09:39 +0300 Subject: drm/i915: Convert unconditional clflush to drm_clflush_virt_range() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This one is apparently a "clflush for good measure", so a bit more justification (if you can call it that) than some of the others. Convert to drm_clflush_virt_range() again so that machines without clflush will survive the ordeal. 
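For context, the reason these conversions are safe everywhere is the runtime fallback inside the helper; roughly, the x86 path looks like this (simplified from drm_cache.c, not a verbatim copy):

void drm_clflush_virt_range(void *addr, unsigned long length)
{
	if (static_cpu_has(X86_FEATURE_CLFLUSH)) {
		const int size = boot_cpu_data.x86_clflush_size;
		void *end = addr + length;

		/* Flush cacheline by cacheline over the range */
		addr = (void *)((unsigned long)addr & -size);
		mb();
		for (; addr < end; addr += size)
			clflushopt(addr);
		clflushopt(end - 1); /* force serialisation */
		mb();
		return;
	}

	/* No clflush (e.g. i830): write back and invalidate everything */
	if (wbinvd_on_all_cpus())
		pr_err("Timed out waiting for cache flush\n");
}

A bare clflush() or clflush_cache_range(), by contrast, executes the instruction unconditionally, which is an invalid opcode on CPUs that predate it.
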
Cc: stable@vger.kernel.org Cc: Maarten Lankhorst Cc: Thomas Hellström #v1 Fixes: 12ca695d2c1e ("drm/i915: Do not share hwsp across contexts any more, v8.") Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20211014090941.12159-3-ville.syrjala@linux.intel.com Reviewed-by: Dave Airlie --- drivers/gpu/drm/i915/gt/intel_timeline.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.c b/drivers/gpu/drm/i915/gt/intel_timeline.c index 1257f4f11e66..23d7328892ed 100644 --- a/drivers/gpu/drm/i915/gt/intel_timeline.c +++ b/drivers/gpu/drm/i915/gt/intel_timeline.c @@ -225,7 +225,7 @@ void intel_timeline_reset_seqno(const struct intel_timeline *tl) memset(hwsp_seqno + 1, 0, TIMELINE_SEQNO_BYTES - sizeof(*hwsp_seqno)); WRITE_ONCE(*hwsp_seqno, tl->seqno); - clflush(hwsp_seqno); + drm_clflush_virt_range(hwsp_seqno, TIMELINE_SEQNO_BYTES); } void intel_timeline_enter(struct intel_timeline *tl) -- cgit v1.2.3 From 9ced12182d0d8401d821e9602e56e276459900fc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Thu, 14 Oct 2021 12:09:40 +0300 Subject: drm/i915: Catch yet another unconditional clflush MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Replace the unconditional clflush() with drm_clflush_virt_range() which does the wbinvd() fallback when clflush is not available. This time no justification is given for the clflush in the offending commit. Cc: stable@vger.kernel.org Cc: Maarten Lankhorst Cc: Thomas Hellström Fixes: 2c8ab3339e39 ("drm/i915: Pin timeline map after first timeline pin, v4.") Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20211014090941.12159-4-ville.syrjala@linux.intel.com Reviewed-by: Dave Airlie --- drivers/gpu/drm/i915/gt/intel_timeline.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.c b/drivers/gpu/drm/i915/gt/intel_timeline.c index 23d7328892ed..438bbc7b8147 100644 --- a/drivers/gpu/drm/i915/gt/intel_timeline.c +++ b/drivers/gpu/drm/i915/gt/intel_timeline.c @@ -64,7 +64,7 @@ intel_timeline_pin_map(struct intel_timeline *timeline) timeline->hwsp_map = vaddr; timeline->hwsp_seqno = memset(vaddr + ofs, 0, TIMELINE_SEQNO_BYTES); - clflush(vaddr + ofs); + drm_clflush_virt_range(vaddr + ofs, TIMELINE_SEQNO_BYTES); return 0; } -- cgit v1.2.3 From c654dc379379b9fa5323a8b859e05c1ef3c3ff28 Mon Sep 17 00:00:00 2001 From: Ran Jianping Date: Tue, 19 Oct 2021 09:02:05 +0000 Subject: drm/i915/selftests: remove duplicate include in mock_region.c 'drm/ttm/ttm_placement.h' included in 'drivers/gpu/drm/i915/selftests/mock_region.c' is duplicated. It is also included on line 9. 
Reported-by: Zeal Robot Signed-off-by: Ran Jianping Reviewed-by: Matthew Auld Signed-off-by: Matthew Auld Link: https://patchwork.freedesktop.org/patch/msgid/20211019090205.1003458-1-ran.jianping@zte.com.cn --- drivers/gpu/drm/i915/selftests/mock_region.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/i915/selftests/mock_region.c b/drivers/gpu/drm/i915/selftests/mock_region.c index efa86dffe3c6..75793008c4ef 100644 --- a/drivers/gpu/drm/i915/selftests/mock_region.c +++ b/drivers/gpu/drm/i915/selftests/mock_region.c @@ -6,8 +6,6 @@ #include #include -#include - #include "gem/i915_gem_region.h" #include "intel_memory_region.h" #include "intel_region_ttm.h" -- cgit v1.2.3 From e1f17ea4c36fb1bbd866a71fe07ab42e9a49f5e7 Mon Sep 17 00:00:00 2001 From: Matthew Auld Date: Mon, 18 Oct 2021 18:45:00 +0100 Subject: drm/i915: mark dmabuf objects as ALLOC_USER MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit These are userspace objects, so mark them as such. In a later patch it's useful to determine how paranoid we need to be when managing cache flushes. In theory no functional changes. Signed-off-by: Matthew Auld Cc: Thomas Hellström Reviewed-by: Thomas Hellström Link: https://patchwork.freedesktop.org/patch/msgid/20211018174508.2137279-1-matthew.auld@intel.com --- drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c index afa34111de02..5be505ebbb7b 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c @@ -301,7 +301,8 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev, } drm_gem_private_object_init(dev, &obj->base, dma_buf->size); - i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops, &lock_class, 0); + i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops, &lock_class, + I915_BO_ALLOC_USER); obj->base.import_attach = attach; obj->base.resv = dma_buf->resv; -- cgit v1.2.3 From f7858cb48bf8ee70e71933f1a354a666bb802e54 Mon Sep 17 00:00:00 2001 From: Matthew Auld Date: Mon, 18 Oct 2021 18:45:01 +0100 Subject: drm/i915: mark userptr objects as ALLOC_USER MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit These are userspace objects, so mark them as such. In a later patch it's useful to determine how paranoid we need to be when managing cache flushes. In theory no functional changes. 
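A hypothetical consumer of the flag, to illustrate where this is heading (the real check arrives as i915_gem_object_can_bypass_llc() in the next patch; the helper below is an assumption for illustration only):

/* Sketch only: security-sensitive flush decisions need only consider
 * objects whose contents userspace can actually observe. */
static bool object_needs_paranoid_flush(struct drm_i915_gem_object *obj,
					struct drm_i915_private *i915)
{
	/* Kernel-internal objects never leak their contents to users */
	if (!(obj->flags & I915_BO_ALLOC_USER))
		return false;

	/* Platform checks (e.g. LLC-bypassing MOCS entries) go here */
	return platform_has_llc_bypass(i915); /* hypothetical helper */
}

With dma-buf (and, next, userptr) objects tagged I915_BO_ALLOC_USER, such a check can safely treat everything else as kernel-internal.
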
Signed-off-by: Matthew Auld Cc: Thomas Hellström Reviewed-by: Thomas Hellström Link: https://patchwork.freedesktop.org/patch/msgid/20211018174508.2137279-2-matthew.auld@intel.com --- drivers/gpu/drm/i915/gem/i915_gem_userptr.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c index 8ea0fa665e53..887aca9e8dd2 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c @@ -546,7 +546,8 @@ i915_gem_userptr_ioctl(struct drm_device *dev, return -ENOMEM; drm_gem_private_object_init(dev, &obj->base, args->user_size); - i915_gem_object_init(obj, &i915_gem_userptr_ops, &lock_class, 0); + i915_gem_object_init(obj, &i915_gem_userptr_ops, &lock_class, + I915_BO_ALLOC_USER); obj->mem_flags = I915_BO_FLAG_STRUCT_PAGE; obj->read_domains = I915_GEM_DOMAIN_CPU; obj->write_domain = I915_GEM_DOMAIN_CPU; -- cgit v1.2.3 From 30f1dccd295b6865fad9e41873dce2f76998cbae Mon Sep 17 00:00:00 2001 From: Matthew Auld Date: Mon, 18 Oct 2021 18:45:02 +0100 Subject: drm/i915: extract bypass-llc check into helper MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit It looks like we will need this in some more places, so extract as a helper. Signed-off-by: Matthew Auld Cc: Thomas Hellström Reviewed-by: Thomas Hellström Link: https://patchwork.freedesktop.org/patch/msgid/20211018174508.2137279-3-matthew.auld@intel.com --- drivers/gpu/drm/i915/gem/i915_gem_object.c | 26 ++++++++++++++++++++++++++ drivers/gpu/drm/i915/gem/i915_gem_object.h | 1 + drivers/gpu/drm/i915/gem/i915_gem_shmem.c | 17 +---------------- 3 files changed, 28 insertions(+), 16 deletions(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c index 76ce6a1500bc..1e426a42a36c 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c @@ -128,6 +128,32 @@ void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj, !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE); } +bool i915_gem_object_can_bypass_llc(struct drm_i915_gem_object *obj) +{ + struct drm_i915_private *i915 = to_i915(obj->base.dev); + + /* + * This is purely from a security perspective, so we simply don't care + * about non-userspace objects being able to bypass the LLC. + */ + if (!(obj->flags & I915_BO_ALLOC_USER)) + return false; + + /* + * EHL and JSL add the 'Bypass LLC' MOCS entry, which should make it + * possible for userspace to bypass the GTT caching bits set by the + * kernel, as per the given object cache_level. This is troublesome + * since the heavy flush we apply when first gathering the pages is + * skipped if the kernel thinks the object is coherent with the GPU. As + * a result it might be possible to bypass the cache and read the + * contents of the page directly, which could be stale data. If it's + * just a case of userspace shooting themselves in the foot then so be + * it, but since i915 takes the stance of always zeroing memory before + * handing it to userspace, we need to prevent this. 
+ */ + return IS_JSL_EHL(i915); +} + static void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file) { struct drm_i915_gem_object *obj = to_intel_bo(gem); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h index 9df3ee60604e..59201801cec5 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h @@ -514,6 +514,7 @@ i915_gem_object_finish_access(struct drm_i915_gem_object *obj) void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj, unsigned int cache_level); +bool i915_gem_object_can_bypass_llc(struct drm_i915_gem_object *obj); void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj); void i915_gem_object_flush_if_display_locked(struct drm_i915_gem_object *obj); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c index 11f072193f3b..cf11aa7e08a0 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c @@ -182,22 +182,7 @@ rebuild_st: if (i915_gem_object_needs_bit17_swizzle(obj)) i915_gem_object_do_bit_17_swizzle(obj, st); - /* - * EHL and JSL add the 'Bypass LLC' MOCS entry, which should make it - * possible for userspace to bypass the GTT caching bits set by the - * kernel, as per the given object cache_level. This is troublesome - * since the heavy flush we apply when first gathering the pages is - * skipped if the kernel thinks the object is coherent with the GPU. As - * a result it might be possible to bypass the cache and read the - * contents of the page directly, which could be stale data. If it's - * just a case of userspace shooting themselves in the foot then so be - * it, but since i915 takes the stance of always zeroing memory before - * handing it to userspace, we need to prevent this. - * - * By setting cache_dirty here we make the clflush in set_pages - * unconditional on such platforms. - */ - if (IS_JSL_EHL(i915) && obj->flags & I915_BO_ALLOC_USER) + if (i915_gem_object_can_bypass_llc(obj)) obj->cache_dirty = true; __i915_gem_object_set_pages(obj, st, sg_page_sizes); -- cgit v1.2.3 From a035154da45d19e09dc68454673ff257a660aece Mon Sep 17 00:00:00 2001 From: Matthew Auld Date: Mon, 18 Oct 2021 18:45:03 +0100 Subject: drm/i915/dmabuf: add paranoid flush-on-acquire MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit As pointed out by Thomas, we likely need to flush the pages here if the GPU can read the page contents directly from main memory. Underneath we don't know what the sg_table is pointing to, so just add a wbinvd_on_all_cpus() here, for now. 
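The "XXX: consider doing a vmap flush" note in the patch below hints at a narrower alternative, sketched here against the v5.15-era dma-buf API (hypothetical; it assumes the exporter supports a vmap into flushable system memory, which is exactly what cannot be relied on here):

static int dmabuf_flush_by_vmap(struct dma_buf *dma_buf)
{
	struct dma_buf_map map;
	int err;

	err = dma_buf_vmap(dma_buf, &map);
	if (err)
		return err;	/* no vmap: fall back to wbinvd */

	/* Flush just this buffer instead of every cache on every CPU */
	drm_clflush_virt_range(map.vaddr, dma_buf->size);
	dma_buf_vunmap(dma_buf, &map);
	return 0;
}

Since the importer cannot see what the sg_table points at, and map.vaddr may not even be a system-memory address (map.is_iomem), the wbinvd_on_all_cpus() hammer is the conservative choice for now.
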
Reported-by: Thomas Hellström Signed-off-by: Matthew Auld Cc: Thomas Hellström Reviewed-by: Thomas Hellström Link: https://patchwork.freedesktop.org/patch/msgid/20211018174508.2137279-4-matthew.auld@intel.com --- drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c index 5be505ebbb7b..1adcd8e02d29 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c @@ -232,6 +232,7 @@ struct dma_buf *i915_gem_prime_export(struct drm_gem_object *gem_obj, int flags) static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj) { + struct drm_i915_private *i915 = to_i915(obj->base.dev); struct sg_table *pages; unsigned int sg_page_sizes; @@ -242,8 +243,11 @@ static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj) if (IS_ERR(pages)) return PTR_ERR(pages); - sg_page_sizes = i915_sg_dma_sizes(pages->sgl); + /* XXX: consider doing a vmap flush or something */ + if (!HAS_LLC(i915) || i915_gem_object_can_bypass_llc(obj)) + wbinvd_on_all_cpus(); + sg_page_sizes = i915_sg_dma_sizes(pages->sgl); __i915_gem_object_set_pages(obj, pages, sg_page_sizes); return 0; -- cgit v1.2.3 

From 63430347713a5ba48617687cc8b2aa1f01514432 Mon Sep 17 00:00:00 2001 From: Matthew Auld Date: Mon, 18 Oct 2021 18:45:04 +0100 Subject: drm/i915/userptr: add paranoid flush-on-acquire MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 

Even though userptr objects are always coherent with the GPU (userspace has no way to change this with the set_caching ioctl, even on non-LLC platforms), there is still the 'Bypass LLC' MOCS setting, which might permit reading the contents of main memory directly. 

Signed-off-by: Matthew Auld Cc: Thomas Hellström Reviewed-by: Thomas Hellström Link: https://patchwork.freedesktop.org/patch/msgid/20211018174508.2137279-5-matthew.auld@intel.com --- drivers/gpu/drm/i915/gem/i915_gem_userptr.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c index 887aca9e8dd2..3173c9f9a040 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c @@ -165,8 +165,11 @@ alloc_table: goto err; } - sg_page_sizes = i915_sg_dma_sizes(st->sgl); + WARN_ON_ONCE(!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE)); + if (i915_gem_object_can_bypass_llc(obj)) + obj->cache_dirty = true; + sg_page_sizes = i915_sg_dma_sizes(st->sgl); __i915_gem_object_set_pages(obj, st, sg_page_sizes); return 0; -- cgit v1.2.3 

From d70af57944a1593f2cd6f94b7eb29fae97929953 Mon Sep 17 00:00:00 2001 From: Matthew Auld Date: Mon, 18 Oct 2021 18:45:05 +0100 Subject: drm/i915/shmem: ensure flush during swap-in on non-LLC MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 

On non-LLC platforms, force the flush-on-acquire if this is ever swapped in. Our async flush path is not trustworthy enough yet (and happens in the wrong order), and with some tricks it's conceivable for userspace to change the cache-level to I915_CACHE_NONE after the pages are swapped in, and since execbuf binds the object before doing the async flush, there is a potential race window.
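To sketch the ordering being guarded against (illustrative interleaving, not literal call sites):

    /*
     * execbuf:   bind object              <- GPU can now access the pages
     * userspace: set_caching(CACHE_NONE)  <- object is no longer coherent
     * kernel:    async clflush runs       <- after the bind, i.e. too late
     */

Forcing obj->cache_dirty at swap-in closes this: the first flush then happens synchronously via drm_clflush_sg() in __i915_gem_object_set_pages(), before the object can be bound.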
Signed-off-by: Matthew Auld Cc: Thomas Hellström Reviewed-by: Thomas Hellström Link: https://patchwork.freedesktop.org/patch/msgid/20211018174508.2137279-6-matthew.auld@intel.com --- drivers/gpu/drm/i915/gem/i915_gem_shmem.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c index cf11aa7e08a0..d77da59fae04 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c @@ -286,6 +286,8 @@ __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj, struct sg_table *pages, bool needs_clflush) { + struct drm_i915_private *i915 = to_i915(obj->base.dev); + GEM_BUG_ON(obj->mm.madv == __I915_MADV_PURGED); if (obj->mm.madv == I915_MADV_DONTNEED) @@ -297,6 +299,16 @@ __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj, drm_clflush_sg(pages); __start_cpu_write(obj); + /* + * On non-LLC platforms, force the flush-on-acquire if this is ever + * swapped in. Our async flush path is not trustworthy enough yet (and + * happens in the wrong order), and with some tricks it's conceivable + * for userspace to change the cache-level to I915_CACHE_NONE after the + * pages are swapped in, and since execbuf binds the object before doing + * the async flush, we have a race window. + */ + if (!HAS_LLC(i915)) + obj->cache_dirty = true; } void i915_gem_object_put_pages_shmem(struct drm_i915_gem_object *obj, struct sg_table *pages) -- cgit v1.2.3 

From df94fd05e69e25dd72a7574405b896540803f8b8 Mon Sep 17 00:00:00 2001 From: Matthew Auld Date: Mon, 18 Oct 2021 18:45:06 +0100 Subject: drm/i915: expand on the kernel-doc for cache_dirty MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 

Add some details around non-LLC platforms and clflushing, when dealing with the flush-on-acquire, which is potentially security-sensitive. 

Signed-off-by: Matthew Auld Cc: Thomas Hellström Cc: Daniel Vetter Reviewed-by: Thomas Hellström Link: https://patchwork.freedesktop.org/patch/msgid/20211018174508.2137279-7-matthew.auld@intel.com --- drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c | 11 ++++++++++ drivers/gpu/drm/i915/gem/i915_gem_object_types.h | 27 ++++++++++++++++++++++++ 2 files changed, 38 insertions(+) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c index f20a3a5b3c84..4d7da07442f2 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c @@ -1922,6 +1922,17 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb) * !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ) * but gcc's optimiser doesn't handle that as well and emits * two jumps instead of one. Maybe one day... + * + * FIXME: There is also sync flushing in set_pages(), which + * serves a different purpose (some of the time at least). + * + * We should consider: + * + * 1. Rip out the async flush code. + * + * 2. Or make the sync flushing use the async clflush path + * using mandatory fences underneath. Currently the below + * async flush happens after we bind the object.
*/ if (unlikely(obj->cache_dirty & ~obj->cache_coherent)) { if (i915_gem_clflush_object(obj, 0)) diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h index 7c3da4e3e737..da85169006d4 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h @@ -427,6 +427,33 @@ struct drm_i915_gem_object { * can freely bypass the CPU cache when touching the pages with the GPU, * where the kernel is completely unaware. On such platform we need * apply the sledgehammer-on-acquire regardless of the @cache_coherent. + * + * Special care is taken on non-LLC platforms, to prevent potential + * information leak. The driver currently ensures: + * + * 1. All userspace objects, by default, have @cache_level set as + * I915_CACHE_NONE. The only exception is userptr objects, where we + * instead force I915_CACHE_LLC, but we also don't allow userspace to + * ever change the @cache_level for such objects. Another special case + * is dma-buf, which doesn't rely on @cache_dirty, but there we + * always do a forced flush when acquiring the pages, if there is a + * chance that the pages can be read directly from main memory with + * the GPU. + * + * 2. All I915_CACHE_NONE objects have @cache_dirty initially true. + * + * 3. All swapped-out objects (i.e. shmem) have @cache_dirty set to + * true. + * + * 4. The @cache_dirty is never freely reset before the initial + * flush, even if userspace adjusts the @cache_level through the + * i915_gem_set_caching_ioctl. + * + * 5. All @cache_dirty objects (including swapped-in) are initially + * flushed with a synchronous call to drm_clflush_sg in + * __i915_gem_object_set_pages. The @cache_dirty can be freely reset + * at this point. All further asynchronous clflushes are never security + * critical, i.e. userspace is free to race against itself. */ unsigned int cache_dirty:1; -- cgit v1.2.3 

From 3884d8af9b3fcc0c41f04fbd4131be61101d4a4d Mon Sep 17 00:00:00 2001 From: Matthew Auld Date: Mon, 18 Oct 2021 18:45:07 +0100 Subject: drm/i915: mark up internal objects with start_cpu_write MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 

While the pages can't be swapped out, they can be discarded by the shrinker. Normally such objects are marked with __I915_MADV_PURGED, which can't be unset, so re-acquiring backing pages requires a new object. For kernel internal objects this is not true, since the madv hint is reset for our special volatile objects, such that we can re-acquire new pages, if so desired, without needing a new object. As a result we should probably be paranoid here and put the object back into the CPU domain when discarding the pages, and also correctly set cache_dirty, if required.
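For reference, __start_cpu_write() at around this time looked roughly as follows (a reconstructed sketch, not quoted from this series), which is why a single call both moves the object back to the CPU domain and marks @cache_dirty when a flush would be needed on re-acquire:

    static inline void __start_cpu_write(struct drm_i915_gem_object *obj)
    {
            /* Object is now read and written through the CPU domain. */
            obj->read_domains = I915_GEM_DOMAIN_CPU;
            obj->write_domain = I915_GEM_DOMAIN_CPU;
            /* Non-coherent writes must be flushed before the GPU sees them. */
            if (cpu_write_needs_clflush(obj))
                    obj->cache_dirty = true;
    }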
Signed-off-by: Matthew Auld Cc: Thomas Hellström Reviewed-by: Thomas Hellström Link: https://patchwork.freedesktop.org/patch/msgid/20211018174508.2137279-8-matthew.auld@intel.com --- drivers/gpu/drm/i915/gem/i915_gem_internal.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/i915/gem/i915_gem_internal.c b/drivers/gpu/drm/i915/gem/i915_gem_internal.c index e5ae9c06510c..a57a6b7013c2 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_internal.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_internal.c @@ -134,6 +134,8 @@ static void i915_gem_object_put_pages_internal(struct drm_i915_gem_object *obj, internal_free_pages(pages); obj->mm.dirty = false; + + __start_cpu_write(obj); } static const struct drm_i915_gem_object_ops i915_gem_object_internal_ops = { -- cgit v1.2.3 From ab5d964c001b9efffcbfa4d67a30186b67d79771 Mon Sep 17 00:00:00 2001 From: Matthew Auld Date: Mon, 18 Oct 2021 18:45:08 +0100 Subject: drm/i915/selftests: mark up hugepages object with start_cpu_write MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Just like we do for internal objects. Also just use i915_gem_object_set_cache_coherency() here. No need for over-flushing on LLC platforms. Signed-off-by: Matthew Auld Cc: Thomas Hellström Reviewed-by: Thomas Hellström Link: https://patchwork.freedesktop.org/patch/msgid/20211018174508.2137279-9-matthew.auld@intel.com --- drivers/gpu/drm/i915/gem/selftests/huge_pages.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c index 41d0680f3bd7..b2003133deaf 100644 --- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c +++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c @@ -136,6 +136,8 @@ static void put_huge_pages(struct drm_i915_gem_object *obj, huge_pages_free_pages(pages); obj->mm.dirty = false; + + __start_cpu_write(obj); } static const struct drm_i915_gem_object_ops huge_page_ops = { @@ -152,6 +154,7 @@ huge_pages_object(struct drm_i915_private *i915, { static struct lock_class_key lock_class; struct drm_i915_gem_object *obj; + unsigned int cache_level; GEM_BUG_ON(!size); GEM_BUG_ON(!IS_ALIGNED(size, BIT(__ffs(page_mask)))); @@ -173,7 +176,9 @@ huge_pages_object(struct drm_i915_private *i915, obj->write_domain = I915_GEM_DOMAIN_CPU; obj->read_domains = I915_GEM_DOMAIN_CPU; - obj->cache_level = I915_CACHE_NONE; + + cache_level = HAS_LLC(i915) ? I915_CACHE_LLC : I915_CACHE_NONE; + i915_gem_object_set_cache_coherency(obj, cache_level); obj->mm.page_mask = page_mask; -- cgit v1.2.3
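On the last patch, i915_gem_object_set_cache_coherency() is preferred over assigning obj->cache_level directly because it also derives the coherency tracking from the chosen cache level. A rough sketch of its shape (reconstructed; the signature matches the header hunk above, and the final cache_dirty assignment matches the hunk context visible earlier in this series, but treat the cache_coherent derivation as simplified):

    void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj,
                                             unsigned int cache_level)
    {
            obj->cache_level = cache_level;

            if (cache_level != I915_CACHE_NONE)
                    obj->cache_coherent = I915_BO_CACHE_COHERENT_FOR_READ |
                                          I915_BO_CACHE_COHERENT_FOR_WRITE;
            else if (HAS_LLC(to_i915(obj->base.dev)))
                    obj->cache_coherent = I915_BO_CACHE_COHERENT_FOR_READ;
            else
                    obj->cache_coherent = 0;

            /* Only non-write-coherent objects need the flush-on-acquire. */
            obj->cache_dirty =
                    !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE);
    }

On LLC platforms this leaves I915_CACHE_LLC objects write-coherent, so @cache_dirty stays false and the selftest avoids the needless flushing that a hard-coded I915_CACHE_NONE would cause.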