From 067f44c8b4590c3f24d21a037578a478590f2175 Mon Sep 17 00:00:00 2001
From: Guchun Chen
Date: Thu, 29 Jul 2021 18:35:13 +0800
Subject: drm/amdgpu: avoid over-handle of fence driver fini in s3 test (v2)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

In amdgpu_fence_driver_hw_fini there is no need to call drm_sched_fini
to stop the scheduler in the S3 test; otherwise fence-related failures
show up after resume. To fix this and for a cleaner teardown, move
drm_sched_fini from fence_hw_fini to fence_sw_fini, as it is part of
driver shutdown and should never be called in hw_fini.

v2: rename amdgpu_fence_driver_init to amdgpu_fence_driver_sw_init,
    to keep sw_init and sw_fini paired.

Bug: https://gitlab.freedesktop.org/drm/amd/-/issues/1668
Fixes: 8d35a2596164c1 ("drm/amdgpu: adjust fence driver enable sequence")
Suggested-by: Christian König
Tested-by: Mike Lothian
Signed-off-by: Guchun Chen
Reviewed-by: Christian König
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 12 +++++++-----
 1 file changed, 7 insertions(+), 5 deletions(-)

(limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c')

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 0b1c48590c43..6ed53669f3e0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -498,7 +498,7 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
 }
 
 /**
- * amdgpu_fence_driver_init - init the fence driver
+ * amdgpu_fence_driver_sw_init - init the fence driver
  * for all possible rings.
  *
  * @adev: amdgpu device pointer
@@ -509,13 +509,13 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
  * amdgpu_fence_driver_start_ring().
  * Returns 0 for success.
  */
-int amdgpu_fence_driver_init(struct amdgpu_device *adev)
+int amdgpu_fence_driver_sw_init(struct amdgpu_device *adev)
 {
 	return 0;
 }
 
 /**
- * amdgpu_fence_driver_fini - tear down the fence driver
+ * amdgpu_fence_driver_hw_fini - tear down the fence driver
  * for all possible rings.
  *
  * @adev: amdgpu device pointer
@@ -531,8 +531,7 @@ void amdgpu_fence_driver_hw_fini(struct amdgpu_device *adev)
 
 		if (!ring || !ring->fence_drv.initialized)
 			continue;
-		if (!ring->no_scheduler)
-			drm_sched_fini(&ring->sched);
+
 		/* You can't wait for HW to signal if it's gone */
 		if (!drm_dev_is_unplugged(&adev->ddev))
 			r = amdgpu_fence_wait_empty(ring);
@@ -560,6 +559,9 @@ void amdgpu_fence_driver_sw_fini(struct amdgpu_device *adev)
 		if (!ring || !ring->fence_drv.initialized)
 			continue;
 
+		if (!ring->no_scheduler)
+			drm_sched_fini(&ring->sched);
+
 		for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j)
 			dma_fence_put(ring->fence_drv.fences[j]);
 		kfree(ring->fence_drv.fences);
--
cgit v1.2.3
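
For readers following the series: the point of the hw_fini/sw_fini split above is the caller ordering. Below is a minimal sketch of that ordering, assuming simplified suspend and unload callers (example_suspend() and example_unload() are illustrative names, not functions in amdgpu):

  /* Sketch only: S3 suspend must leave the scheduler alive for resume,
   * while full driver teardown may destroy it.
   */
  static void example_suspend(struct amdgpu_device *adev)
  {
  	amdgpu_fence_driver_hw_fini(adev);	/* quiesce HW, keep SW state */
  }

  static void example_unload(struct amdgpu_device *adev)
  {
  	amdgpu_fence_driver_hw_fini(adev);	/* quiesce HW first */
  	amdgpu_fence_driver_sw_fini(adev);	/* drm_sched_fini() now runs from here */
  }

With drm_sched_fini() only in the sw_fini path, an S3 cycle never destroys the scheduler, which is exactly the failure the bug report above hit.
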
From c530b02f39850a639b72d01ebbf7e5d745c60831 Mon Sep 17 00:00:00 2001
From: Jack Zhang
Date: Wed, 12 May 2021 15:06:35 +0800
Subject: drm/amd/amdgpu: embed hw_fence into amdgpu_job

Why:
Previously the hw fence was allocated separately from the job, which
caused historical lifetime issues and corner cases. The ideal situation
is to let the fence manage both the job's and the fence's lifetime, and
to simplify the design of the GPU scheduler.

How:
Embed the hw_fence into amdgpu_job.
1. Normal job submission is covered by this method.
2. For ib_test and submissions without a parent job, keep the legacy
   way of creating a hw fence separately.

v2: use AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT to show that the fence is
    embedded in a job.
v3: remove the redundant ring variable in amdgpu_job.
v4: add TDR sequence support for this feature. Add a job_run_counter to
    indicate whether this job is a resubmitted job.
v5: add missing handling in amdgpu_fence_enable_signaling.

Signed-off-by: Jingwen Chen
Signed-off-by: Jack Zhang
Reviewed-by: Andrey Grodzovsky
Reviewed-by: Monk Liu
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 86 ++++++++++++++++++++++++-------
 1 file changed, 67 insertions(+), 19 deletions(-)

(limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c')

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 6ed53669f3e0..14499f0de32d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -129,30 +129,50 @@ static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
  *
  * @ring: ring the fence is associated with
  * @f: resulting fence object
+ * @job: job the fence is embedded in
  * @flags: flags to pass into the subordinate .emit_fence() call
  *
  * Emits a fence command on the requested ring (all asics).
  * Returns 0 on success, -ENOMEM on failure.
  */
-int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f,
+int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f, struct amdgpu_job *job,
 		      unsigned flags)
 {
 	struct amdgpu_device *adev = ring->adev;
-	struct amdgpu_fence *fence;
+	struct dma_fence *fence;
+	struct amdgpu_fence *am_fence;
 	struct dma_fence __rcu **ptr;
 	uint32_t seq;
 	int r;
 
-	fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_KERNEL);
-	if (fence == NULL)
-		return -ENOMEM;
+	if (job == NULL) {
+		/* create a separate hw fence */
+		am_fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_ATOMIC);
+		if (am_fence == NULL)
+			return -ENOMEM;
+		fence = &am_fence->base;
+		am_fence->ring = ring;
+	} else {
+		/* use the job-embedded fence */
+		fence = &job->hw_fence;
+	}
 
 	seq = ++ring->fence_drv.sync_seq;
-	fence->ring = ring;
-	dma_fence_init(&fence->base, &amdgpu_fence_ops,
-		       &ring->fence_drv.lock,
-		       adev->fence_context + ring->idx,
-		       seq);
+	if (job != NULL && job->job_run_counter) {
+		/* reinit seq for resubmitted jobs */
+		fence->seqno = seq;
+	} else {
+		dma_fence_init(fence, &amdgpu_fence_ops,
+			       &ring->fence_drv.lock,
+			       adev->fence_context + ring->idx,
+			       seq);
+	}
+
+	if (job != NULL) {
+		/* mark that this fence has a parent job */
+		set_bit(AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT, &fence->flags);
+	}
+
 	amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr, seq,
 			       flags | AMDGPU_FENCE_FLAG_INT);
 	pm_runtime_get_noresume(adev_to_drm(adev)->dev);
@@ -175,9 +195,9 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f,
 	/* This function can't be called concurrently anyway, otherwise
 	 * emitting the fence would mess up the hardware ring buffer.
 	 */
-	rcu_assign_pointer(*ptr, dma_fence_get(&fence->base));
+	rcu_assign_pointer(*ptr, dma_fence_get(fence));
 
-	*f = &fence->base;
+	*f = fence;
 
 	return 0;
 }
@@ -621,8 +641,16 @@ static const char *amdgpu_fence_get_driver_name(struct dma_fence *fence)
 
 static const char *amdgpu_fence_get_timeline_name(struct dma_fence *f)
 {
-	struct amdgpu_fence *fence = to_amdgpu_fence(f);
-	return (const char *)fence->ring->name;
+	struct amdgpu_ring *ring;
+
+	if (test_bit(AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT, &f->flags)) {
+		struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence);
+
+		ring = to_amdgpu_ring(job->base.sched);
+	} else {
+		ring = to_amdgpu_fence(f)->ring;
+	}
+	return (const char *)ring->name;
 }
 
 /**
@@ -635,13 +663,20 @@ static const char *amdgpu_fence_get_timeline_name(struct dma_fence *f)
  */
static bool amdgpu_fence_enable_signaling(struct dma_fence *f)
 {
-	struct amdgpu_fence *fence = to_amdgpu_fence(f);
-	struct amdgpu_ring *ring = fence->ring;
+	struct amdgpu_ring *ring;
+
+	if (test_bit(AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT, &f->flags)) {
+		struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence);
+
+		ring = to_amdgpu_ring(job->base.sched);
+	} else {
+		ring = to_amdgpu_fence(f)->ring;
+	}
 
 	if (!timer_pending(&ring->fence_drv.fallback_timer))
 		amdgpu_fence_schedule_fallback(ring);
 
-	DMA_FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx);
+	DMA_FENCE_TRACE(f, "armed on ring %i!\n", ring->idx);
 
 	return true;
 }
@@ -656,8 +691,20 @@ static bool amdgpu_fence_enable_signaling(struct dma_fence *f)
 static void amdgpu_fence_free(struct rcu_head *rcu)
 {
 	struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
-	struct amdgpu_fence *fence = to_amdgpu_fence(f);
-	kmem_cache_free(amdgpu_fence_slab, fence);
+
+	if (test_bit(AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT, &f->flags)) {
+		/* free the job if the fence has a parent job */
+		struct amdgpu_job *job;
+
+		job = container_of(f, struct amdgpu_job, hw_fence);
+		kfree(job);
+	} else {
+		/* free to fence_slab if it's a separate fence */
+		struct amdgpu_fence *fence;
+
+		fence = to_amdgpu_fence(f);
+		kmem_cache_free(amdgpu_fence_slab, fence);
+	}
 }
 
 /**
@@ -680,6 +727,7 @@ static const struct dma_fence_ops amdgpu_fence_ops = {
 	.release = amdgpu_fence_release,
 };
 
+
 /*
  * Fence debugfs
  */
--
cgit v1.2.3
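
The practical effect of embedding the fence shows up at both ends of its life: allocation in amdgpu_fence_emit() and the container_of() lookups in the fence ops. Below is a minimal sketch of mapping a fence back to its ring under the new scheme, assuming the helper name example_fence_to_ring() (illustrative only; the real lookups live in the fence ops shown in the diff above):

  /* Sketch only: the flag bit decides which structure the fence is
   * embedded in, so the matching container_of() can be used.
   */
  static struct amdgpu_ring *example_fence_to_ring(struct dma_fence *f)
  {
  	if (test_bit(AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT, &f->flags)) {
  		struct amdgpu_job *job =
  			container_of(f, struct amdgpu_job, hw_fence);

  		return to_amdgpu_ring(job->base.sched);	/* job-embedded fence */
  	}
  	return to_amdgpu_fence(f)->ring;		/* standalone fence (ib_test etc.) */
  }

The same test also steers amdgpu_fence_free(): a job-embedded fence releases the whole job with kfree(), while a standalone fence goes back to amdgpu_fence_slab.
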
From f7d6779df642720e22bffd449e683bb8690bd3bf Mon Sep 17 00:00:00 2001
From: Guchun Chen
Date: Fri, 27 Aug 2021 18:31:41 +0800
Subject: drm/amdgpu: stop scheduler when calling hw_fini (v2)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This guarantees that no more work on the ring can be submitted to the
hardware in the suspend/resume case; otherwise a potential race will
occur and the ring gets no chance to stay empty before suspend.

v2: Call drm_sched_resubmit_jobs before drm_sched_start to restart jobs
    from the pending list.

Suggested-by: Andrey Grodzovsky
Suggested-by: Christian König
Signed-off-by: Guchun Chen
Reviewed-by: Christian König
Signed-off-by: Alex Deucher
Cc: stable@vger.kernel.org
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 8 ++++++++
 1 file changed, 8 insertions(+)

(limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c')

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 14499f0de32d..8d682befe0d6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -552,6 +552,9 @@ void amdgpu_fence_driver_hw_fini(struct amdgpu_device *adev)
 		if (!ring || !ring->fence_drv.initialized)
 			continue;
 
+		if (!ring->no_scheduler)
+			drm_sched_stop(&ring->sched, NULL);
+
 		/* You can't wait for HW to signal if it's gone */
 		if (!drm_dev_is_unplugged(&adev->ddev))
 			r = amdgpu_fence_wait_empty(ring);
@@ -611,6 +614,11 @@ void amdgpu_fence_driver_hw_init(struct amdgpu_device *adev)
 		if (!ring || !ring->fence_drv.initialized)
 			continue;
 
+		if (!ring->no_scheduler) {
+			drm_sched_resubmit_jobs(&ring->sched);
+			drm_sched_start(&ring->sched, true);
+		}
+
 		/* enable the interrupt */
 		if (ring->fence_drv.irq_src)
 			amdgpu_irq_get(adev, ring->fence_drv.irq_src,
--
cgit v1.2.3
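
Taken together with the first patch in this series, the per-ring suspend/resume flow around the scheduler now looks roughly as follows. This is a sketch of the ordering only, assuming simplified helpers (the example_* names are illustrative, not amdgpu functions):

  /* Sketch only: park the scheduler before draining the ring, and
   * restart it after the fence interrupts are re-armed on resume.
   */
  static void example_ring_suspend(struct amdgpu_ring *ring)
  {
  	if (!ring->no_scheduler)
  		drm_sched_stop(&ring->sched, NULL);	/* no new jobs reach the HW */
  	/* ... amdgpu_fence_wait_empty(), disable the fence IRQ ... */
  }

  static void example_ring_resume(struct amdgpu_ring *ring)
  {
  	if (!ring->no_scheduler) {
  		drm_sched_resubmit_jobs(&ring->sched);	/* replay the pending list */
  		drm_sched_start(&ring->sched, true);	/* accept new work again */
  	}
  	/* ... re-enable the fence IRQ ... */
  }

Stopping the scheduler first closes the race the commit message describes: nothing can be pushed to the ring between amdgpu_fence_wait_empty() and the actual suspend.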