author    | Rodrigo Vivi <rodrigo.vivi@intel.com> | 2022-12-30 04:09:09 -0500
committer | Rodrigo Vivi <rodrigo.vivi@intel.com> | 2022-12-30 04:18:36 -0500
commit    | b501d4dc83aa3940189b68045cadc8b3eac73988 (patch)
tree      | e5310a4fafd59505c3db86bb05baf964cb8da944 /drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
parent    | 4071d98b296a5bc5fd4b15ec651bd05800ec9510 (diff)
parent    | 1b929c02afd37871d5afb9d498426f83432e71c2 (diff)
Merge drm/drm-next into drm-intel-gt-next
Sync after v6.2-rc1 landed in drm-next.
We need to get some dependencies in place before we can merge
the fixes series from Gwan-gyeong and Chris.
References: https://lore.kernel.org/all/Y6x5JCDnh2rvh4lA@intel.com/
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c')
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 330
1 file changed, 284 insertions, 46 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 0320be4a5fc6..f202b45c413c 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -47,6 +47,7 @@
 #include "amdgpu_ras.h"
 
+#include "amdgpu_ring_mux.h"
 #include "gfx_v9_4.h"
 #include "gfx_v9_0.h"
 #include "gfx_v9_4_2.h"
@@ -56,6 +57,7 @@
 #include "asic_reg/gc/gc_9_0_default.h"
 
 #define GFX9_NUM_GFX_RINGS     1
+#define GFX9_NUM_SW_GFX_RINGS  2
 #define GFX9_MEC_HPD_SIZE 4096
 #define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L
 #define RLC_SAVE_RESTORE_ADDR_STARTING_OFFSET 0x00000000L
@@ -753,7 +755,7 @@ static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev);
 static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev,
				struct amdgpu_cu_info *cu_info);
 static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev);
-static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring);
+static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume);
 static u64 gfx_v9_0_ring_get_rptr_compute(struct amdgpu_ring *ring);
 static void gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
					  void *ras_error_status);
@@ -826,9 +828,10 @@ static void gfx_v9_0_kiq_unmap_queues(struct amdgpu_ring *kiq_ring,
			PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index));
 
	if (action == PREEMPT_QUEUES_NO_UNMAP) {
-		amdgpu_ring_write(kiq_ring, lower_32_bits(gpu_addr));
-		amdgpu_ring_write(kiq_ring, upper_32_bits(gpu_addr));
-		amdgpu_ring_write(kiq_ring, seq);
+		amdgpu_ring_write(kiq_ring, lower_32_bits(ring->wptr & ring->buf_mask));
+		amdgpu_ring_write(kiq_ring, 0);
+		amdgpu_ring_write(kiq_ring, 0);
+
	} else {
		amdgpu_ring_write(kiq_ring, 0);
		amdgpu_ring_write(kiq_ring, 0);
@@ -1564,7 +1567,7 @@ static void gfx_v9_0_init_always_on_cu_mask(struct amdgpu_device *adev)
			mask = 1;
			cu_bitmap = 0;
			counter = 0;
-			gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);
+			amdgpu_gfx_select_se_sh(adev, i, j, 0xffffffff);
 
			for (k = 0; k < adev->gfx.config.max_cu_per_sh; k ++) {
				if (cu_info->bitmap[i][j] & mask) {
@@ -1583,7 +1586,7 @@ static void gfx_v9_0_init_always_on_cu_mask(struct amdgpu_device *adev)
			cu_info->ao_cu_bitmap[i][j] = cu_bitmap;
		}
	}
-	gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+	amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);
 }
@@ -1605,7 +1608,7 @@ static void gfx_v9_0_init_lbpw(struct amdgpu_device *adev)
 
	mutex_lock(&adev->grbm_idx_mutex);
	/* set mmRLC_LB_INIT_CU_MASK thru broadcast mode to enable all SE/SH*/
-	gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+	amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	WREG32_SOC15(GC, 0, mmRLC_LB_INIT_CU_MASK, 0xffffffff);
 
	/* set mmRLC_LB_PARAMS = 0x003F_1006 */
@@ -1654,7 +1657,7 @@ static void gfx_v9_4_init_lbpw(struct amdgpu_device *adev)
 
	mutex_lock(&adev->grbm_idx_mutex);
	/* set mmRLC_LB_INIT_CU_MASK thru broadcast mode to enable all SE/SH*/
-	gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+	amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	WREG32_SOC15(GC, 0, mmRLC_LB_INIT_CU_MASK, 0xffffffff);
 
	/* set mmRLC_LB_PARAMS = 0x003F_1006 */
@@ -1919,8 +1922,6 @@ static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
	u32 gb_addr_config;
	int err;
 
-	adev->gfx.funcs = &gfx_v9_0_gfx_funcs;
-
	switch (adev->ip_versions[GC_HWIP][0]) {
	case IP_VERSION(9, 0, 1):
		adev->gfx.config.max_hw_contexts = 8;
@@ -2105,6 +2106,7 @@ static int gfx_v9_0_sw_init(void *handle)
	struct amdgpu_ring *ring;
	struct amdgpu_kiq *kiq;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	unsigned int hw_prio;
 
	switch (adev->ip_versions[GC_HWIP][0]) {
	case IP_VERSION(9, 0, 1):
@@ -2188,6 +2190,9 @@ static int gfx_v9_0_sw_init(void *handle)
		sprintf(ring->name, "gfx_%d", i);
		ring->use_doorbell = true;
		ring->doorbell_index = adev->doorbell_index.gfx_ring0 << 1;
+
+		/* disable scheduler on the real ring */
+		ring->no_scheduler = true;
		r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq,
				     AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP,
				     AMDGPU_RING_PRIO_DEFAULT, NULL);
@@ -2195,6 +2200,41 @@ static int gfx_v9_0_sw_init(void *handle)
			return r;
	}
 
+	/* set up the software rings */
+	if (adev->gfx.num_gfx_rings) {
+		for (i = 0; i < GFX9_NUM_SW_GFX_RINGS; i++) {
+			ring = &adev->gfx.sw_gfx_ring[i];
+			ring->ring_obj = NULL;
+			sprintf(ring->name, amdgpu_sw_ring_name(i));
+			ring->use_doorbell = true;
+			ring->doorbell_index = adev->doorbell_index.gfx_ring0 << 1;
+			ring->is_sw_ring = true;
+			hw_prio = amdgpu_sw_ring_priority(i);
+			r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq,
+					     AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP, hw_prio,
+					     NULL);
+			if (r)
+				return r;
+			ring->wptr = 0;
+		}
+
+		/* init the muxer and add software rings */
+		r = amdgpu_ring_mux_init(&adev->gfx.muxer, &adev->gfx.gfx_ring[0],
+					 GFX9_NUM_SW_GFX_RINGS);
+		if (r) {
+			DRM_ERROR("amdgpu_ring_mux_init failed(%d)\n", r);
+			return r;
+		}
+		for (i = 0; i < GFX9_NUM_SW_GFX_RINGS; i++) {
+			r = amdgpu_ring_mux_add_sw_ring(&adev->gfx.muxer,
+							&adev->gfx.sw_gfx_ring[i]);
+			if (r) {
+				DRM_ERROR("amdgpu_ring_mux_add_sw_ring failed(%d)\n", r);
+				return r;
+			}
+		}
+	}
+
	/* set up the compute queues - allocate horizontally across pipes */
	ring_id = 0;
	for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
@@ -2245,6 +2285,12 @@ static int gfx_v9_0_sw_fini(void *handle)
	int i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+	if (adev->gfx.num_gfx_rings) {
+		for (i = 0; i < GFX9_NUM_SW_GFX_RINGS; i++)
+			amdgpu_ring_fini(&adev->gfx.sw_gfx_ring[i]);
+		amdgpu_ring_mux_fini(&adev->gfx.muxer);
+	}
+
	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
		amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
	for (i = 0; i < adev->gfx.num_compute_rings; i++)
@@ -2324,13 +2370,13 @@ static void gfx_v9_0_setup_rb(struct amdgpu_device *adev)
	mutex_lock(&adev->grbm_idx_mutex);
	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
-			gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);
+			amdgpu_gfx_select_se_sh(adev, i, j, 0xffffffff);
			data = gfx_v9_0_get_rb_active_bitmap(adev);
			active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
					       rb_bitmap_width_per_sh);
		}
	}
-	gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+	amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);
 
	adev->gfx.config.backend_enable_mask = active_rbs;
@@ -2467,14 +2513,14 @@ static void gfx_v9_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
	mutex_lock(&adev->grbm_idx_mutex);
	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
-			gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);
+			amdgpu_gfx_select_se_sh(adev, i, j, 0xffffffff);
			for (k = 0; k < adev->usec_timeout; k++) {
				if (RREG32_SOC15(GC, 0, mmRLC_SERDES_CU_MASTER_BUSY) == 0)
					break;
				udelay(1);
			}
			if (k == adev->usec_timeout) {
-				gfx_v9_0_select_se_sh(adev, 0xffffffff,
+				amdgpu_gfx_select_se_sh(adev, 0xffffffff,
						      0xffffffff, 0xffffffff);
				mutex_unlock(&adev->grbm_idx_mutex);
				DRM_INFO("Timeout wait for RLC serdes %u,%u\n",
@@ -2483,7 +2529,7 @@ static void gfx_v9_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
			}
		}
	}
-	gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+	amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);
 
	mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK |
@@ -3583,6 +3629,8 @@ static int gfx_v9_0_kiq_init_queue(struct amdgpu_ring *ring)
		memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
		((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
		((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
+		if (amdgpu_sriov_vf(adev) && adev->in_suspend)
+			amdgpu_ring_clear_ring(ring);
		mutex_lock(&adev->srbm_mutex);
		soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
		gfx_v9_0_mqd_init(ring);
@@ -4539,6 +4587,8 @@ static int gfx_v9_0_early_init(void *handle)
 {
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+	adev->gfx.funcs = &gfx_v9_0_gfx_funcs;
+
	if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 1) ||
	    adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2))
		adev->gfx.num_gfx_rings = 0;
@@ -5155,11 +5205,17 @@ static void gfx_v9_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
 
	control |= ib->length_dw | (vmid << 24);
 
-	if (amdgpu_sriov_vf(ring->adev) && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) {
+	if (ib->flags & AMDGPU_IB_FLAG_PREEMPT) {
		control |= INDIRECT_BUFFER_PRE_ENB(1);
 
+		if (flags & AMDGPU_IB_PREEMPTED)
+			control |= INDIRECT_BUFFER_PRE_RESUME(1);
+
		if (!(ib->flags & AMDGPU_IB_FLAG_CE) && vmid)
-			gfx_v9_0_ring_emit_de_meta(ring);
+			gfx_v9_0_ring_emit_de_meta(ring,
+						   (!amdgpu_sriov_vf(ring->adev) &&
+						   flags & AMDGPU_IB_PREEMPTED) ?
+						   true : false);
	}
 
	amdgpu_ring_write(ring, header);
@@ -5214,17 +5270,24 @@ static void gfx_v9_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
	bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
	bool writeback = flags & AMDGPU_FENCE_FLAG_TC_WB_ONLY;
+	bool exec = flags & AMDGPU_FENCE_FLAG_EXEC;
+	uint32_t dw2 = 0;
 
	/* RELEASE_MEM - flush caches, send int */
	amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 6));
-	amdgpu_ring_write(ring, ((writeback ? (EOP_TC_WB_ACTION_EN |
-					       EOP_TC_NC_ACTION_EN) :
-					      (EOP_TCL1_ACTION_EN |
-					       EOP_TC_ACTION_EN |
-					       EOP_TC_WB_ACTION_EN |
-					       EOP_TC_MD_ACTION_EN)) |
-				 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
-				 EVENT_INDEX(5)));
+
+	if (writeback) {
+		dw2 = EOP_TC_NC_ACTION_EN;
+	} else {
+		dw2 = EOP_TCL1_ACTION_EN | EOP_TC_ACTION_EN |
+				EOP_TC_MD_ACTION_EN;
+	}
+	dw2 |= EOP_TC_WB_ACTION_EN | EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
+			EVENT_INDEX(5);
+	if (exec)
+		dw2 |= EOP_EXEC;
+
+	amdgpu_ring_write(ring, dw2);
	amdgpu_ring_write(ring, DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
 
	/*
@@ -5329,33 +5392,135 @@ static void gfx_v9_ring_emit_sb(struct amdgpu_ring *ring)
	amdgpu_ring_write(ring, 0);
 }
 
-static void gfx_v9_0_ring_emit_ce_meta(struct amdgpu_ring *ring)
+static void gfx_v9_0_ring_emit_ce_meta(struct amdgpu_ring *ring, bool resume)
 {
+	struct amdgpu_device *adev = ring->adev;
	struct v9_ce_ib_state ce_payload = {0};
-	uint64_t csa_addr;
+	uint64_t offset, ce_payload_gpu_addr;
+	void *ce_payload_cpu_addr;
	int cnt;
 
	cnt = (sizeof(ce_payload) >> 2) + 4 - 2;
-	csa_addr = amdgpu_csa_vaddr(ring->adev);
+
+	if (ring->is_mes_queue) {
+		offset = offsetof(struct amdgpu_mes_ctx_meta_data,
+				  gfx[0].gfx_meta_data) +
+			offsetof(struct v9_gfx_meta_data, ce_payload);
+		ce_payload_gpu_addr =
+			amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
+		ce_payload_cpu_addr =
+			amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
+	} else {
+		offset = offsetof(struct v9_gfx_meta_data, ce_payload);
+		ce_payload_gpu_addr = amdgpu_csa_vaddr(ring->adev) + offset;
+		ce_payload_cpu_addr = adev->virt.csa_cpu_addr + offset;
+	}
 
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt));
	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(2) |
				 WRITE_DATA_DST_SEL(8) |
				 WR_CONFIRM) |
				 WRITE_DATA_CACHE_POLICY(0));
-	amdgpu_ring_write(ring, lower_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, ce_payload)));
-	amdgpu_ring_write(ring, upper_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, ce_payload)));
-	amdgpu_ring_write_multiple(ring, (void *)&ce_payload, sizeof(ce_payload) >> 2);
+	amdgpu_ring_write(ring, lower_32_bits(ce_payload_gpu_addr));
+	amdgpu_ring_write(ring, upper_32_bits(ce_payload_gpu_addr));
+
+	if (resume)
+		amdgpu_ring_write_multiple(ring, ce_payload_cpu_addr,
+					   sizeof(ce_payload) >> 2);
+	else
+		amdgpu_ring_write_multiple(ring, (void *)&ce_payload,
+					   sizeof(ce_payload) >> 2);
+}
+
+static int gfx_v9_0_ring_preempt_ib(struct amdgpu_ring *ring)
+{
+	int i, r = 0;
+	struct amdgpu_device *adev = ring->adev;
+	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
+	struct amdgpu_ring *kiq_ring = &kiq->ring;
+	unsigned long flags;
+
+	if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
+		return -EINVAL;
+
+	spin_lock_irqsave(&kiq->ring_lock, flags);
+
+	if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size)) {
+		spin_unlock_irqrestore(&kiq->ring_lock, flags);
+		return -ENOMEM;
+	}
+
+	/* assert preemption condition */
+	amdgpu_ring_set_preempt_cond_exec(ring, false);
+
+	ring->trail_seq += 1;
+	amdgpu_ring_alloc(ring, 13);
+	gfx_v9_0_ring_emit_fence(ring, ring->trail_fence_gpu_addr,
				 ring->trail_seq, AMDGPU_FENCE_FLAG_EXEC | AMDGPU_FENCE_FLAG_INT);
+	/*reset the CP_VMID_PREEMPT after trailing fence*/
+	amdgpu_ring_emit_wreg(ring,
+			      SOC15_REG_OFFSET(GC, 0, mmCP_VMID_PREEMPT),
+			      0x0);
+
+	/* assert IB preemption, emit the trailing fence */
+	kiq->pmf->kiq_unmap_queues(kiq_ring, ring, PREEMPT_QUEUES_NO_UNMAP,
+				   ring->trail_fence_gpu_addr,
+				   ring->trail_seq);
+
+	amdgpu_ring_commit(kiq_ring);
+	spin_unlock_irqrestore(&kiq->ring_lock, flags);
+
+	/* poll the trailing fence */
+	for (i = 0; i < adev->usec_timeout; i++) {
+		if (ring->trail_seq ==
+		    le32_to_cpu(*ring->trail_fence_cpu_addr))
+			break;
+		udelay(1);
+	}
+
+	if (i >= adev->usec_timeout) {
+		r = -EINVAL;
+		DRM_WARN("ring %d timeout to preempt ib\n", ring->idx);
+	}
+
+	amdgpu_ring_commit(ring);
+
+	/* deassert preemption condition */
+	amdgpu_ring_set_preempt_cond_exec(ring, true);
+	return r;
 }
 
-static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring)
+static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume)
 {
+	struct amdgpu_device *adev = ring->adev;
	struct v9_de_ib_state de_payload = {0};
-	uint64_t csa_addr, gds_addr;
+	uint64_t offset, gds_addr, de_payload_gpu_addr;
+	void *de_payload_cpu_addr;
	int cnt;
 
-	csa_addr = amdgpu_csa_vaddr(ring->adev);
-	gds_addr = csa_addr + 4096;
+	if (ring->is_mes_queue) {
+		offset = offsetof(struct amdgpu_mes_ctx_meta_data,
+				  gfx[0].gfx_meta_data) +
+			offsetof(struct v9_gfx_meta_data, de_payload);
+		de_payload_gpu_addr =
+			amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
+		de_payload_cpu_addr =
+			amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
+
+		offset = offsetof(struct amdgpu_mes_ctx_meta_data,
+				  gfx[0].gds_backup) +
+			offsetof(struct v9_gfx_meta_data, de_payload);
+		gds_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
+	} else {
+		offset = offsetof(struct v9_gfx_meta_data, de_payload);
+		de_payload_gpu_addr = amdgpu_csa_vaddr(ring->adev) + offset;
+		de_payload_cpu_addr = adev->virt.csa_cpu_addr + offset;
+
+		gds_addr = ALIGN(amdgpu_csa_vaddr(ring->adev) +
+				 AMDGPU_CSA_SIZE - adev->gds.gds_size,
+				 PAGE_SIZE);
+	}
+
	de_payload.gds_backup_addrlo = lower_32_bits(gds_addr);
	de_payload.gds_backup_addrhi = upper_32_bits(gds_addr);
@@ -5365,9 +5530,15 @@ static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring)
				 WRITE_DATA_DST_SEL(8) |
				 WR_CONFIRM) |
				 WRITE_DATA_CACHE_POLICY(0));
-	amdgpu_ring_write(ring, lower_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, de_payload)));
-	amdgpu_ring_write(ring, upper_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, de_payload)));
-	amdgpu_ring_write_multiple(ring, (void *)&de_payload, sizeof(de_payload) >> 2);
+	amdgpu_ring_write(ring, lower_32_bits(de_payload_gpu_addr));
+	amdgpu_ring_write(ring, upper_32_bits(de_payload_gpu_addr));
+
+	if (resume)
+		amdgpu_ring_write_multiple(ring, de_payload_cpu_addr,
+					   sizeof(de_payload) >> 2);
+	else
+		amdgpu_ring_write_multiple(ring, (void *)&de_payload,
+					   sizeof(de_payload) >> 2);
 }
 
 static void gfx_v9_0_ring_emit_frame_cntl(struct amdgpu_ring *ring, bool start,
@@ -5383,8 +5554,9 @@ static void gfx_v9_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
 {
	uint32_t dw2 = 0;
 
-	if (amdgpu_sriov_vf(ring->adev))
-		gfx_v9_0_ring_emit_ce_meta(ring);
+	gfx_v9_0_ring_emit_ce_meta(ring,
+				   (!amdgpu_sriov_vf(ring->adev) &&
+				   flags & AMDGPU_IB_PREEMPTED) ? true : false);
 
	dw2 |= 0x80000000; /* set load_enable otherwise this package is just NOPs */
	if (flags & AMDGPU_HAVE_CTX_SWITCH) {
@@ -5710,7 +5882,12 @@ static int gfx_v9_0_eop_irq(struct amdgpu_device *adev,
 
	switch (me_id) {
	case 0:
-		amdgpu_fence_process(&adev->gfx.gfx_ring[0]);
+		if (adev->gfx.num_gfx_rings &&
+		    !amdgpu_mcbp_handle_trailing_fence_irq(&adev->gfx.muxer)) {
+			/* Fence signals are handled on the software rings*/
+			for (i = 0; i < GFX9_NUM_SW_GFX_RINGS; i++)
+				amdgpu_fence_process(&adev->gfx.sw_gfx_ring[i]);
+		}
		break;
	case 1:
	case 2:
@@ -6482,7 +6659,7 @@ static void gfx_v9_0_reset_ras_error_count(struct amdgpu_device *adev)
	for (i = 0; i < ARRAY_SIZE(gfx_v9_0_edc_counter_regs); i++) {
		for (j = 0; j < gfx_v9_0_edc_counter_regs[i].se_num; j++) {
			for (k = 0; k < gfx_v9_0_edc_counter_regs[i].instance; k++) {
-				gfx_v9_0_select_se_sh(adev, j, 0x0, k);
+				amdgpu_gfx_select_se_sh(adev, j, 0x0, k);
				RREG32(SOC15_REG_ENTRY_OFFSET(gfx_v9_0_edc_counter_regs[i]));
			}
		}
@@ -6544,7 +6721,7 @@ static void gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
	for (i = 0; i < ARRAY_SIZE(gfx_v9_0_edc_counter_regs); i++) {
		for (j = 0; j < gfx_v9_0_edc_counter_regs[i].se_num; j++) {
			for (k = 0; k < gfx_v9_0_edc_counter_regs[i].instance; k++) {
-				gfx_v9_0_select_se_sh(adev, j, 0, k);
+				amdgpu_gfx_select_se_sh(adev, j, 0, k);
				reg_value =
					RREG32(SOC15_REG_ENTRY_OFFSET(gfx_v9_0_edc_counter_regs[i]));
				if (reg_value)
@@ -6559,7 +6736,7 @@ static void gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
	err_data->ce_count += sec_count;
	err_data->ue_count += ded_count;
 
-	gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+	amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);
 
	gfx_v9_0_query_utc_edc_status(adev, err_data);
@@ -6707,6 +6884,62 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_gfx = {
	.emit_cntxcntl = gfx_v9_ring_emit_cntxcntl,
	.init_cond_exec = gfx_v9_0_ring_emit_init_cond_exec,
	.patch_cond_exec = gfx_v9_0_ring_emit_patch_cond_exec,
+	.preempt_ib = gfx_v9_0_ring_preempt_ib,
+	.emit_frame_cntl = gfx_v9_0_ring_emit_frame_cntl,
+	.emit_wreg = gfx_v9_0_ring_emit_wreg,
+	.emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
+	.emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
+	.soft_recovery = gfx_v9_0_ring_soft_recovery,
+	.emit_mem_sync = gfx_v9_0_emit_mem_sync,
+};
+
+static const struct amdgpu_ring_funcs gfx_v9_0_sw_ring_funcs_gfx = {
+	.type = AMDGPU_RING_TYPE_GFX,
+	.align_mask = 0xff,
+	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
+	.support_64bit_ptrs = true,
+	.secure_submission_supported = true,
+	.vmhub = AMDGPU_GFXHUB_0,
+	.get_rptr = amdgpu_sw_ring_get_rptr_gfx,
+	.get_wptr = amdgpu_sw_ring_get_wptr_gfx,
+	.set_wptr = amdgpu_sw_ring_set_wptr_gfx,
+	.emit_frame_size = /* totally 242 maximum if 16 IBs */
+		5 +  /* COND_EXEC */
+		7 +  /* PIPELINE_SYNC */
+		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
+		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
+		2 + /* VM_FLUSH */
+		8 +  /* FENCE for VM_FLUSH */
+		20 + /* GDS switch */
+		4 + /* double SWITCH_BUFFER,
+		     * the first COND_EXEC jump to the place just
+		     * prior to this double SWITCH_BUFFER
+		     */
+		5 + /* COND_EXEC */
+		7 +  /* HDP_flush */
+		4 +  /* VGT_flush */
+		14 + /* CE_META */
+		31 + /* DE_META */
+		3 + /* CNTX_CTRL */
+		5 + /* HDP_INVL */
+		8 + 8 + /* FENCE x2 */
+		2 + /* SWITCH_BUFFER */
+		7, /* gfx_v9_0_emit_mem_sync */
+	.emit_ib_size = 4, /* gfx_v9_0_ring_emit_ib_gfx */
+	.emit_ib = gfx_v9_0_ring_emit_ib_gfx,
+	.emit_fence = gfx_v9_0_ring_emit_fence,
+	.emit_pipeline_sync = gfx_v9_0_ring_emit_pipeline_sync,
+	.emit_vm_flush = gfx_v9_0_ring_emit_vm_flush,
+	.emit_gds_switch = gfx_v9_0_ring_emit_gds_switch,
+	.emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush,
+	.test_ring = gfx_v9_0_ring_test_ring,
+	.test_ib = gfx_v9_0_ring_test_ib,
+	.insert_nop = amdgpu_sw_ring_insert_nop,
+	.pad_ib = amdgpu_ring_generic_pad_ib,
+	.emit_switch_buffer = gfx_v9_ring_emit_sb,
+	.emit_cntxcntl = gfx_v9_ring_emit_cntxcntl,
+	.init_cond_exec = gfx_v9_0_ring_emit_init_cond_exec,
+	.patch_cond_exec = gfx_v9_0_ring_emit_patch_cond_exec,
	.emit_frame_cntl = gfx_v9_0_ring_emit_frame_cntl,
	.emit_wreg = gfx_v9_0_ring_emit_wreg,
	.emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
@@ -6792,6 +7025,11 @@ static void gfx_v9_0_set_ring_funcs(struct amdgpu_device *adev)
	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
		adev->gfx.gfx_ring[i].funcs = &gfx_v9_0_ring_funcs_gfx;
 
+	if (adev->gfx.num_gfx_rings) {
+		for (i = 0; i < GFX9_NUM_SW_GFX_RINGS; i++)
+			adev->gfx.sw_gfx_ring[i].funcs = &gfx_v9_0_sw_ring_funcs_gfx;
+	}
+
	for (i = 0; i < adev->gfx.num_compute_rings; i++)
		adev->gfx.compute_ring[i].funcs = &gfx_v9_0_ring_funcs_compute;
 }
@@ -6963,7 +7201,7 @@ static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev,
			mask = 1;
			ao_bitmap = 0;
			counter = 0;
-			gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);
+			amdgpu_gfx_select_se_sh(adev, i, j, 0xffffffff);
			gfx_v9_0_set_user_cu_inactive_bitmap(
				adev, disable_masks[i * adev->gfx.config.max_sh_per_se + j]);
			bitmap = gfx_v9_0_get_cu_active_bitmap(adev);
@@ -6996,7 +7234,7 @@ static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev,
			cu_info->ao_cu_bitmap[i % 4][j + i / 4] = ao_bitmap;
		}
	}
-	gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+	amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);
 
	cu_info->number = active_cu_number;