Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r-- drivers/gpu/drm/Kconfig | 15
-rw-r--r-- drivers/gpu/drm/Makefile | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 6
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_display.c | 5
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c | 15
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 5
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c | 92
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c | 5
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 54
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_object.h | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_preempt_mgr.c | 17
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h | 5
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 100
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 5
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 24
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c | 97
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_migrate.c | 68
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 4
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 5
-rw-r--r-- drivers/gpu/drm/bridge/Kconfig | 10
-rw-r--r-- drivers/gpu/drm/bridge/Makefile | 1
-rw-r--r-- drivers/gpu/drm/bridge/ti-sn65dsi83.c | 709
-rw-r--r-- drivers/gpu/drm/drm_cache.c | 148
-rw-r--r-- drivers/gpu/drm/drm_drv.c | 2
-rw-r--r-- drivers/gpu/drm/drm_gem.c | 7
-rw-r--r-- drivers/gpu/drm/drm_gem_atomic_helper.c | 2
-rw-r--r-- drivers/gpu/drm/drm_gem_cma_helper.c | 3
-rw-r--r-- drivers/gpu/drm/drm_gem_ttm_helper.c | 6
-rw-r--r-- drivers/gpu/drm/drm_gem_vram_helper.c | 7
-rw-r--r-- drivers/gpu/drm/etnaviv/etnaviv_gem.c | 10
-rw-r--r-- drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c | 8
-rw-r--r-- drivers/gpu/drm/exynos/exynos7_drm_decon.c | 2
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_ipp.c | 4
-rw-r--r-- drivers/gpu/drm/hyperv/Makefile | 8
-rw-r--r-- drivers/gpu/drm/hyperv/hyperv_drm.h | 52
-rw-r--r-- drivers/gpu/drm/hyperv/hyperv_drm_drv.c | 311
-rw-r--r-- drivers/gpu/drm/hyperv/hyperv_drm_modeset.c | 231
-rw-r--r-- drivers/gpu/drm/hyperv/hyperv_drm_proto.c | 485
-rw-r--r-- drivers/gpu/drm/i915/Makefile | 3
-rw-r--r-- drivers/gpu/drm/i915/display/icl_dsi.c | 21
-rw-r--r-- drivers/gpu/drm/i915/display/intel_atomic.c | 20
-rw-r--r-- drivers/gpu/drm/i915/display/intel_atomic.h | 1
-rw-r--r-- drivers/gpu/drm/i915/display/intel_bw.c | 4
-rw-r--r-- drivers/gpu/drm/i915/display/intel_cdclk.c | 85
-rw-r--r-- drivers/gpu/drm/i915/display/intel_csr.h | 21
-rw-r--r-- drivers/gpu/drm/i915/display/intel_cursor.c | 4
-rw-r--r-- drivers/gpu/drm/i915/display/intel_ddi.c | 53
-rw-r--r-- drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c | 53
-rw-r--r-- drivers/gpu/drm/i915/display/intel_ddi_buf_trans.h | 4
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display.c | 145
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display_debugfs.c | 30
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display_power.c | 139
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display_types.h | 5
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dmc.c (renamed from drivers/gpu/drm/i915/display/intel_csr.c) | 386
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dmc.h | 43
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dp_link_training.c | 71
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dpll_mgr.c | 69
-rw-r--r-- drivers/gpu/drm/i915/display/intel_fb.c | 13
-rw-r--r-- drivers/gpu/drm/i915/display/intel_fifo_underrun.c | 57
-rw-r--r-- drivers/gpu/drm/i915/display/intel_hdmi.c | 104
-rw-r--r-- drivers/gpu/drm/i915/display/intel_psr.c | 187
-rw-r--r-- drivers/gpu/drm/i915/display/intel_psr.h | 2
-rw-r--r-- drivers/gpu/drm/i915/display/intel_qp_tables.c | 309
-rw-r--r-- drivers/gpu/drm/i915/display/intel_qp_tables.h | 14
-rw-r--r-- drivers/gpu/drm/i915/display/intel_tc.c | 152
-rw-r--r-- drivers/gpu/drm/i915/display/intel_tc.h | 2
-rw-r--r-- drivers/gpu/drm/i915/display/intel_tv.c | 2
-rw-r--r-- drivers/gpu/drm/i915/display/intel_vdsc.c | 117
-rw-r--r-- drivers/gpu/drm/i915/display/intel_vrr.c | 58
-rw-r--r-- drivers/gpu/drm/i915/display/skl_universal_plane.c | 27
-rw-r--r-- drivers/gpu/drm/i915/dma_resv_utils.c | 2
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_busy.c | 7
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c | 2
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_object.h | 2
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_userptr.c | 4
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_wait.c | 10
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_gtt.h | 3
-rw-r--r-- drivers/gpu/drm/i915/gvt/cmd_parser.c | 8
-rw-r--r-- drivers/gpu/drm/i915/gvt/dmabuf.c | 2
-rw-r--r-- drivers/gpu/drm/i915/gvt/fb_decoder.c | 10
-rw-r--r-- drivers/gpu/drm/i915/gvt/gtt.c | 4
-rw-r--r-- drivers/gpu/drm/i915/gvt/handlers.c | 14
-rw-r--r-- drivers/gpu/drm/i915/gvt/interrupt.c | 2
-rw-r--r-- drivers/gpu/drm/i915/gvt/mmio_context.c | 10
-rw-r--r-- drivers/gpu/drm/i915/gvt/scheduler.c | 4
-rw-r--r-- drivers/gpu/drm/i915/gvt/vgpu.c | 4
-rw-r--r-- drivers/gpu/drm/i915/i915_cmd_parser.c | 10
-rw-r--r-- drivers/gpu/drm/i915/i915_debugfs.c | 47
-rw-r--r-- drivers/gpu/drm/i915/i915_drv.c | 28
-rw-r--r-- drivers/gpu/drm/i915/i915_drv.h | 64
-rw-r--r-- drivers/gpu/drm/i915/i915_gem.c | 4
-rw-r--r-- drivers/gpu/drm/i915/i915_gpu_error.c | 92
-rw-r--r-- drivers/gpu/drm/i915/i915_irq.c | 53
-rw-r--r-- drivers/gpu/drm/i915/i915_irq.h | 1
-rw-r--r-- drivers/gpu/drm/i915/i915_pci.c | 5
-rw-r--r-- drivers/gpu/drm/i915/i915_perf.c | 44
-rw-r--r-- drivers/gpu/drm/i915/i915_pmu.c | 8
-rw-r--r-- drivers/gpu/drm/i915/i915_reg.h | 200
-rw-r--r-- drivers/gpu/drm/i915/i915_request.c | 10
-rw-r--r-- drivers/gpu/drm/i915/i915_suspend.c | 16
-rw-r--r-- drivers/gpu/drm/i915/i915_sw_fence.c | 4
-rw-r--r-- drivers/gpu/drm/i915/i915_sysfs.c | 2
-rw-r--r-- drivers/gpu/drm/i915/i915_vgpu.c | 2
-rw-r--r-- drivers/gpu/drm/i915/i915_vma.c | 2
-rw-r--r-- drivers/gpu/drm/i915/i915_vma.h | 7
-rw-r--r-- drivers/gpu/drm/i915/intel_device_info.c | 24
-rw-r--r-- drivers/gpu/drm/i915/intel_device_info.h | 4
-rw-r--r-- drivers/gpu/drm/i915/intel_dram.c | 14
-rw-r--r-- drivers/gpu/drm/i915/intel_pch.c | 10
-rw-r--r-- drivers/gpu/drm/i915/intel_pm.c | 323
-rw-r--r-- drivers/gpu/drm/i915/intel_pm.h | 4
-rw-r--r-- drivers/gpu/drm/i915/intel_sideband.c | 2
-rw-r--r-- drivers/gpu/drm/i915/intel_uncore.c | 24
-rw-r--r-- drivers/gpu/drm/i915/intel_wopcm.c | 10
-rw-r--r-- drivers/gpu/drm/i915/selftests/i915_gem_gtt.c | 4
-rw-r--r-- drivers/gpu/drm/i915/selftests/i915_perf.c | 6
-rw-r--r-- drivers/gpu/drm/i915/selftests/i915_request.c | 8
-rw-r--r-- drivers/gpu/drm/i915/selftests/igt_spinner.c | 12
-rw-r--r-- drivers/gpu/drm/i915/selftests/intel_uncore.c | 2
-rw-r--r-- drivers/gpu/drm/mediatek/mtk_disp_color.c | 3
-rw-r--r-- drivers/gpu/drm/mediatek/mtk_disp_gamma.c | 4
-rw-r--r-- drivers/gpu/drm/mediatek/mtk_disp_ovl.c | 3
-rw-r--r-- drivers/gpu/drm/mediatek/mtk_disp_rdma.c | 4
-rw-r--r-- drivers/gpu/drm/msm/Kconfig | 3
-rw-r--r-- drivers/gpu/drm/msm/msm_gem.c | 11
-rw-r--r-- drivers/gpu/drm/nouveau/dispnv50/wndw.c | 2
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_abi16.c | 2
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_bo.c | 40
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_chan.c | 2
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_fbcon.c | 2
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_fence.c | 4
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_gem.c | 20
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_mem.c | 11
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_mem.h | 15
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_ttm.c | 32
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_vmm.c | 4
-rw-r--r-- drivers/gpu/drm/nouveau/nv17_fence.c | 2
-rw-r--r-- drivers/gpu/drm/nouveau/nv50_fence.c | 2
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/mc/tu102.c | 6
-rw-r--r-- drivers/gpu/drm/panel/panel-elida-kd35t133.c | 8
-rw-r--r-- drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c | 2
-rw-r--r-- drivers/gpu/drm/panel/panel-sitronix-st7701.c | 2
-rw-r--r-- drivers/gpu/drm/panfrost/panfrost_device.h | 1
-rw-r--r-- drivers/gpu/drm/panfrost/panfrost_drv.c | 7
-rw-r--r-- drivers/gpu/drm/panfrost/panfrost_gpu.c | 1
-rw-r--r-- drivers/gpu/drm/panfrost/panfrost_job.c | 2
-rw-r--r-- drivers/gpu/drm/panfrost/panfrost_regs.h | 1
-rw-r--r-- drivers/gpu/drm/pl111/Kconfig | 1
-rw-r--r-- drivers/gpu/drm/qxl/qxl_debugfs.c | 2
-rw-r--r-- drivers/gpu/drm/qxl/qxl_drv.h | 6
-rw-r--r-- drivers/gpu/drm/qxl/qxl_dumb.c | 2
-rw-r--r-- drivers/gpu/drm/qxl/qxl_object.c | 10
-rw-r--r-- drivers/gpu/drm/qxl/qxl_ttm.c | 5
-rw-r--r-- drivers/gpu/drm/radeon/radeon_cs.c | 8
-rw-r--r-- drivers/gpu/drm/radeon/radeon_display.c | 2
-rw-r--r-- drivers/gpu/drm/radeon/radeon_gem.c | 16
-rw-r--r-- drivers/gpu/drm/radeon/radeon_mn.c | 4
-rw-r--r-- drivers/gpu/drm/radeon/radeon_object.c | 22
-rw-r--r-- drivers/gpu/drm/radeon/radeon_object.h | 4
-rw-r--r-- drivers/gpu/drm/radeon/radeon_pm.c | 2
-rw-r--r-- drivers/gpu/drm/radeon/radeon_sync.c | 4
-rw-r--r-- drivers/gpu/drm/radeon/radeon_trace.h | 2
-rw-r--r-- drivers/gpu/drm/radeon/radeon_ttm.c | 9
-rw-r--r-- drivers/gpu/drm/radeon/radeon_uvd.c | 2
-rw-r--r-- drivers/gpu/drm/sti/sti_hda.c | 6
-rw-r--r-- drivers/gpu/drm/sti/sti_hdmi_tx3g4c28phy.c | 4
-rw-r--r-- drivers/gpu/drm/sti/sti_hqvdp.c | 2
-rw-r--r-- drivers/gpu/drm/sti/sti_tvout.c | 18
-rw-r--r-- drivers/gpu/drm/stm/ltdc.c | 33
-rw-r--r-- drivers/gpu/drm/sun4i/sun8i_ui_layer.c | 7
-rw-r--r-- drivers/gpu/drm/sun4i/sun8i_vi_layer.c | 8
-rw-r--r-- drivers/gpu/drm/ttm/ttm_bo.c | 202
-rw-r--r-- drivers/gpu/drm/ttm/ttm_bo_util.c | 377
-rw-r--r-- drivers/gpu/drm/ttm/ttm_bo_vm.c | 64
-rw-r--r-- drivers/gpu/drm/ttm/ttm_module.c | 35
-rw-r--r-- drivers/gpu/drm/ttm/ttm_range_manager.c | 57
-rw-r--r-- drivers/gpu/drm/ttm/ttm_resource.c | 228
-rw-r--r-- drivers/gpu/drm/ttm/ttm_sys_manager.c | 11
-rw-r--r-- drivers/gpu/drm/ttm/ttm_tt.c | 47
-rw-r--r-- drivers/gpu/drm/vboxvideo/hgsmi_base.c | 19
-rw-r--r-- drivers/gpu/drm/vboxvideo/modesetting.c | 20
-rw-r--r-- drivers/gpu/drm/vc4/vc4_hdmi.c | 26
-rw-r--r-- drivers/gpu/drm/vc4/vc4_hdmi.h | 3
-rw-r--r-- drivers/gpu/drm/vgem/vgem_drv.c | 2
-rw-r--r-- drivers/gpu/drm/vgem/vgem_fence.c | 3
-rw-r--r-- drivers/gpu/drm/virtio/virtgpu_ioctl.c | 5
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_blit.c | 8
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_bo.c | 42
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c | 10
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c | 2
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_context.c | 12
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c | 10
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 12
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c | 30
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c | 8
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 2
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_shader.c | 12
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_streamoutput.c | 6
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_surface.c | 6
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_thp.c | 46
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c | 12
206 files changed, 5663 insertions, 1917 deletions
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 594ac6c3a1f4..7ff89690a976 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -98,8 +98,8 @@ config DRM_DEBUG_DP_MST_TOPOLOGY_REFS
config DRM_FBDEV_EMULATION
bool "Enable legacy fbdev support for your modesetting driver"
depends on DRM
+ depends on FB
select DRM_KMS_HELPER
- select FB
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
@@ -379,6 +379,19 @@ source "drivers/gpu/drm/xlnx/Kconfig"
source "drivers/gpu/drm/gud/Kconfig"
+config DRM_HYPERV
+ tristate "DRM Support for Hyper-V synthetic video device"
+ depends on DRM && PCI && MMU && HYPERV
+ select DRM_KMS_HELPER
+ select DRM_GEM_SHMEM_HELPER
+ help
+ This is a KMS driver for Hyper-V synthetic video device. Choose this
+ option if you would like to enable drm driver for Hyper-V virtual
+ machine. Unselect Hyper-V framebuffer driver (CONFIG_FB_HYPERV) so
+ that DRM driver is used by default.
+
+ If M is selected the module will be called hyperv_drm.
+
# Keep legacy drivers last
menuconfig DRM_LEGACY
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index a91cc7684904..a118692a6df7 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -126,3 +126,4 @@ obj-$(CONFIG_DRM_MCDE) += mcde/
obj-$(CONFIG_DRM_TIDSS) += tidss/
obj-y += xlnx/
obj-y += gud/
+obj-$(CONFIG_DRM_HYPERV) += hyperv/
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index d6cb7cf76623..fdee98fae2ba 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -247,7 +247,7 @@ static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
if (!ef)
return -EINVAL;
- old = dma_resv_get_list(resv);
+ old = dma_resv_shared_list(resv);
if (!old)
return 0;
@@ -1671,7 +1671,7 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
* the next restore worker
*/
if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) &&
- bo->tbo.mem.mem_type == TTM_PL_SYSTEM)
+ bo->tbo.resource->mem_type == TTM_PL_SYSTEM)
is_invalid_userptr = true;
ret = vm_validate_pt_pd_bos(avm);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 861dd5f79d7f..6a242ec3f7ef 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -4136,9 +4136,9 @@ static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
list_for_each_entry(shadow, &adev->shadow_list, shadow_list) {
/* No need to recover an evicted BO */
- if (shadow->tbo.mem.mem_type != TTM_PL_TT ||
- shadow->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET ||
- shadow->parent->tbo.mem.mem_type != TTM_PL_VRAM)
+ if (shadow->tbo.resource->mem_type != TTM_PL_TT ||
+ shadow->tbo.resource->start == AMDGPU_BO_INVALID_OFFSET ||
+ shadow->parent->tbo.resource->mem_type != TTM_PL_VRAM)
continue;
r = amdgpu_bo_restore_shadow(shadow, &next);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 2b978b7e7a4b..2b6a66c849d4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -203,9 +203,8 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
goto unpin;
}
- r = dma_resv_get_fences_rcu(new_abo->tbo.base.resv, &work->excl,
- &work->shared_count,
- &work->shared);
+ r = dma_resv_get_fences(new_abo->tbo.base.resv, &work->excl,
+ &work->shared_count, &work->shared);
if (unlikely(r != 0)) {
DRM_ERROR("failed to get fences for buffer\n");
goto unpin;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
index 6ec1312b7389..c3053b83b80c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
@@ -49,10 +49,10 @@ __dma_resv_make_exclusive(struct dma_resv *obj)
unsigned int count;
int r;
- if (!dma_resv_get_list(obj)) /* no shared fences to convert */
+ if (!dma_resv_shared_list(obj)) /* no shared fences to convert */
return 0;
- r = dma_resv_get_fences_rcu(obj, NULL, &count, &fences);
+ r = dma_resv_get_fences(obj, NULL, &count, &fences);
if (r)
return r;
@@ -226,12 +226,12 @@ static struct sg_table *amdgpu_dma_buf_map(struct dma_buf_attachment *attach,
if (r)
return ERR_PTR(r);
- } else if (!(amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type) &
+ } else if (!(amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type) &
AMDGPU_GEM_DOMAIN_GTT)) {
return ERR_PTR(-EBUSY);
}
- switch (bo->tbo.mem.mem_type) {
+ switch (bo->tbo.resource->mem_type) {
case TTM_PL_TT:
sgt = drm_prime_pages_to_sg(obj->dev,
bo->tbo.ttm->pages,
@@ -245,8 +245,9 @@ static struct sg_table *amdgpu_dma_buf_map(struct dma_buf_attachment *attach,
break;
case TTM_PL_VRAM:
- r = amdgpu_vram_mgr_alloc_sgt(adev, &bo->tbo.mem, 0,
- bo->tbo.base.size, attach->dev, dir, &sgt);
+ r = amdgpu_vram_mgr_alloc_sgt(adev, bo->tbo.resource, 0,
+ bo->tbo.base.size, attach->dev,
+ dir, &sgt);
if (r)
return ERR_PTR(r);
break;
@@ -436,7 +437,7 @@ amdgpu_dma_buf_move_notify(struct dma_buf_attachment *attach)
struct amdgpu_vm_bo_base *bo_base;
int r;
- if (bo->tbo.mem.mem_type == TTM_PL_SYSTEM)
+ if (bo->tbo.resource->mem_type == TTM_PL_SYSTEM)
return;
r = ttm_bo_validate(&bo->tbo, &placement, &ctx);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 88ea77b1e68a..9cf4beaf646c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -226,7 +226,7 @@ static void amdgpu_gem_object_close(struct drm_gem_object *obj,
if (!amdgpu_vm_ready(vm))
goto out_unlock;
- fence = dma_resv_get_excl(bo->tbo.base.resv);
+ fence = dma_resv_excl_fence(bo->tbo.base.resv);
if (fence) {
amdgpu_bo_fence(bo, fence, true);
fence = NULL;
@@ -526,8 +526,7 @@ int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
return -ENOENT;
}
robj = gem_to_amdgpu_bo(gobj);
- ret = dma_resv_wait_timeout_rcu(robj->tbo.base.resv, true, true,
- timeout);
+ ret = dma_resv_wait_timeout(robj->tbo.base.resv, true, true, timeout);
/* ret == 0 means not signaled,
* ret > 0 means signaled
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index 4fc1515381f5..0174f7817ce2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -101,7 +101,7 @@ void amdgpu_gmc_get_pde_for_bo(struct amdgpu_bo *bo, int level,
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
- switch (bo->tbo.mem.mem_type) {
+ switch (bo->tbo.resource->mem_type) {
case TTM_PL_TT:
*addr = bo->tbo.ttm->dma_address[0];
break;
@@ -112,7 +112,7 @@ void amdgpu_gmc_get_pde_for_bo(struct amdgpu_bo *bo, int level,
*addr = 0;
break;
}
- *flags = amdgpu_ttm_tt_pde_flags(bo->tbo.ttm, &bo->tbo.mem);
+ *flags = amdgpu_ttm_tt_pde_flags(bo->tbo.ttm, bo->tbo.resource);
amdgpu_gmc_get_vm_pde(adev, level, addr, flags);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
index 9ab33048923e..ec96e0b26b11 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
@@ -22,18 +22,26 @@
* Authors: Christian König
*/
+#include <drm/ttm/ttm_range_manager.h>
+
#include "amdgpu.h"
+struct amdgpu_gtt_node {
+ struct ttm_buffer_object *tbo;
+ struct ttm_range_mgr_node base;
+};
+
static inline struct amdgpu_gtt_mgr *
to_gtt_mgr(struct ttm_resource_manager *man)
{
return container_of(man, struct amdgpu_gtt_mgr, manager);
}
-struct amdgpu_gtt_node {
- struct drm_mm_node node;
- struct ttm_buffer_object *tbo;
-};
+static inline struct amdgpu_gtt_node *
+to_amdgpu_gtt_node(struct ttm_resource *res)
+{
+ return container_of(res, struct amdgpu_gtt_node, base.base);
+}
/**
* DOC: mem_info_gtt_total
@@ -93,13 +101,15 @@ const struct attribute_group amdgpu_gtt_mgr_attr_group = {
/**
* amdgpu_gtt_mgr_has_gart_addr - Check if mem has address space
*
- * @mem: the mem object to check
+ * @res: the mem object to check
*
* Check if a mem object has already address space allocated.
*/
-bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_resource *mem)
+bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_resource *res)
{
- return mem->mm_node != NULL;
+ struct amdgpu_gtt_node *node = to_amdgpu_gtt_node(res);
+
+ return drm_mm_node_allocated(&node->base.mm_nodes[0]);
}
/**
@@ -115,54 +125,57 @@ bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_resource *mem)
static int amdgpu_gtt_mgr_new(struct ttm_resource_manager *man,
struct ttm_buffer_object *tbo,
const struct ttm_place *place,
- struct ttm_resource *mem)
+ struct ttm_resource **res)
{
struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man);
+ uint32_t num_pages = PFN_UP(tbo->base.size);
struct amdgpu_gtt_node *node;
int r;
spin_lock(&mgr->lock);
- if ((&tbo->mem == mem || tbo->mem.mem_type != TTM_PL_TT) &&
- atomic64_read(&mgr->available) < mem->num_pages) {
+ if (tbo->resource && tbo->resource->mem_type != TTM_PL_TT &&
+ atomic64_read(&mgr->available) < num_pages) {
spin_unlock(&mgr->lock);
return -ENOSPC;
}
- atomic64_sub(mem->num_pages, &mgr->available);
+ atomic64_sub(num_pages, &mgr->available);
spin_unlock(&mgr->lock);
- if (!place->lpfn) {
- mem->mm_node = NULL;
- mem->start = AMDGPU_BO_INVALID_OFFSET;
- return 0;
- }
-
- node = kzalloc(sizeof(*node), GFP_KERNEL);
+ node = kzalloc(struct_size(node, base.mm_nodes, 1), GFP_KERNEL);
if (!node) {
r = -ENOMEM;
goto err_out;
}
node->tbo = tbo;
+ ttm_resource_init(tbo, place, &node->base.base);
- spin_lock(&mgr->lock);
- r = drm_mm_insert_node_in_range(&mgr->mm, &node->node, mem->num_pages,
- tbo->page_alignment, 0, place->fpfn,
- place->lpfn, DRM_MM_INSERT_BEST);
- spin_unlock(&mgr->lock);
-
- if (unlikely(r))
- goto err_free;
-
- mem->mm_node = node;
- mem->start = node->node.start;
+ if (place->lpfn) {
+ spin_lock(&mgr->lock);
+ r = drm_mm_insert_node_in_range(&mgr->mm,
+ &node->base.mm_nodes[0],
+ num_pages, tbo->page_alignment,
+ 0, place->fpfn, place->lpfn,
+ DRM_MM_INSERT_BEST);
+ spin_unlock(&mgr->lock);
+ if (unlikely(r))
+ goto err_free;
+
+ node->base.base.start = node->base.mm_nodes[0].start;
+ } else {
+ node->base.mm_nodes[0].start = 0;
+ node->base.mm_nodes[0].size = node->base.base.num_pages;
+ node->base.base.start = AMDGPU_BO_INVALID_OFFSET;
+ }
+ *res = &node->base.base;
return 0;
err_free:
kfree(node);
err_out:
- atomic64_add(mem->num_pages, &mgr->available);
+ atomic64_add(num_pages, &mgr->available);
return r;
}
@@ -176,19 +189,18 @@ err_out:
* Free the allocated GTT again.
*/
static void amdgpu_gtt_mgr_del(struct ttm_resource_manager *man,
- struct ttm_resource *mem)
+ struct ttm_resource *res)
{
+ struct amdgpu_gtt_node *node = to_amdgpu_gtt_node(res);
struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man);
- struct amdgpu_gtt_node *node = mem->mm_node;
- if (node) {
- spin_lock(&mgr->lock);
- drm_mm_remove_node(&node->node);
- spin_unlock(&mgr->lock);
- kfree(node);
- }
+ spin_lock(&mgr->lock);
+ if (drm_mm_node_allocated(&node->base.mm_nodes[0]))
+ drm_mm_remove_node(&node->base.mm_nodes[0]);
+ spin_unlock(&mgr->lock);
+ atomic64_add(res->num_pages, &mgr->available);
- atomic64_add(mem->num_pages, &mgr->available);
+ kfree(node);
}
/**
@@ -224,7 +236,7 @@ int amdgpu_gtt_mgr_recover(struct ttm_resource_manager *man)
adev = container_of(mgr, typeof(*adev), mman.gtt_mgr);
spin_lock(&mgr->lock);
drm_mm_for_each_node(mm_node, &mgr->mm) {
- node = container_of(mm_node, struct amdgpu_gtt_node, node);
+ node = container_of(mm_node, typeof(*node), base.mm_nodes[0]);
r = amdgpu_ttm_recover_gart(node->tbo);
if (r)
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
index c7f3aae23c62..b7fb72bff2c1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
@@ -112,7 +112,7 @@ void amdgpu_pasid_free_delayed(struct dma_resv *resv,
unsigned count;
int r;
- r = dma_resv_get_fences_rcu(resv, NULL, &count, &fences);
+ r = dma_resv_get_fences(resv, NULL, &count, &fences);
if (r)
goto fallback;
@@ -156,8 +156,7 @@ fallback:
/* Not enough memory for the delayed delete, as last resort
* block for all the fences to complete.
*/
- dma_resv_wait_timeout_rcu(resv, true, false,
- MAX_SCHEDULE_TIMEOUT);
+ dma_resv_wait_timeout(resv, true, false, MAX_SCHEDULE_TIMEOUT);
amdgpu_pasid_free(pasid);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
index 2741c28ff1b5..d6c54c7f7679 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
@@ -75,8 +75,8 @@ static bool amdgpu_mn_invalidate_gfx(struct mmu_interval_notifier *mni,
mmu_interval_set_seq(mni, cur_seq);
- r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, true, false,
- MAX_SCHEDULE_TIMEOUT);
+ r = dma_resv_wait_timeout(bo->tbo.base.resv, true, false,
+ MAX_SCHEDULE_TIMEOUT);
mutex_unlock(&adev->notifier_lock);
if (r <= 0)
DRM_ERROR("(%ld) failed to wait for user bo\n", r);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 16417802bba9..b7a2070d90af 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -364,14 +364,14 @@ int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev,
if (cpu_addr)
amdgpu_bo_kunmap(*bo_ptr);
- ttm_resource_free(&(*bo_ptr)->tbo, &(*bo_ptr)->tbo.mem);
+ ttm_resource_free(&(*bo_ptr)->tbo, &(*bo_ptr)->tbo.resource);
for (i = 0; i < (*bo_ptr)->placement.num_placement; ++i) {
(*bo_ptr)->placements[i].fpfn = offset >> PAGE_SHIFT;
(*bo_ptr)->placements[i].lpfn = (offset + size) >> PAGE_SHIFT;
}
r = ttm_bo_mem_space(&(*bo_ptr)->tbo, &(*bo_ptr)->placement,
- &(*bo_ptr)->tbo.mem, &ctx);
+ &(*bo_ptr)->tbo.resource, &ctx);
if (r)
goto error;
@@ -575,15 +575,15 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
return r;
if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
- bo->tbo.mem.mem_type == TTM_PL_VRAM &&
- bo->tbo.mem.start < adev->gmc.visible_vram_size >> PAGE_SHIFT)
+ bo->tbo.resource->mem_type == TTM_PL_VRAM &&
+ bo->tbo.resource->start < adev->gmc.visible_vram_size >> PAGE_SHIFT)
amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved,
ctx.bytes_moved);
else
amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved, 0);
if (bp->flags & AMDGPU_GEM_CREATE_VRAM_CLEARED &&
- bo->tbo.mem.mem_type == TTM_PL_VRAM) {
+ bo->tbo.resource->mem_type == TTM_PL_VRAM) {
struct dma_fence *fence;
r = amdgpu_fill_buffer(bo, 0, bo->tbo.base.resv, &fence);
@@ -777,12 +777,12 @@ int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
return 0;
}
- r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, false, false,
- MAX_SCHEDULE_TIMEOUT);
+ r = dma_resv_wait_timeout(bo->tbo.base.resv, false, false,
+ MAX_SCHEDULE_TIMEOUT);
if (r < 0)
return r;
- r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.mem.num_pages, &bo->kmap);
+ r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.resource->num_pages, &bo->kmap);
if (r)
return r;
@@ -905,8 +905,8 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
domain = amdgpu_bo_get_preferred_pin_domain(adev, domain);
if (bo->tbo.pin_count) {
- uint32_t mem_type = bo->tbo.mem.mem_type;
- uint32_t mem_flags = bo->tbo.mem.placement;
+ uint32_t mem_type = bo->tbo.resource->mem_type;
+ uint32_t mem_flags = bo->tbo.resource->placement;
if (!(domain & amdgpu_mem_type_to_domain(mem_type)))
return -EINVAL;
@@ -956,7 +956,7 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
ttm_bo_pin(&bo->tbo);
- domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
+ domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type);
if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
atomic64_add(amdgpu_bo_size(bo), &adev->vram_pin_size);
atomic64_add(amdgpu_vram_mgr_bo_visible_size(bo),
@@ -1008,11 +1008,11 @@ void amdgpu_bo_unpin(struct amdgpu_bo *bo)
if (bo->tbo.base.import_attach)
dma_buf_unpin(bo->tbo.base.import_attach);
- if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
+ if (bo->tbo.resource->mem_type == TTM_PL_VRAM) {
atomic64_sub(amdgpu_bo_size(bo), &adev->vram_pin_size);
atomic64_sub(amdgpu_vram_mgr_bo_visible_size(bo),
&adev->visible_pin_size);
- } else if (bo->tbo.mem.mem_type == TTM_PL_TT) {
+ } else if (bo->tbo.resource->mem_type == TTM_PL_TT) {
atomic64_sub(amdgpu_bo_size(bo), &adev->gart_pin_size);
}
}
@@ -1245,7 +1245,7 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
struct amdgpu_bo *abo;
- struct ttm_resource *old_mem = &bo->mem;
+ struct ttm_resource *old_mem = bo->resource;
if (!amdgpu_bo_is_amdgpu_bo(bo))
return;
@@ -1256,7 +1256,7 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
amdgpu_bo_kunmap(abo);
if (abo->tbo.base.dma_buf && !abo->tbo.base.import_attach &&
- bo->mem.mem_type != TTM_PL_SYSTEM)
+ bo->resource->mem_type != TTM_PL_SYSTEM)
dma_buf_move_notify(abo->tbo.base.dma_buf);
/* remember the eviction */
@@ -1276,7 +1276,7 @@ void amdgpu_bo_get_memory(struct amdgpu_bo *bo, uint64_t *vram_mem,
{
unsigned int domain;
- domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
+ domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type);
switch (domain) {
case AMDGPU_GEM_DOMAIN_VRAM:
*vram_mem += amdgpu_bo_size(bo);
@@ -1318,7 +1318,7 @@ void amdgpu_bo_release_notify(struct ttm_buffer_object *bo)
if (bo->base.resv == &bo->base._resv)
amdgpu_amdkfd_remove_fence_on_pt_pd_bos(abo);
- if (bo->mem.mem_type != TTM_PL_VRAM || !bo->mem.mm_node ||
+ if (bo->resource->mem_type != TTM_PL_VRAM ||
!(abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE))
return;
@@ -1355,10 +1355,10 @@ vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
/* Remember that this BO was accessed by the CPU */
abo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
- if (bo->mem.mem_type != TTM_PL_VRAM)
+ if (bo->resource->mem_type != TTM_PL_VRAM)
return 0;
- offset = bo->mem.start << PAGE_SHIFT;
+ offset = bo->resource->start << PAGE_SHIFT;
if ((offset + bo->base.size) <= adev->gmc.visible_vram_size)
return 0;
@@ -1381,9 +1381,9 @@ vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
else if (unlikely(r))
return VM_FAULT_SIGBUS;
- offset = bo->mem.start << PAGE_SHIFT;
+ offset = bo->resource->start << PAGE_SHIFT;
/* this should never happen */
- if (bo->mem.mem_type == TTM_PL_VRAM &&
+ if (bo->resource->mem_type == TTM_PL_VRAM &&
(offset + bo->base.size) > adev->gmc.visible_vram_size)
return VM_FAULT_SIGBUS;
@@ -1468,11 +1468,11 @@ int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr)
*/
u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
{
- WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_SYSTEM);
+ WARN_ON_ONCE(bo->tbo.resource->mem_type == TTM_PL_SYSTEM);
WARN_ON_ONCE(!dma_resv_is_locked(bo->tbo.base.resv) &&
!bo->tbo.pin_count && bo->tbo.type != ttm_bo_type_kernel);
- WARN_ON_ONCE(bo->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET);
- WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_VRAM &&
+ WARN_ON_ONCE(bo->tbo.resource->start == AMDGPU_BO_INVALID_OFFSET);
+ WARN_ON_ONCE(bo->tbo.resource->mem_type == TTM_PL_VRAM &&
!(bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS));
return amdgpu_bo_gpu_offset_no_check(bo);
@@ -1490,8 +1490,8 @@ u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo)
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
uint64_t offset;
- offset = (bo->tbo.mem.start << PAGE_SHIFT) +
- amdgpu_ttm_domain_start(adev, bo->tbo.mem.mem_type);
+ offset = (bo->tbo.resource->start << PAGE_SHIFT) +
+ amdgpu_ttm_domain_start(adev, bo->tbo.resource->mem_type);
return amdgpu_gmc_sign_extend(offset);
}
@@ -1544,7 +1544,7 @@ u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m)
unsigned int pin_count;
u64 size;
- domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
+ domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type);
switch (domain) {
case AMDGPU_GEM_DOMAIN_VRAM:
placement = "VRAM";
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index 90eab1c31027..126df03a7066 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -223,10 +223,10 @@ static inline bool amdgpu_bo_in_cpu_visible_vram(struct amdgpu_bo *bo)
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
struct amdgpu_res_cursor cursor;
- if (bo->tbo.mem.mem_type != TTM_PL_VRAM)
+ if (bo->tbo.resource->mem_type != TTM_PL_VRAM)
return false;
- amdgpu_res_first(&bo->tbo.mem, 0, amdgpu_bo_size(bo), &cursor);
+ amdgpu_res_first(bo->tbo.resource, 0, amdgpu_bo_size(bo), &cursor);
while (cursor.remaining) {
if (cursor.start < adev->gmc.visible_vram_size)
return true;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_preempt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_preempt_mgr.c
index d607f314cc1b..f6aff7ce5160 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_preempt_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_preempt_mgr.c
@@ -66,14 +66,18 @@ static DEVICE_ATTR_RO(mem_info_preempt_used);
static int amdgpu_preempt_mgr_new(struct ttm_resource_manager *man,
struct ttm_buffer_object *tbo,
const struct ttm_place *place,
- struct ttm_resource *mem)
+ struct ttm_resource **res)
{
struct amdgpu_preempt_mgr *mgr = to_preempt_mgr(man);
- atomic64_add(mem->num_pages, &mgr->used);
+ *res = kzalloc(sizeof(**res), GFP_KERNEL);
+ if (!*res)
+ return -ENOMEM;
- mem->mm_node = NULL;
- mem->start = AMDGPU_BO_INVALID_OFFSET;
+ ttm_resource_init(tbo, place, *res);
+ (*res)->start = AMDGPU_BO_INVALID_OFFSET;
+
+ atomic64_add((*res)->num_pages, &mgr->used);
return 0;
}
@@ -86,11 +90,12 @@ static int amdgpu_preempt_mgr_new(struct ttm_resource_manager *man,
* Free the allocated GTT again.
*/
static void amdgpu_preempt_mgr_del(struct ttm_resource_manager *man,
- struct ttm_resource *mem)
+ struct ttm_resource *res)
{
struct amdgpu_preempt_mgr *mgr = to_preempt_mgr(man);
- atomic64_sub(mem->num_pages, &mgr->used);
+ atomic64_sub(res->num_pages, &mgr->used);
+ kfree(res);
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
index 40f2adf305bc..59e0fefb15aa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
@@ -28,6 +28,7 @@
#include <drm/drm_mm.h>
#include <drm/ttm/ttm_resource.h>
+#include <drm/ttm/ttm_range_manager.h>
/* state back for walking over vram_mgr and gtt_mgr allocations */
struct amdgpu_res_cursor {
@@ -53,7 +54,7 @@ static inline void amdgpu_res_first(struct ttm_resource *res,
{
struct drm_mm_node *node;
- if (!res || !res->mm_node) {
+ if (!res) {
cur->start = start;
cur->size = size;
cur->remaining = size;
@@ -63,7 +64,7 @@ static inline void amdgpu_res_first(struct ttm_resource *res,
BUG_ON(start + size > res->num_pages << PAGE_SHIFT);
- node = res->mm_node;
+ node = to_ttm_range_mgr_node(res)->mm_nodes;
while (start >= node->size << PAGE_SHIFT)
start -= node++->size << PAGE_SHIFT;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index 4e558632a5d2..1b2ceccaf5b0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -210,10 +210,10 @@ int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync,
return -EINVAL;
/* always sync to the exclusive fence */
- f = dma_resv_get_excl(resv);
+ f = dma_resv_excl_fence(resv);
r = amdgpu_sync_fence(sync, f);
- flist = dma_resv_get_list(resv);
+ flist = dma_resv_shared_list(resv);
if (!flist || r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index 792d20261846..0527772fe1b8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -127,8 +127,8 @@ TRACE_EVENT(amdgpu_bo_create,
TP_fast_assign(
__entry->bo = bo;
- __entry->pages = bo->tbo.mem.num_pages;
- __entry->type = bo->tbo.mem.mem_type;
+ __entry->pages = bo->tbo.resource->num_pages;
+ __entry->type = bo->tbo.resource->mem_type;
__entry->prefer = bo->preferred_domains;
__entry->allow = bo->allowed_domains;
__entry->visible = bo->flags;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 118911059dc1..80dff29f2bc7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -45,6 +45,7 @@
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
+#include <drm/ttm/ttm_range_manager.h>
#include <drm/amdgpu_drm.h>
@@ -125,7 +126,8 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
rcu_read_unlock();
return;
}
- switch (bo->mem.mem_type) {
+
+ switch (bo->resource->mem_type) {
case AMDGPU_PL_GDS:
case AMDGPU_PL_GWS:
case AMDGPU_PL_OA:
@@ -460,7 +462,7 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
{
struct amdgpu_device *adev;
struct amdgpu_bo *abo;
- struct ttm_resource *old_mem = &bo->mem;
+ struct ttm_resource *old_mem = bo->resource;
int r;
if (new_mem->mem_type == TTM_PL_TT ||
@@ -495,7 +497,7 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
return r;
amdgpu_ttm_backend_unbind(bo->bdev, bo->ttm);
- ttm_resource_free(bo, &bo->mem);
+ ttm_resource_free(bo, &bo->resource);
ttm_bo_assign_mem(bo, new_mem);
goto out;
}
@@ -605,7 +607,8 @@ static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
struct amdgpu_res_cursor cursor;
- amdgpu_res_first(&bo->mem, (u64)page_offset << PAGE_SHIFT, 0, &cursor);
+ amdgpu_res_first(bo->resource, (u64)page_offset << PAGE_SHIFT, 0,
+ &cursor);
return (adev->gmc.aper_base + cursor.start) >> PAGE_SHIFT;
}
@@ -954,50 +957,50 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
struct ttm_operation_ctx ctx = { false, false };
struct amdgpu_ttm_tt *gtt = (void *)bo->ttm;
- struct ttm_resource tmp;
struct ttm_placement placement;
struct ttm_place placements;
+ struct ttm_resource *tmp;
uint64_t addr, flags;
int r;
- if (bo->mem.start != AMDGPU_BO_INVALID_OFFSET)
+ if (bo->resource->start != AMDGPU_BO_INVALID_OFFSET)
return 0;
addr = amdgpu_gmc_agp_addr(bo);
if (addr != AMDGPU_BO_INVALID_OFFSET) {
- bo->mem.start = addr >> PAGE_SHIFT;
- } else {
+ bo->resource->start = addr >> PAGE_SHIFT;
+ return 0;
+ }
- /* allocate GART space */
- placement.num_placement = 1;
- placement.placement = &placements;
- placement.num_busy_placement = 1;
- placement.busy_placement = &placements;
- placements.fpfn = 0;
- placements.lpfn = adev->gmc.gart_size >> PAGE_SHIFT;
- placements.mem_type = TTM_PL_TT;
- placements.flags = bo->mem.placement;
-
- r = ttm_bo_mem_space(bo, &placement, &tmp, &ctx);
- if (unlikely(r))
- return r;
+ /* allocate GART space */
+ placement.num_placement = 1;
+ placement.placement = &placements;
+ placement.num_busy_placement = 1;
+ placement.busy_placement = &placements;
+ placements.fpfn = 0;
+ placements.lpfn = adev->gmc.gart_size >> PAGE_SHIFT;
+ placements.mem_type = TTM_PL_TT;
+ placements.flags = bo->resource->placement;
- /* compute PTE flags for this buffer object */
- flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, &tmp);
+ r = ttm_bo_mem_space(bo, &placement, &tmp, &ctx);
+ if (unlikely(r))
+ return r;
- /* Bind pages */
- gtt->offset = (u64)tmp.start << PAGE_SHIFT;
- r = amdgpu_ttm_gart_bind(adev, bo, flags);
- if (unlikely(r)) {
- ttm_resource_free(bo, &tmp);
- return r;
- }
+ /* compute PTE flags for this buffer object */
+ flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, tmp);
- amdgpu_gart_invalidate_tlb(adev);
- ttm_resource_free(bo, &bo->mem);
- bo->mem = tmp;
+ /* Bind pages */
+ gtt->offset = (u64)tmp->start << PAGE_SHIFT;
+ r = amdgpu_ttm_gart_bind(adev, bo, flags);
+ if (unlikely(r)) {
+ ttm_resource_free(bo, &tmp);
+ return r;
}
+ amdgpu_gart_invalidate_tlb(adev);
+ ttm_resource_free(bo, &bo->resource);
+ ttm_bo_assign_mem(bo, tmp);
+
return 0;
}
@@ -1016,7 +1019,7 @@ int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo)
if (!tbo->ttm)
return 0;
- flags = amdgpu_ttm_tt_pte_flags(adev, tbo->ttm, &tbo->mem);
+ flags = amdgpu_ttm_tt_pte_flags(adev, tbo->ttm, tbo->resource);
r = amdgpu_ttm_gart_bind(adev, tbo, flags);
return r;
@@ -1330,12 +1333,16 @@ uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
const struct ttm_place *place)
{
- unsigned long num_pages = bo->mem.num_pages;
+ unsigned long num_pages = bo->resource->num_pages;
struct amdgpu_res_cursor cursor;
struct dma_resv_list *flist;
struct dma_fence *f;
int i;
+ /* Swapout? */
+ if (bo->resource->mem_type == TTM_PL_SYSTEM)
+ return true;
+
if (bo->type == ttm_bo_type_kernel &&
!amdgpu_vm_evictable(ttm_to_amdgpu_bo(bo)))
return false;
@@ -1344,7 +1351,7 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
* If true, then return false as any KFD process needs all its BOs to
* be resident to run successfully
*/
- flist = dma_resv_get_list(bo->base.resv);
+ flist = dma_resv_shared_list(bo->base.resv);
if (flist) {
for (i = 0; i < flist->shared_count; ++i) {
f = rcu_dereference_protected(flist->shared[i],
@@ -1354,7 +1361,7 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
}
}
- switch (bo->mem.mem_type) {
+ switch (bo->resource->mem_type) {
case AMDGPU_PL_PREEMPT:
/* Preemptible BOs don't own system resources managed by the
* driver (pages, VRAM, GART space). They point to resources
@@ -1372,7 +1379,7 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
case TTM_PL_VRAM:
/* Check each drm MM node individually */
- amdgpu_res_first(&bo->mem, 0, (u64)num_pages << PAGE_SHIFT,
+ amdgpu_res_first(bo->resource, 0, (u64)num_pages << PAGE_SHIFT,
&cursor);
while (cursor.remaining) {
if (place->fpfn < PFN_DOWN(cursor.start + cursor.size)
@@ -1414,10 +1421,10 @@ static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
uint32_t value = 0;
int ret = 0;
- if (bo->mem.mem_type != TTM_PL_VRAM)
+ if (bo->resource->mem_type != TTM_PL_VRAM)
return -EIO;
- amdgpu_res_first(&bo->mem, offset, len, &cursor);
+ amdgpu_res_first(bo->resource, offset, len, &cursor);
while (cursor.remaining) {
uint64_t aligned_pos = cursor.start & ~(uint64_t)3;
uint64_t bytes = 4 - (cursor.start & 3);
@@ -1952,21 +1959,21 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
return -EINVAL;
}
- if (bo->tbo.mem.mem_type == AMDGPU_PL_PREEMPT) {
+ if (bo->tbo.resource->mem_type == AMDGPU_PL_PREEMPT) {
DRM_ERROR("Trying to clear preemptible memory.\n");
return -EINVAL;
}
- if (bo->tbo.mem.mem_type == TTM_PL_TT) {
+ if (bo->tbo.resource->mem_type == TTM_PL_TT) {
r = amdgpu_ttm_alloc_gart(&bo->tbo);
if (r)
return r;
}
- num_bytes = bo->tbo.mem.num_pages << PAGE_SHIFT;
+ num_bytes = bo->tbo.resource->num_pages << PAGE_SHIFT;
num_loops = 0;
- amdgpu_res_first(&bo->tbo.mem, 0, num_bytes, &cursor);
+ amdgpu_res_first(bo->tbo.resource, 0, num_bytes, &cursor);
while (cursor.remaining) {
num_loops += DIV_ROUND_UP_ULL(cursor.size, max_bytes);
amdgpu_res_next(&cursor, cursor.size);
@@ -1991,12 +1998,13 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
}
}
- amdgpu_res_first(&bo->tbo.mem, 0, num_bytes, &cursor);
+ amdgpu_res_first(bo->tbo.resource, 0, num_bytes, &cursor);
while (cursor.remaining) {
uint32_t cur_size = min_t(uint64_t, cursor.size, max_bytes);
uint64_t dst_addr = cursor.start;
- dst_addr += amdgpu_ttm_domain_start(adev, bo->tbo.mem.mem_type);
+ dst_addr += amdgpu_ttm_domain_start(adev,
+ bo->tbo.resource->mem_type);
amdgpu_emit_fill_buffer(adev, &job->ibs[0], src_data, dst_addr,
cur_size);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index ce8f80a959c6..0f576f294d8a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -1125,9 +1125,8 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
ib->length_dw = 16;
if (direct) {
- r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv,
- true, false,
- msecs_to_jiffies(10));
+ r = dma_resv_wait_timeout(bo->tbo.base.resv, true, false,
+ msecs_to_jiffies(10));
if (r == 0)
r = -ETIMEDOUT;
if (r < 0)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index b4e46c7222f1..18246b5b6ee3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -342,7 +342,7 @@ static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
amdgpu_vm_bo_idle(base);
if (bo->preferred_domains &
- amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type))
+ amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type))
return;
/*
@@ -658,10 +658,11 @@ void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
if (!bo->parent)
continue;
- ttm_bo_move_to_lru_tail(&bo->tbo, &bo->tbo.mem,
+ ttm_bo_move_to_lru_tail(&bo->tbo, bo->tbo.resource,
&vm->lru_bulk_move);
if (shadow)
- ttm_bo_move_to_lru_tail(&shadow->tbo, &shadow->tbo.mem,
+ ttm_bo_move_to_lru_tail(&shadow->tbo,
+ shadow->tbo.resource,
&vm->lru_bulk_move);
}
spin_unlock(&adev->mman.bdev.lru_lock);
@@ -1859,10 +1860,10 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
struct drm_gem_object *gobj = dma_buf->priv;
struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);
- if (abo->tbo.mem.mem_type == TTM_PL_VRAM)
+ if (abo->tbo.resource->mem_type == TTM_PL_VRAM)
bo = gem_to_amdgpu_bo(gobj);
}
- mem = &bo->tbo.mem;
+ mem = bo->tbo.resource;
if (mem->mem_type == TTM_PL_TT ||
mem->mem_type == AMDGPU_PL_PREEMPT)
pages_addr = bo->tbo.ttm->dma_address;
@@ -1923,7 +1924,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
* next command submission.
*/
if (bo && bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv) {
- uint32_t mem_type = bo->tbo.mem.mem_type;
+ uint32_t mem_type = bo->tbo.resource->mem_type;
if (!(bo->preferred_domains &
amdgpu_mem_type_to_domain(mem_type)))
@@ -2064,13 +2065,12 @@ static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
unsigned i, shared_count;
int r;
- r = dma_resv_get_fences_rcu(resv, &excl,
- &shared_count, &shared);
+ r = dma_resv_get_fences(resv, &excl, &shared_count, &shared);
if (r) {
/* Not enough memory to grab the fence list, as last resort
* block for all the fences to complete.
*/
- dma_resv_wait_timeout_rcu(resv, true, false,
+ dma_resv_wait_timeout(resv, true, false,
MAX_SCHEDULE_TIMEOUT);
return;
}
@@ -2682,7 +2682,7 @@ bool amdgpu_vm_evictable(struct amdgpu_bo *bo)
return true;
/* Don't evict VM page tables while they are busy */
- if (!dma_resv_test_signaled_rcu(bo->tbo.base.resv, true))
+ if (!dma_resv_test_signaled(bo->tbo.base.resv, true))
return false;
/* Try to block ongoing updates */
@@ -2862,8 +2862,8 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
*/
long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
{
- timeout = dma_resv_wait_timeout_rcu(vm->root.base.bo->tbo.base.resv,
- true, true, timeout);
+ timeout = dma_resv_wait_timeout(vm->root.base.bo->tbo.base.resv, true,
+ true, timeout);
if (timeout <= 0)
return timeout;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index 07e007dbff7c..436ec246a7da 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -23,6 +23,8 @@
*/
#include <linux/dma-mapping.h>
+#include <drm/ttm/ttm_range_manager.h>
+
#include "amdgpu.h"
#include "amdgpu_vm.h"
#include "amdgpu_res_cursor.h"
@@ -217,19 +219,20 @@ static u64 amdgpu_vram_mgr_vis_size(struct amdgpu_device *adev,
u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
- struct ttm_resource *mem = &bo->tbo.mem;
- struct drm_mm_node *nodes = mem->mm_node;
- unsigned pages = mem->num_pages;
+ struct ttm_resource *res = bo->tbo.resource;
+ unsigned pages = res->num_pages;
+ struct drm_mm_node *mm;
u64 usage;
if (amdgpu_gmc_vram_full_visible(&adev->gmc))
return amdgpu_bo_size(bo);
- if (mem->start >= adev->gmc.visible_vram_size >> PAGE_SHIFT)
+ if (res->start >= adev->gmc.visible_vram_size >> PAGE_SHIFT)
return 0;
- for (usage = 0; nodes && pages; pages -= nodes->size, nodes++)
- usage += amdgpu_vram_mgr_vis_size(adev, nodes);
+ mm = &container_of(res, struct ttm_range_mgr_node, base)->mm_nodes[0];
+ for (usage = 0; pages; pages -= mm->size, mm++)
+ usage += amdgpu_vram_mgr_vis_size(adev, mm);
return usage;
}
@@ -365,15 +368,15 @@ static void amdgpu_vram_mgr_virt_start(struct ttm_resource *mem,
static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
struct ttm_buffer_object *tbo,
const struct ttm_place *place,
- struct ttm_resource *mem)
+ struct ttm_resource **res)
{
unsigned long lpfn, num_nodes, pages_per_node, pages_left, pages;
struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
struct amdgpu_device *adev = to_amdgpu_device(mgr);
uint64_t vis_usage = 0, mem_bytes, max_bytes;
+ struct ttm_range_mgr_node *node;
struct drm_mm *mm = &mgr->mm;
enum drm_mm_insert_mode mode;
- struct drm_mm_node *nodes;
unsigned i;
int r;
@@ -386,10 +389,10 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
max_bytes -= AMDGPU_VM_RESERVED_VRAM;
/* bail out quickly if there's likely not enough VRAM for this BO */
- mem_bytes = (u64)mem->num_pages << PAGE_SHIFT;
+ mem_bytes = tbo->base.size;
if (atomic64_add_return(mem_bytes, &mgr->usage) > max_bytes) {
- atomic64_sub(mem_bytes, &mgr->usage);
- return -ENOSPC;
+ r = -ENOSPC;
+ goto error_sub;
}
if (place->flags & TTM_PL_FLAG_CONTIGUOUS) {
@@ -404,22 +407,23 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
#endif
pages_per_node = max_t(uint32_t, pages_per_node,
tbo->page_alignment);
- num_nodes = DIV_ROUND_UP(mem->num_pages, pages_per_node);
+ num_nodes = DIV_ROUND_UP_ULL(PFN_UP(mem_bytes), pages_per_node);
}
- nodes = kvmalloc_array((uint32_t)num_nodes, sizeof(*nodes),
- GFP_KERNEL | __GFP_ZERO);
- if (!nodes) {
- atomic64_sub(mem_bytes, &mgr->usage);
- return -ENOMEM;
+ node = kvmalloc(struct_size(node, mm_nodes, num_nodes),
+ GFP_KERNEL | __GFP_ZERO);
+ if (!node) {
+ r = -ENOMEM;
+ goto error_sub;
}
+ ttm_resource_init(tbo, place, &node->base);
+
mode = DRM_MM_INSERT_BEST;
if (place->flags & TTM_PL_FLAG_TOPDOWN)
mode = DRM_MM_INSERT_HIGH;
- mem->start = 0;
- pages_left = mem->num_pages;
+ pages_left = node->base.num_pages;
/* Limit maximum size to 2GB due to SG table limitations */
pages = min(pages_left, 2UL << (30 - PAGE_SHIFT));
@@ -432,8 +436,9 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
if (pages >= pages_per_node)
alignment = pages_per_node;
- r = drm_mm_insert_node_in_range(mm, &nodes[i], pages, alignment,
- 0, place->fpfn, lpfn, mode);
+ r = drm_mm_insert_node_in_range(mm, &node->mm_nodes[i], pages,
+ alignment, 0, place->fpfn,
+ lpfn, mode);
if (unlikely(r)) {
if (pages > pages_per_node) {
if (is_power_of_2(pages))
@@ -442,11 +447,11 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
pages = rounddown_pow_of_two(pages);
continue;
}
- goto error;
+ goto error_free;
}
- vis_usage += amdgpu_vram_mgr_vis_size(adev, &nodes[i]);
- amdgpu_vram_mgr_virt_start(mem, &nodes[i]);
+ vis_usage += amdgpu_vram_mgr_vis_size(adev, &node->mm_nodes[i]);
+ amdgpu_vram_mgr_virt_start(&node->base, &node->mm_nodes[i]);
pages_left -= pages;
++i;
@@ -456,19 +461,20 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
spin_unlock(&mgr->lock);
if (i == 1)
- mem->placement |= TTM_PL_FLAG_CONTIGUOUS;
+ node->base.placement |= TTM_PL_FLAG_CONTIGUOUS;
atomic64_add(vis_usage, &mgr->vis_usage);
- mem->mm_node = nodes;
+ *res = &node->base;
return 0;
-error:
+error_free:
while (i--)
- drm_mm_remove_node(&nodes[i]);
+ drm_mm_remove_node(&node->mm_nodes[i]);
spin_unlock(&mgr->lock);
- atomic64_sub(mem->num_pages << PAGE_SHIFT, &mgr->usage);
+ kvfree(node);
- kvfree(nodes);
+error_sub:
+ atomic64_sub(mem_bytes, &mgr->usage);
return r;
}
@@ -481,24 +487,22 @@ error:
* Free the allocated VRAM again.
*/
static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man,
- struct ttm_resource *mem)
+ struct ttm_resource *res)
{
+ struct ttm_range_mgr_node *node = to_ttm_range_mgr_node(res);
struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
struct amdgpu_device *adev = to_amdgpu_device(mgr);
- struct drm_mm_node *nodes = mem->mm_node;
uint64_t usage = 0, vis_usage = 0;
- unsigned pages = mem->num_pages;
-
- if (!mem->mm_node)
- return;
+ unsigned i, pages;
spin_lock(&mgr->lock);
- while (pages) {
- pages -= nodes->size;
- drm_mm_remove_node(nodes);
- usage += nodes->size << PAGE_SHIFT;
- vis_usage += amdgpu_vram_mgr_vis_size(adev, nodes);
- ++nodes;
+ for (i = 0, pages = res->num_pages; pages;
+ pages -= node->mm_nodes[i].size, ++i) {
+ struct drm_mm_node *mm = &node->mm_nodes[i];
+
+ drm_mm_remove_node(mm);
+ usage += mm->size << PAGE_SHIFT;
+ vis_usage += amdgpu_vram_mgr_vis_size(adev, mm);
}
amdgpu_vram_mgr_do_reserve(man);
spin_unlock(&mgr->lock);
@@ -506,8 +510,7 @@ static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man,
atomic64_sub(usage, &mgr->usage);
atomic64_sub(vis_usage, &mgr->vis_usage);
- kvfree(mem->mm_node);
- mem->mm_node = NULL;
+ kvfree(node);
}
/**
@@ -524,7 +527,7 @@ static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man,
* Allocate and fill a sg table from a VRAM allocation.
*/
int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev,
- struct ttm_resource *mem,
+ struct ttm_resource *res,
u64 offset, u64 length,
struct device *dev,
enum dma_data_direction dir,
@@ -540,7 +543,7 @@ int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev,
return -ENOMEM;
/* Determine the number of DRM_MM nodes to export */
- amdgpu_res_first(mem, offset, length, &cursor);
+ amdgpu_res_first(res, offset, length, &cursor);
while (cursor.remaining) {
num_entries++;
amdgpu_res_next(&cursor, cursor.size);
@@ -560,7 +563,7 @@ int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev,
* and the number of bytes from it. Access the following
* DRM_MM node(s) if more buffer needs to exported
*/
- amdgpu_res_first(mem, offset, length, &cursor);
+ amdgpu_res_first(res, offset, length, &cursor);
for_each_sgtable_sg((*sgt), sg, i) {
phys_addr_t phys = cursor.start + adev->gmc.aper_base;
size_t size = cursor.size;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
index fd8f544f0de2..2660f03e63a7 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
@@ -29,6 +29,7 @@
#include "amdgpu_object.h"
#include "amdgpu_vm.h"
#include "amdgpu_mn.h"
+#include "amdgpu_res_cursor.h"
#include "kfd_priv.h"
#include "kfd_svm.h"
#include "kfd_migrate.h"
@@ -205,34 +206,6 @@ svm_migrate_copy_done(struct amdgpu_device *adev, struct dma_fence *mfence)
return r;
}
-static uint64_t
-svm_migrate_node_physical_addr(struct amdgpu_device *adev,
- struct drm_mm_node **mm_node, uint64_t *offset)
-{
- struct drm_mm_node *node = *mm_node;
- uint64_t pos = *offset;
-
- if (node->start == AMDGPU_BO_INVALID_OFFSET) {
- pr_debug("drm node is not validated\n");
- return 0;
- }
-
- pr_debug("vram node start 0x%llx npages 0x%llx\n", node->start,
- node->size);
-
- if (pos >= node->size) {
- do {
- pos -= node->size;
- node++;
- } while (pos >= node->size);
-
- *mm_node = node;
- *offset = pos;
- }
-
- return (node->start + pos) << PAGE_SHIFT;
-}
-
unsigned long
svm_migrate_addr_to_pfn(struct amdgpu_device *adev, unsigned long addr)
{
@@ -297,11 +270,9 @@ svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
{
uint64_t npages = migrate->cpages;
struct device *dev = adev->dev;
- struct drm_mm_node *node;
+ struct amdgpu_res_cursor cursor;
dma_addr_t *src;
uint64_t *dst;
- uint64_t vram_addr;
- uint64_t offset;
uint64_t i, j;
int r;
@@ -317,19 +288,12 @@ svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
goto out;
}
- node = prange->ttm_res->mm_node;
- offset = prange->offset;
- vram_addr = svm_migrate_node_physical_addr(adev, &node, &offset);
- if (!vram_addr) {
- WARN_ONCE(1, "vram node address is 0\n");
- r = -ENOMEM;
- goto out;
- }
-
+ amdgpu_res_first(prange->ttm_res, prange->offset << PAGE_SHIFT,
+ npages << PAGE_SHIFT, &cursor);
for (i = j = 0; i < npages; i++) {
struct page *spage;
- dst[i] = vram_addr + (j << PAGE_SHIFT);
+ dst[i] = cursor.start + (j << PAGE_SHIFT);
migrate->dst[i] = svm_migrate_addr_to_pfn(adev, dst[i]);
svm_migrate_get_vram_page(prange, migrate->dst[i]);
@@ -354,18 +318,10 @@ svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
mfence);
if (r)
goto out_free_vram_pages;
- offset += j;
- vram_addr = (node->start + offset) << PAGE_SHIFT;
+ amdgpu_res_next(&cursor, j << PAGE_SHIFT);
j = 0;
} else {
- offset++;
- vram_addr += PAGE_SIZE;
- }
- if (offset >= node->size) {
- node++;
- pr_debug("next node size 0x%llx\n", node->size);
- vram_addr = node->start << PAGE_SHIFT;
- offset = 0;
+ amdgpu_res_next(&cursor, PAGE_SIZE);
}
continue;
}
@@ -373,19 +329,15 @@ svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
pr_debug("dma mapping src to 0x%llx, page_to_pfn 0x%lx\n",
src[i] >> PAGE_SHIFT, page_to_pfn(spage));
- if (j + offset >= node->size - 1 && i < npages - 1) {
+ if (j >= (cursor.size >> PAGE_SHIFT) - 1 && i < npages - 1) {
r = svm_migrate_copy_memory_gart(adev, src + i - j,
dst + i - j, j + 1,
FROM_RAM_TO_VRAM,
mfence);
if (r)
goto out_free_vram_pages;
-
- node++;
- pr_debug("next node size 0x%llx\n", node->size);
- vram_addr = node->start << PAGE_SHIFT;
- offset = 0;
- j = 0;
+ amdgpu_res_next(&cursor, (j + 1) * PAGE_SIZE);
+ j = 0;
} else {
j++;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index 0f18bd0dc64e..da569d417d1d 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -409,7 +409,7 @@ svm_range_validate_svm_bo(struct amdgpu_device *adev, struct svm_range *prange)
pr_debug("reuse old bo svms 0x%p [0x%lx 0x%lx]\n",
prange->svms, prange->start, prange->last);
- prange->ttm_res = &prange->svm_bo->bo->tbo.mem;
+ prange->ttm_res = prange->svm_bo->bo->tbo.resource;
return true;
}
@@ -515,7 +515,7 @@ svm_range_vram_node_new(struct amdgpu_device *adev, struct svm_range *prange,
svm_bo->bo = bo;
prange->svm_bo = svm_bo;
- prange->ttm_res = &bo->tbo.mem;
+ prange->ttm_res = bo->tbo.resource;
prange->offset = 0;
spin_lock(&svm_bo->list_lock);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index d08459227629..6fda0dfb78f8 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -8491,9 +8491,8 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
* deadlock during GPU reset when this fence will not signal
* but we hold reservation lock for the BO.
*/
- r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
- false,
- msecs_to_jiffies(5000));
+ r = dma_resv_wait_timeout(abo->tbo.base.resv, true, false,
+ msecs_to_jiffies(5000));
if (unlikely(r <= 0))
DRM_ERROR("Waiting for fences timed out!");
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
index c96e4b38d1d3..85b673613687 100644
--- a/drivers/gpu/drm/bridge/Kconfig
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -285,6 +285,16 @@ config DRM_TI_TFP410
help
Texas Instruments TFP410 DVI/HDMI Transmitter driver
+config DRM_TI_SN65DSI83
+ tristate "TI SN65DSI83 and SN65DSI84 DSI to LVDS bridge"
+ depends on OF
+ select DRM_KMS_HELPER
+ select REGMAP_I2C
+ select DRM_PANEL
+ select DRM_MIPI_DSI
+ help
+ Texas Instruments SN65DSI83 and SN65DSI84 DSI to LVDS Bridge driver
+
config DRM_TI_SN65DSI86
tristate "TI SN65DSI86 DSI to eDP bridge"
depends on OF
diff --git a/drivers/gpu/drm/bridge/Makefile b/drivers/gpu/drm/bridge/Makefile
index dff2b2897529..f2c73683cfcb 100644
--- a/drivers/gpu/drm/bridge/Makefile
+++ b/drivers/gpu/drm/bridge/Makefile
@@ -23,6 +23,7 @@ obj-$(CONFIG_DRM_TOSHIBA_TC358767) += tc358767.o
obj-$(CONFIG_DRM_TOSHIBA_TC358768) += tc358768.o
obj-$(CONFIG_DRM_TOSHIBA_TC358775) += tc358775.o
obj-$(CONFIG_DRM_I2C_ADV7511) += adv7511/
+obj-$(CONFIG_DRM_TI_SN65DSI83) += ti-sn65dsi83.o
obj-$(CONFIG_DRM_TI_SN65DSI86) += ti-sn65dsi86.o
obj-$(CONFIG_DRM_TI_TFP410) += ti-tfp410.o
obj-$(CONFIG_DRM_TI_TPD12S015) += ti-tpd12s015.o
diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi83.c b/drivers/gpu/drm/bridge/ti-sn65dsi83.c
new file mode 100644
index 000000000000..750f2172ef08
--- /dev/null
+++ b/drivers/gpu/drm/bridge/ti-sn65dsi83.c
@@ -0,0 +1,709 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * TI SN65DSI83,84,85 driver
+ *
+ * Currently supported:
+ * - SN65DSI83
+ * = 1x Single-link DSI ~ 1x Single-link LVDS
+ * - Supported
+ * - Single-link LVDS mode tested
+ * - SN65DSI84
+ * = 1x Single-link DSI ~ 2x Single-link or 1x Dual-link LVDS
+ * - Supported
+ * - Dual-link LVDS mode tested
+ * - 2x Single-link LVDS mode unsupported
+ * (should be easy to add by someone who has the HW)
+ * - SN65DSI85
+ * = 2x Single-link or 1x Dual-link DSI ~ 2x Single-link or 1x Dual-link LVDS
+ * - Unsupported
+ * (should be easy to add by someone who has the HW)
+ *
+ * Copyright (C) 2021 Marek Vasut <marex@denx.de>
+ *
+ * Based on previous work of:
+ * Valentin Raevsky <valentin@compulab.co.il>
+ * Philippe Schenker <philippe.schenker@toradex.com>
+ */
+
+#include <linux/bits.h>
+#include <linux/clk.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_graph.h>
+#include <linux/regmap.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_of.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
+#include <drm/drm_probe_helper.h>
+
+/* ID registers */
+#define REG_ID(n) (0x00 + (n))
+/* Reset and clock registers */
+#define REG_RC_RESET 0x09
+#define REG_RC_RESET_SOFT_RESET BIT(0)
+#define REG_RC_LVDS_PLL 0x0a
+#define REG_RC_LVDS_PLL_PLL_EN_STAT BIT(7)
+#define REG_RC_LVDS_PLL_LVDS_CLK_RANGE(n) (((n) & 0x7) << 1)
+#define REG_RC_LVDS_PLL_HS_CLK_SRC_DPHY BIT(0)
+#define REG_RC_DSI_CLK 0x0b
+#define REG_RC_DSI_CLK_DSI_CLK_DIVIDER(n) (((n) & 0x1f) << 3)
+#define REG_RC_DSI_CLK_REFCLK_MULTIPLIER(n) ((n) & 0x3)
+#define REG_RC_PLL_EN 0x0d
+#define REG_RC_PLL_EN_PLL_EN BIT(0)
+/* DSI registers */
+#define REG_DSI_LANE 0x10
+#define REG_DSI_LANE_LEFT_RIGHT_PIXELS BIT(7) /* DSI85-only */
+#define REG_DSI_LANE_DSI_CHANNEL_MODE_DUAL 0 /* DSI85-only */
+#define REG_DSI_LANE_DSI_CHANNEL_MODE_2SINGLE BIT(6) /* DSI85-only */
+#define REG_DSI_LANE_DSI_CHANNEL_MODE_SINGLE BIT(5)
+#define REG_DSI_LANE_CHA_DSI_LANES(n) (((n) & 0x3) << 3)
+#define REG_DSI_LANE_CHB_DSI_LANES(n) (((n) & 0x3) << 1)
+#define REG_DSI_LANE_SOT_ERR_TOL_DIS BIT(0)
+#define REG_DSI_EQ 0x11
+#define REG_DSI_EQ_CHA_DSI_DATA_EQ(n) (((n) & 0x3) << 6)
+#define REG_DSI_EQ_CHA_DSI_CLK_EQ(n) (((n) & 0x3) << 2)
+#define REG_DSI_CLK 0x12
+#define REG_DSI_CLK_CHA_DSI_CLK_RANGE(n) ((n) & 0xff)
+/* LVDS registers */
+#define REG_LVDS_FMT 0x18
+#define REG_LVDS_FMT_DE_NEG_POLARITY BIT(7)
+#define REG_LVDS_FMT_HS_NEG_POLARITY BIT(6)
+#define REG_LVDS_FMT_VS_NEG_POLARITY BIT(5)
+#define REG_LVDS_FMT_LVDS_LINK_CFG BIT(4) /* 0:AB 1:A-only */
+#define REG_LVDS_FMT_CHA_24BPP_MODE BIT(3)
+#define REG_LVDS_FMT_CHB_24BPP_MODE BIT(2)
+#define REG_LVDS_FMT_CHA_24BPP_FORMAT1 BIT(1)
+#define REG_LVDS_FMT_CHB_24BPP_FORMAT1 BIT(0)
+#define REG_LVDS_VCOM 0x19
+#define REG_LVDS_VCOM_CHA_LVDS_VOCM BIT(6)
+#define REG_LVDS_VCOM_CHB_LVDS_VOCM BIT(4)
+#define REG_LVDS_VCOM_CHA_LVDS_VOD_SWING(n) (((n) & 0x3) << 2)
+#define REG_LVDS_VCOM_CHB_LVDS_VOD_SWING(n) ((n) & 0x3)
+#define REG_LVDS_LANE 0x1a
+#define REG_LVDS_LANE_EVEN_ODD_SWAP BIT(6)
+#define REG_LVDS_LANE_CHA_REVERSE_LVDS BIT(5)
+#define REG_LVDS_LANE_CHB_REVERSE_LVDS BIT(4)
+#define REG_LVDS_LANE_CHA_LVDS_TERM BIT(1)
+#define REG_LVDS_LANE_CHB_LVDS_TERM BIT(0)
+#define REG_LVDS_CM 0x1b
+#define REG_LVDS_CM_CHA_LVDS_CM_ADJUST(n) (((n) & 0x3) << 4)
+#define REG_LVDS_CM_CHB_LVDS_CM_ADJUST(n) ((n) & 0x3)
+/* Video registers */
+#define REG_VID_CHA_ACTIVE_LINE_LENGTH_LOW 0x20
+#define REG_VID_CHA_ACTIVE_LINE_LENGTH_HIGH 0x21
+#define REG_VID_CHA_VERTICAL_DISPLAY_SIZE_LOW 0x24
+#define REG_VID_CHA_VERTICAL_DISPLAY_SIZE_HIGH 0x25
+#define REG_VID_CHA_SYNC_DELAY_LOW 0x28
+#define REG_VID_CHA_SYNC_DELAY_HIGH 0x29
+#define REG_VID_CHA_HSYNC_PULSE_WIDTH_LOW 0x2c
+#define REG_VID_CHA_HSYNC_PULSE_WIDTH_HIGH 0x2d
+#define REG_VID_CHA_VSYNC_PULSE_WIDTH_LOW 0x30
+#define REG_VID_CHA_VSYNC_PULSE_WIDTH_HIGH 0x31
+#define REG_VID_CHA_HORIZONTAL_BACK_PORCH 0x34
+#define REG_VID_CHA_VERTICAL_BACK_PORCH 0x36
+#define REG_VID_CHA_HORIZONTAL_FRONT_PORCH 0x38
+#define REG_VID_CHA_VERTICAL_FRONT_PORCH 0x3a
+#define REG_VID_CHA_TEST_PATTERN 0x3c
+/* IRQ registers */
+#define REG_IRQ_GLOBAL 0xe0
+#define REG_IRQ_GLOBAL_IRQ_EN BIT(0)
+#define REG_IRQ_EN 0xe1
+#define REG_IRQ_EN_CHA_SYNCH_ERR_EN BIT(7)
+#define REG_IRQ_EN_CHA_CRC_ERR_EN BIT(6)
+#define REG_IRQ_EN_CHA_UNC_ECC_ERR_EN BIT(5)
+#define REG_IRQ_EN_CHA_COR_ECC_ERR_EN BIT(4)
+#define REG_IRQ_EN_CHA_LLP_ERR_EN BIT(3)
+#define REG_IRQ_EN_CHA_SOT_BIT_ERR_EN BIT(2)
+#define REG_IRQ_EN_CHA_PLL_UNLOCK_EN BIT(0)
+#define REG_IRQ_STAT 0xe5
+#define REG_IRQ_STAT_CHA_SYNCH_ERR BIT(7)
+#define REG_IRQ_STAT_CHA_CRC_ERR BIT(6)
+#define REG_IRQ_STAT_CHA_UNC_ECC_ERR BIT(5)
+#define REG_IRQ_STAT_CHA_COR_ECC_ERR BIT(4)
+#define REG_IRQ_STAT_CHA_LLP_ERR BIT(3)
+#define REG_IRQ_STAT_CHA_SOT_BIT_ERR BIT(2)
+#define REG_IRQ_STAT_CHA_PLL_UNLOCK BIT(0)
+
+enum sn65dsi83_model {
+ MODEL_SN65DSI83,
+ MODEL_SN65DSI84,
+};
+
+struct sn65dsi83 {
+ struct drm_bridge bridge;
+ struct drm_display_mode mode;
+ struct device *dev;
+ struct regmap *regmap;
+ struct device_node *host_node;
+ struct mipi_dsi_device *dsi;
+ struct drm_bridge *panel_bridge;
+ struct gpio_desc *enable_gpio;
+ int dsi_lanes;
+ bool lvds_dual_link;
+ bool lvds_dual_link_even_odd_swap;
+ bool lvds_format_24bpp;
+ bool lvds_format_jeida;
+};
+
+static const struct regmap_range sn65dsi83_readable_ranges[] = {
+ regmap_reg_range(REG_ID(0), REG_ID(8)),
+ regmap_reg_range(REG_RC_LVDS_PLL, REG_RC_DSI_CLK),
+ regmap_reg_range(REG_RC_PLL_EN, REG_RC_PLL_EN),
+ regmap_reg_range(REG_DSI_LANE, REG_DSI_CLK),
+ regmap_reg_range(REG_LVDS_FMT, REG_LVDS_CM),
+ regmap_reg_range(REG_VID_CHA_ACTIVE_LINE_LENGTH_LOW,
+ REG_VID_CHA_ACTIVE_LINE_LENGTH_HIGH),
+ regmap_reg_range(REG_VID_CHA_VERTICAL_DISPLAY_SIZE_LOW,
+ REG_VID_CHA_VERTICAL_DISPLAY_SIZE_HIGH),
+ regmap_reg_range(REG_VID_CHA_SYNC_DELAY_LOW,
+ REG_VID_CHA_SYNC_DELAY_HIGH),
+ regmap_reg_range(REG_VID_CHA_HSYNC_PULSE_WIDTH_LOW,
+ REG_VID_CHA_HSYNC_PULSE_WIDTH_HIGH),
+ regmap_reg_range(REG_VID_CHA_VSYNC_PULSE_WIDTH_LOW,
+ REG_VID_CHA_VSYNC_PULSE_WIDTH_HIGH),
+ regmap_reg_range(REG_VID_CHA_HORIZONTAL_BACK_PORCH,
+ REG_VID_CHA_HORIZONTAL_BACK_PORCH),
+ regmap_reg_range(REG_VID_CHA_VERTICAL_BACK_PORCH,
+ REG_VID_CHA_VERTICAL_BACK_PORCH),
+ regmap_reg_range(REG_VID_CHA_HORIZONTAL_FRONT_PORCH,
+ REG_VID_CHA_HORIZONTAL_FRONT_PORCH),
+ regmap_reg_range(REG_VID_CHA_VERTICAL_FRONT_PORCH,
+ REG_VID_CHA_VERTICAL_FRONT_PORCH),
+ regmap_reg_range(REG_VID_CHA_TEST_PATTERN, REG_VID_CHA_TEST_PATTERN),
+ regmap_reg_range(REG_IRQ_GLOBAL, REG_IRQ_EN),
+ regmap_reg_range(REG_IRQ_STAT, REG_IRQ_STAT),
+};
+
+static const struct regmap_access_table sn65dsi83_readable_table = {
+ .yes_ranges = sn65dsi83_readable_ranges,
+ .n_yes_ranges = ARRAY_SIZE(sn65dsi83_readable_ranges),
+};
+
+static const struct regmap_range sn65dsi83_writeable_ranges[] = {
+ regmap_reg_range(REG_RC_RESET, REG_RC_DSI_CLK),
+ regmap_reg_range(REG_RC_PLL_EN, REG_RC_PLL_EN),
+ regmap_reg_range(REG_DSI_LANE, REG_DSI_CLK),
+ regmap_reg_range(REG_LVDS_FMT, REG_LVDS_CM),
+ regmap_reg_range(REG_VID_CHA_ACTIVE_LINE_LENGTH_LOW,
+ REG_VID_CHA_ACTIVE_LINE_LENGTH_HIGH),
+ regmap_reg_range(REG_VID_CHA_VERTICAL_DISPLAY_SIZE_LOW,
+ REG_VID_CHA_VERTICAL_DISPLAY_SIZE_HIGH),
+ regmap_reg_range(REG_VID_CHA_SYNC_DELAY_LOW,
+ REG_VID_CHA_SYNC_DELAY_HIGH),
+ regmap_reg_range(REG_VID_CHA_HSYNC_PULSE_WIDTH_LOW,
+ REG_VID_CHA_HSYNC_PULSE_WIDTH_HIGH),
+ regmap_reg_range(REG_VID_CHA_VSYNC_PULSE_WIDTH_LOW,
+ REG_VID_CHA_VSYNC_PULSE_WIDTH_HIGH),
+ regmap_reg_range(REG_VID_CHA_HORIZONTAL_BACK_PORCH,
+ REG_VID_CHA_HORIZONTAL_BACK_PORCH),
+ regmap_reg_range(REG_VID_CHA_VERTICAL_BACK_PORCH,
+ REG_VID_CHA_VERTICAL_BACK_PORCH),
+ regmap_reg_range(REG_VID_CHA_HORIZONTAL_FRONT_PORCH,
+ REG_VID_CHA_HORIZONTAL_FRONT_PORCH),
+ regmap_reg_range(REG_VID_CHA_VERTICAL_FRONT_PORCH,
+ REG_VID_CHA_VERTICAL_FRONT_PORCH),
+ regmap_reg_range(REG_VID_CHA_TEST_PATTERN, REG_VID_CHA_TEST_PATTERN),
+ regmap_reg_range(REG_IRQ_GLOBAL, REG_IRQ_EN),
+ regmap_reg_range(REG_IRQ_STAT, REG_IRQ_STAT),
+};
+
+static const struct regmap_access_table sn65dsi83_writeable_table = {
+ .yes_ranges = sn65dsi83_writeable_ranges,
+ .n_yes_ranges = ARRAY_SIZE(sn65dsi83_writeable_ranges),
+};
+
+static const struct regmap_range sn65dsi83_volatile_ranges[] = {
+ regmap_reg_range(REG_RC_RESET, REG_RC_RESET),
+ regmap_reg_range(REG_RC_LVDS_PLL, REG_RC_LVDS_PLL),
+ regmap_reg_range(REG_IRQ_STAT, REG_IRQ_STAT),
+};
+
+static const struct regmap_access_table sn65dsi83_volatile_table = {
+ .yes_ranges = sn65dsi83_volatile_ranges,
+ .n_yes_ranges = ARRAY_SIZE(sn65dsi83_volatile_ranges),
+};
+
+static const struct regmap_config sn65dsi83_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .rd_table = &sn65dsi83_readable_table,
+ .wr_table = &sn65dsi83_writeable_table,
+ .volatile_table = &sn65dsi83_volatile_table,
+ .cache_type = REGCACHE_RBTREE,
+ .max_register = REG_IRQ_STAT,
+};
+
+static struct sn65dsi83 *bridge_to_sn65dsi83(struct drm_bridge *bridge)
+{
+ return container_of(bridge, struct sn65dsi83, bridge);
+}
+
+static int sn65dsi83_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
+{
+ struct sn65dsi83 *ctx = bridge_to_sn65dsi83(bridge);
+ struct device *dev = ctx->dev;
+ struct mipi_dsi_device *dsi;
+ struct mipi_dsi_host *host;
+ int ret = 0;
+
+ const struct mipi_dsi_device_info info = {
+ .type = "sn65dsi83",
+ .channel = 0,
+ .node = NULL,
+ };
+
+ host = of_find_mipi_dsi_host_by_node(ctx->host_node);
+ if (!host) {
+ dev_err(dev, "failed to find dsi host\n");
+ return -EPROBE_DEFER;
+ }
+
+ dsi = mipi_dsi_device_register_full(host, &info);
+ if (IS_ERR(dsi)) {
+ return dev_err_probe(dev, PTR_ERR(dsi),
+ "failed to create dsi device\n");
+ }
+
+ ctx->dsi = dsi;
+
+ dsi->lanes = ctx->dsi_lanes;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST;
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret < 0) {
+ dev_err(dev, "failed to attach dsi to host\n");
+ goto err_dsi_attach;
+ }
+
+ return drm_bridge_attach(bridge->encoder, ctx->panel_bridge,
+ &ctx->bridge, flags);
+
+err_dsi_attach:
+ mipi_dsi_device_unregister(dsi);
+ return ret;
+}
+
+static void sn65dsi83_pre_enable(struct drm_bridge *bridge)
+{
+ struct sn65dsi83 *ctx = bridge_to_sn65dsi83(bridge);
+
+ /*
+ * Reset the chip, pull EN line low for t_reset=10ms,
+ * then high for t_en=1ms.
+ */
+ regcache_mark_dirty(ctx->regmap);
+ gpiod_set_value(ctx->enable_gpio, 0);
+ usleep_range(10000, 11000);
+ gpiod_set_value(ctx->enable_gpio, 1);
+ usleep_range(1000, 1100);
+}
+
+static u8 sn65dsi83_get_lvds_range(struct sn65dsi83 *ctx)
+{
+ /*
+ * The encoding of the LVDS_CLK_RANGE is as follows:
+ * 000 - 25 MHz <= LVDS_CLK < 37.5 MHz
+ * 001 - 37.5 MHz <= LVDS_CLK < 62.5 MHz
+ * 010 - 62.5 MHz <= LVDS_CLK < 87.5 MHz
+ * 011 - 87.5 MHz <= LVDS_CLK < 112.5 MHz
+ * 100 - 112.5 MHz <= LVDS_CLK < 137.5 MHz
+ * 101 - 137.5 MHz <= LVDS_CLK <= 154 MHz
+ * which is a range of 12.5MHz..162.5MHz in 25MHz steps, except that
+ * the ends of the ranges are clamped to the supported range. Since
+ * sn65dsi83_mode_valid() already filters the valid modes and limits
+ * the clock to 25..154 MHz, the range calculation can be simplified
+ * as follows:
+ */
+ int mode_clock = ctx->mode.clock;
+
+ if (ctx->lvds_dual_link)
+ mode_clock /= 2;
+
+ return (mode_clock - 12500) / 25000;
+}
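+
+/*
+ * Worked example for sn65dsi83_get_lvds_range(), using an illustrative
+ * 148500 kHz (148.5 MHz) pixel clock: single-link gives
+ * (148500 - 12500) / 25000 = 5, i.e. range code 101b (137.5..154 MHz);
+ * dual-link halves the clock to 74250 kHz and gives
+ * (74250 - 12500) / 25000 = 2, i.e. range code 010b (62.5..87.5 MHz).
+ */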
+
+static u8 sn65dsi83_get_dsi_range(struct sn65dsi83 *ctx)
+{
+ /*
+ * The encoding of the CHA_DSI_CLK_RANGE is as follows:
+ * 0x00 through 0x07 - Reserved
+ * 0x08 - 40 <= DSI_CLK < 45 MHz
+ * 0x09 - 45 <= DSI_CLK < 50 MHz
+ * ...
+ * 0x63 - 495 <= DSI_CLK < 500 MHz
+ * 0x64 - 500 MHz
+ * 0x65 through 0xFF - Reserved
+ * which is DSI clock in 5 MHz steps, clamped to 40..500 MHz.
+ * The DSI clock is calculated as:
+ * DSI_CLK = mode clock * bpp / dsi_data_lanes / 2
+ * the 2 is there because the bus is DDR.
+ */
+ return DIV_ROUND_UP(clamp((unsigned int)ctx->mode.clock *
+ mipi_dsi_pixel_format_to_bpp(ctx->dsi->format) /
+ ctx->dsi_lanes / 2, 40000U, 500000U), 5000U);
+}
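+
+/*
+ * Worked example for sn65dsi83_get_dsi_range(), assuming an illustrative
+ * 148500 kHz mode clock, RGB888 (24 bpp) and 4 DSI lanes:
+ * DSI_CLK = 148500 * 24 / 4 / 2 = 445500 kHz, within the 40..500 MHz
+ * clamp, so the register value is DIV_ROUND_UP(445500, 5000) = 90 = 0x5a.
+ */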
+
+static u8 sn65dsi83_get_dsi_div(struct sn65dsi83 *ctx)
+{
+ /* The divider is (DSI_CLK / LVDS_CLK) - 1, which really is: */
+ unsigned int dsi_div = mipi_dsi_pixel_format_to_bpp(ctx->dsi->format);
+
+ dsi_div /= ctx->dsi_lanes;
+
+ if (!ctx->lvds_dual_link)
+ dsi_div /= 2;
+
+ return dsi_div - 1;
+}
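+
+/*
+ * Worked example for sn65dsi83_get_dsi_div(), continuing the case above
+ * (24 bpp, 4 lanes, single-link LVDS): 24 / 4 = 6, halved to 3 because
+ * the LVDS clock equals the pixel clock on a single link, giving a
+ * register value of 3 - 1 = 2 (DSI_CLK / LVDS_CLK = 445.5 / 148.5 = 3).
+ */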
+
+static void sn65dsi83_enable(struct drm_bridge *bridge)
+{
+ struct sn65dsi83 *ctx = bridge_to_sn65dsi83(bridge);
+ unsigned int pval;
+ u16 val;
+ int ret;
+
+ /* Clear reset, disable PLL */
+ regmap_write(ctx->regmap, REG_RC_RESET, 0x00);
+ regmap_write(ctx->regmap, REG_RC_PLL_EN, 0x00);
+
+ /* Reference clock derived from DSI link clock. */
+ regmap_write(ctx->regmap, REG_RC_LVDS_PLL,
+ REG_RC_LVDS_PLL_LVDS_CLK_RANGE(sn65dsi83_get_lvds_range(ctx)) |
+ REG_RC_LVDS_PLL_HS_CLK_SRC_DPHY);
+ regmap_write(ctx->regmap, REG_DSI_CLK,
+ REG_DSI_CLK_CHA_DSI_CLK_RANGE(sn65dsi83_get_dsi_range(ctx)));
+ regmap_write(ctx->regmap, REG_RC_DSI_CLK,
+ REG_RC_DSI_CLK_DSI_CLK_DIVIDER(sn65dsi83_get_dsi_div(ctx)));
+
+ /* Set number of DSI lanes and LVDS link config. */
+ regmap_write(ctx->regmap, REG_DSI_LANE,
+ REG_DSI_LANE_DSI_CHANNEL_MODE_SINGLE |
+ REG_DSI_LANE_CHA_DSI_LANES(~(ctx->dsi_lanes - 1)) |
+ /* CHB is DSI85-only, set to default on DSI83/DSI84 */
+ REG_DSI_LANE_CHB_DSI_LANES(3));
+ /* No equalization. */
+ regmap_write(ctx->regmap, REG_DSI_EQ, 0x00);
+
+ /* Set up sync signal polarity. */
+ val = (ctx->mode.flags & DRM_MODE_FLAG_NHSYNC ?
+ REG_LVDS_FMT_HS_NEG_POLARITY : 0) |
+ (ctx->mode.flags & DRM_MODE_FLAG_NVSYNC ?
+ REG_LVDS_FMT_VS_NEG_POLARITY : 0);
+
+ /* Set up bits-per-pixel, 18bpp or 24bpp. */
+ if (ctx->lvds_format_24bpp) {
+ val |= REG_LVDS_FMT_CHA_24BPP_MODE;
+ if (ctx->lvds_dual_link)
+ val |= REG_LVDS_FMT_CHB_24BPP_MODE;
+ }
+
+ /* Set up LVDS format, JEIDA/Format 1 or SPWG/Format 2 */
+ if (ctx->lvds_format_jeida) {
+ val |= REG_LVDS_FMT_CHA_24BPP_FORMAT1;
+ if (ctx->lvds_dual_link)
+ val |= REG_LVDS_FMT_CHB_24BPP_FORMAT1;
+ }
+
+ /* Set up LVDS output config (DSI84,DSI85) */
+ if (!ctx->lvds_dual_link)
+ val |= REG_LVDS_FMT_LVDS_LINK_CFG;
+
+ regmap_write(ctx->regmap, REG_LVDS_FMT, val);
+ regmap_write(ctx->regmap, REG_LVDS_VCOM, 0x05);
+ regmap_write(ctx->regmap, REG_LVDS_LANE,
+ (ctx->lvds_dual_link_even_odd_swap ?
+ REG_LVDS_LANE_EVEN_ODD_SWAP : 0) |
+ REG_LVDS_LANE_CHA_LVDS_TERM |
+ REG_LVDS_LANE_CHB_LVDS_TERM);
+ regmap_write(ctx->regmap, REG_LVDS_CM, 0x00);
+
+ val = cpu_to_le16(ctx->mode.hdisplay);
+ regmap_bulk_write(ctx->regmap, REG_VID_CHA_ACTIVE_LINE_LENGTH_LOW,
+ &val, 2);
+ val = cpu_to_le16(ctx->mode.vdisplay);
+ regmap_bulk_write(ctx->regmap, REG_VID_CHA_VERTICAL_DISPLAY_SIZE_LOW,
+ &val, 2);
+ /* 32 + 1 pixel clock to ensure proper operation */
+ val = cpu_to_le16(32 + 1);
+ regmap_bulk_write(ctx->regmap, REG_VID_CHA_SYNC_DELAY_LOW, &val, 2);
+ val = cpu_to_le16(ctx->mode.hsync_end - ctx->mode.hsync_start);
+ regmap_bulk_write(ctx->regmap, REG_VID_CHA_HSYNC_PULSE_WIDTH_LOW,
+ &val, 2);
+ val = cpu_to_le16(ctx->mode.vsync_end - ctx->mode.vsync_start);
+ regmap_bulk_write(ctx->regmap, REG_VID_CHA_VSYNC_PULSE_WIDTH_LOW,
+ &val, 2);
+ regmap_write(ctx->regmap, REG_VID_CHA_HORIZONTAL_BACK_PORCH,
+ ctx->mode.htotal - ctx->mode.hsync_end);
+ regmap_write(ctx->regmap, REG_VID_CHA_VERTICAL_BACK_PORCH,
+ ctx->mode.vtotal - ctx->mode.vsync_end);
+ regmap_write(ctx->regmap, REG_VID_CHA_HORIZONTAL_FRONT_PORCH,
+ ctx->mode.hsync_start - ctx->mode.hdisplay);
+ regmap_write(ctx->regmap, REG_VID_CHA_VERTICAL_FRONT_PORCH,
+ ctx->mode.vsync_start - ctx->mode.vdisplay);
+ regmap_write(ctx->regmap, REG_VID_CHA_TEST_PATTERN, 0x00);
+
+ /* Enable PLL */
+ regmap_write(ctx->regmap, REG_RC_PLL_EN, REG_RC_PLL_EN_PLL_EN);
+ usleep_range(3000, 4000);
+ ret = regmap_read_poll_timeout(ctx->regmap, REG_RC_LVDS_PLL, pval,
+ pval & REG_RC_LVDS_PLL_PLL_EN_STAT,
+ 1000, 100000);
+ if (ret) {
+ dev_err(ctx->dev, "failed to lock PLL, ret=%i\n", ret);
+ /* On failure, disable PLL again and exit. */
+ regmap_write(ctx->regmap, REG_RC_PLL_EN, 0x00);
+ return;
+ }
+
+ /* Trigger reset after CSR register update. */
+ regmap_write(ctx->regmap, REG_RC_RESET, REG_RC_RESET_SOFT_RESET);
+
+ /* Clear all errors that got asserted during initialization. */
+ regmap_read(ctx->regmap, REG_IRQ_STAT, &pval);
+ regmap_write(ctx->regmap, REG_IRQ_STAT, pval);
+}
+
+static void sn65dsi83_disable(struct drm_bridge *bridge)
+{
+ struct sn65dsi83 *ctx = bridge_to_sn65dsi83(bridge);
+
+ /* Clear reset, disable PLL */
+ regmap_write(ctx->regmap, REG_RC_RESET, 0x00);
+ regmap_write(ctx->regmap, REG_RC_PLL_EN, 0x00);
+}
+
+static void sn65dsi83_post_disable(struct drm_bridge *bridge)
+{
+ struct sn65dsi83 *ctx = bridge_to_sn65dsi83(bridge);
+
+ /* Put the chip in reset, pull EN line low. */
+ gpiod_set_value(ctx->enable_gpio, 0);
+}
+
+static enum drm_mode_status
+sn65dsi83_mode_valid(struct drm_bridge *bridge,
+ const struct drm_display_info *info,
+ const struct drm_display_mode *mode)
+{
+ /* LVDS output clock range 25..154 MHz */
+ if (mode->clock < 25000)
+ return MODE_CLOCK_LOW;
+ if (mode->clock > 154000)
+ return MODE_CLOCK_HIGH;
+
+ return MODE_OK;
+}
+
+static void sn65dsi83_mode_set(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ const struct drm_display_mode *adj)
+{
+ struct sn65dsi83 *ctx = bridge_to_sn65dsi83(bridge);
+
+ ctx->mode = *adj;
+}
+
+static bool sn65dsi83_mode_fixup(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adj)
+{
+ struct sn65dsi83 *ctx = bridge_to_sn65dsi83(bridge);
+ u32 input_bus_format = MEDIA_BUS_FMT_RGB888_1X24;
+ struct drm_encoder *encoder = bridge->encoder;
+ struct drm_device *ddev = encoder->dev;
+ struct drm_connector *connector;
+
+ /* The DSI format is always RGB888_1X24 */
+ list_for_each_entry(connector, &ddev->mode_config.connector_list, head) {
+ switch (connector->display_info.bus_formats[0]) {
+ case MEDIA_BUS_FMT_RGB666_1X7X3_SPWG:
+ ctx->lvds_format_24bpp = false;
+ ctx->lvds_format_jeida = true;
+ break;
+ case MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA:
+ ctx->lvds_format_24bpp = true;
+ ctx->lvds_format_jeida = true;
+ break;
+ case MEDIA_BUS_FMT_RGB888_1X7X4_SPWG:
+ ctx->lvds_format_24bpp = true;
+ ctx->lvds_format_jeida = false;
+ break;
+ default:
+ /*
+ * Some bridges still don't set the correct
+ * LVDS bus pixel format, use SPWG24 default
+ * format until those are fixed.
+ */
+ ctx->lvds_format_24bpp = true;
+ ctx->lvds_format_jeida = false;
+ dev_warn(ctx->dev,
+ "Unsupported LVDS bus format 0x%04x, please check output bridge driver. Falling back to SPWG24.\n",
+ connector->display_info.bus_formats[0]);
+ break;
+ }
+
+ drm_display_info_set_bus_formats(&connector->display_info,
+ &input_bus_format, 1);
+ }
+
+ return true;
+}
+
+static const struct drm_bridge_funcs sn65dsi83_funcs = {
+ .attach = sn65dsi83_attach,
+ .pre_enable = sn65dsi83_pre_enable,
+ .enable = sn65dsi83_enable,
+ .disable = sn65dsi83_disable,
+ .post_disable = sn65dsi83_post_disable,
+ .mode_valid = sn65dsi83_mode_valid,
+ .mode_set = sn65dsi83_mode_set,
+ .mode_fixup = sn65dsi83_mode_fixup,
+};
+
+static int sn65dsi83_parse_dt(struct sn65dsi83 *ctx, enum sn65dsi83_model model)
+{
+ struct drm_bridge *panel_bridge;
+ struct device *dev = ctx->dev;
+ struct device_node *endpoint;
+ struct drm_panel *panel;
+ int ret;
+
+ endpoint = of_graph_get_endpoint_by_regs(dev->of_node, 0, 0);
+ ctx->dsi_lanes = of_property_count_u32_elems(endpoint, "data-lanes");
+ ctx->host_node = of_graph_get_remote_port_parent(endpoint);
+ of_node_put(endpoint);
+
+ if (ctx->dsi_lanes < 0 || ctx->dsi_lanes > 4)
+ return -EINVAL;
+ if (!ctx->host_node)
+ return -ENODEV;
+
+ ctx->lvds_dual_link = false;
+ ctx->lvds_dual_link_even_odd_swap = false;
+ if (model != MODEL_SN65DSI83) {
+ struct device_node *port2, *port3;
+ int dual_link;
+
+ port2 = of_graph_get_port_by_id(dev->of_node, 2);
+ port3 = of_graph_get_port_by_id(dev->of_node, 3);
+ dual_link = drm_of_lvds_get_dual_link_pixel_order(port2, port3);
+ of_node_put(port2);
+ of_node_put(port3);
+
+ if (dual_link == DRM_LVDS_DUAL_LINK_ODD_EVEN_PIXELS) {
+ ctx->lvds_dual_link = true;
+ /* Odd pixels to LVDS Channel A, even pixels to B */
+ ctx->lvds_dual_link_even_odd_swap = false;
+ } else if (dual_link == DRM_LVDS_DUAL_LINK_EVEN_ODD_PIXELS) {
+ ctx->lvds_dual_link = true;
+ /* Even pixels to LVDS Channel A, odd pixels to B */
+ ctx->lvds_dual_link_even_odd_swap = true;
+ }
+ }
+
+ ret = drm_of_find_panel_or_bridge(dev->of_node, 2, 0, &panel, &panel_bridge);
+ if (ret < 0)
+ return ret;
+ if (panel) {
+ panel_bridge = devm_drm_panel_bridge_add(dev, panel);
+ if (IS_ERR(panel_bridge))
+ return PTR_ERR(panel_bridge);
+ }
+
+ ctx->panel_bridge = panel_bridge;
+
+ return 0;
+}
+
+static int sn65dsi83_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ enum sn65dsi83_model model;
+ struct sn65dsi83 *ctx;
+ int ret;
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->dev = dev;
+
+ if (dev->of_node) {
+ model = (enum sn65dsi83_model)(uintptr_t)
+ of_device_get_match_data(dev);
+ } else {
+ model = id->driver_data;
+ }
+
+ ctx->enable_gpio = devm_gpiod_get(ctx->dev, "enable", GPIOD_OUT_LOW);
+ if (IS_ERR(ctx->enable_gpio))
+ return PTR_ERR(ctx->enable_gpio);
+
+ ret = sn65dsi83_parse_dt(ctx, model);
+ if (ret)
+ return ret;
+
+ ctx->regmap = devm_regmap_init_i2c(client, &sn65dsi83_regmap_config);
+ if (IS_ERR(ctx->regmap))
+ return PTR_ERR(ctx->regmap);
+
+ dev_set_drvdata(dev, ctx);
+ i2c_set_clientdata(client, ctx);
+
+ ctx->bridge.funcs = &sn65dsi83_funcs;
+ ctx->bridge.of_node = dev->of_node;
+ drm_bridge_add(&ctx->bridge);
+
+ return 0;
+}
+
+static int sn65dsi83_remove(struct i2c_client *client)
+{
+ struct sn65dsi83 *ctx = i2c_get_clientdata(client);
+
+ mipi_dsi_detach(ctx->dsi);
+ mipi_dsi_device_unregister(ctx->dsi);
+ drm_bridge_remove(&ctx->bridge);
+ of_node_put(ctx->host_node);
+
+ return 0;
+}
+
+static struct i2c_device_id sn65dsi83_id[] = {
+ { "ti,sn65dsi83", MODEL_SN65DSI83 },
+ { "ti,sn65dsi84", MODEL_SN65DSI84 },
+ {},
+};
+MODULE_DEVICE_TABLE(i2c, sn65dsi83_id);
+
+static const struct of_device_id sn65dsi83_match_table[] = {
+ { .compatible = "ti,sn65dsi83", .data = (void *)MODEL_SN65DSI83 },
+ { .compatible = "ti,sn65dsi84", .data = (void *)MODEL_SN65DSI84 },
+ {},
+};
+MODULE_DEVICE_TABLE(of, sn65dsi83_match_table);
+
+static struct i2c_driver sn65dsi83_driver = {
+ .probe = sn65dsi83_probe,
+ .remove = sn65dsi83_remove,
+ .id_table = sn65dsi83_id,
+ .driver = {
+ .name = "sn65dsi83",
+ .of_match_table = sn65dsi83_match_table,
+ },
+};
+module_i2c_driver(sn65dsi83_driver);
+
+MODULE_AUTHOR("Marek Vasut <marex@denx.de>");
+MODULE_DESCRIPTION("TI SN65DSI83 DSI to LVDS bridge driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/drm_cache.c b/drivers/gpu/drm/drm_cache.c
index 79a50ef1250f..546599f19a93 100644
--- a/drivers/gpu/drm/drm_cache.c
+++ b/drivers/gpu/drm/drm_cache.c
@@ -28,6 +28,7 @@
* Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
*/
+#include <linux/dma-buf-map.h>
#include <linux/export.h>
#include <linux/highmem.h>
#include <linux/mem_encrypt.h>
@@ -35,6 +36,9 @@
#include <drm/drm_cache.h>
+/* A small bounce buffer that fits on the stack. */
+#define MEMCPY_BOUNCE_SIZE 128
+
#if defined(CONFIG_X86)
#include <asm/smp.h>
@@ -209,3 +213,147 @@ bool drm_need_swiotlb(int dma_bits)
return max_iomem > ((u64)1 << dma_bits);
}
EXPORT_SYMBOL(drm_need_swiotlb);
+
+static void memcpy_fallback(struct dma_buf_map *dst,
+ const struct dma_buf_map *src,
+ unsigned long len)
+{
+ if (!dst->is_iomem && !src->is_iomem) {
+ memcpy(dst->vaddr, src->vaddr, len);
+ } else if (!src->is_iomem) {
+ dma_buf_map_memcpy_to(dst, src->vaddr, len);
+ } else if (!dst->is_iomem) {
+ memcpy_fromio(dst->vaddr, src->vaddr_iomem, len);
+ } else {
+ /*
+ * Bounce size is not performance tuned, but using a
+ * bounce buffer like this is significantly faster than
+ * resorting to ioreadxx() + iowritexx().
+ */
+ char bounce[MEMCPY_BOUNCE_SIZE];
+ void __iomem *_src = src->vaddr_iomem;
+ void __iomem *_dst = dst->vaddr_iomem;
+
+ while (len >= MEMCPY_BOUNCE_SIZE) {
+ memcpy_fromio(bounce, _src, MEMCPY_BOUNCE_SIZE);
+ memcpy_toio(_dst, bounce, MEMCPY_BOUNCE_SIZE);
+ _src += MEMCPY_BOUNCE_SIZE;
+ _dst += MEMCPY_BOUNCE_SIZE;
+ len -= MEMCPY_BOUNCE_SIZE;
+ }
+ if (len) {
+ memcpy_fromio(bounce, _src, len);
+ memcpy_toio(_dst, bounce, len);
+ }
+ }
+}
+
+#ifdef CONFIG_X86
+
+static DEFINE_STATIC_KEY_FALSE(has_movntdqa);
+
+static void __memcpy_ntdqa(void *dst, const void *src, unsigned long len)
+{
+ kernel_fpu_begin();
+
+ while (len >= 4) {
+ asm("movntdqa (%0), %%xmm0\n"
+ "movntdqa 16(%0), %%xmm1\n"
+ "movntdqa 32(%0), %%xmm2\n"
+ "movntdqa 48(%0), %%xmm3\n"
+ "movaps %%xmm0, (%1)\n"
+ "movaps %%xmm1, 16(%1)\n"
+ "movaps %%xmm2, 32(%1)\n"
+ "movaps %%xmm3, 48(%1)\n"
+ :: "r" (src), "r" (dst) : "memory");
+ src += 64;
+ dst += 64;
+ len -= 4;
+ }
+ while (len--) {
+ asm("movntdqa (%0), %%xmm0\n"
+ "movaps %%xmm0, (%1)\n"
+ :: "r" (src), "r" (dst) : "memory");
+ src += 16;
+ dst += 16;
+ }
+
+ kernel_fpu_end();
+}
+
+/*
+ * __drm_memcpy_from_wc copies @len bytes from @src to @dst using
+ * non-temporal instructions where available. Note that all arguments
+ * (@src, @dst) must be aligned to 16 bytes and @len must be a multiple
+ * of 16.
+ */
+static void __drm_memcpy_from_wc(void *dst, const void *src, unsigned long len)
+{
+ if (unlikely(((unsigned long)dst | (unsigned long)src | len) & 15))
+ memcpy(dst, src, len);
+ else if (likely(len))
+ __memcpy_ntdqa(dst, src, len >> 4);
+}
+
+/**
+ * drm_memcpy_from_wc - Perform the fastest available memcpy from a source
+ * that may be WC.
+ * @dst: The destination pointer
+ * @src: The source pointer
+ * @len: The size of the area to transfer in bytes
+ *
+ * Tries an arch-optimized memcpy for prefetched reading out of a WC region,
+ * and if no such beast is available, falls back to a normal memcpy.
+ */
+void drm_memcpy_from_wc(struct dma_buf_map *dst,
+ const struct dma_buf_map *src,
+ unsigned long len)
+{
+ if (WARN_ON(in_interrupt())) {
+ memcpy_fallback(dst, src, len);
+ return;
+ }
+
+ if (static_branch_likely(&has_movntdqa)) {
+ __drm_memcpy_from_wc(dst->is_iomem ?
+ (void __force *)dst->vaddr_iomem :
+ dst->vaddr,
+ src->is_iomem ?
+ (void const __force *)src->vaddr_iomem :
+ src->vaddr,
+ len);
+ return;
+ }
+
+ memcpy_fallback(dst, src, len);
+}
+EXPORT_SYMBOL(drm_memcpy_from_wc);
+
+/*
+ * drm_memcpy_init_early - One time initialization of the WC memcpy code
+ */
+void drm_memcpy_init_early(void)
+{
+ /*
+ * Some hypervisors (e.g. KVM) don't support emulation of VEX-prefixed
+ * instructions, so don't enable movntdqa when running as a guest.
+ */
+ if (static_cpu_has(X86_FEATURE_XMM4_1) &&
+ !boot_cpu_has(X86_FEATURE_HYPERVISOR))
+ static_branch_enable(&has_movntdqa);
+}
+#else
+void drm_memcpy_from_wc(struct dma_buf_map *dst,
+ const struct dma_buf_map *src,
+ unsigned long len)
+{
+ WARN_ON(in_interrupt());
+
+ memcpy_fallback(dst, src, len);
+}
+EXPORT_SYMBOL(drm_memcpy_from_wc);
+
+void drm_memcpy_init_early(void)
+{
+}
+#endif /* CONFIG_X86 */
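For reference, a minimal usage sketch of the new drm_memcpy_from_wc() helper, assuming a driver with a write-combined I/O source mapping and a system-memory destination (the mappings here are illustrative, not part of the patch):

#include <linux/dma-buf-map.h>

#include <drm/drm_cache.h>

static void example_copy_from_wc(void __iomem *wc_src, void *sys_dst,
				 unsigned long len)
{
	struct dma_buf_map src, dst;

	dma_buf_map_set_vaddr_iomem(&src, wc_src);	/* source is I/O memory */
	dma_buf_map_set_vaddr(&dst, sys_dst);		/* destination is system RAM */

	/* Uses the movntdqa path on capable x86, memcpy_fallback() otherwise. */
	drm_memcpy_from_wc(&dst, &src, len);
}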
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 3d8d68a98b95..8804ec7d3215 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -35,6 +35,7 @@
#include <linux/slab.h>
#include <linux/srcu.h>
+#include <drm/drm_cache.h>
#include <drm/drm_client.h>
#include <drm/drm_color_mgmt.h>
#include <drm/drm_drv.h>
@@ -1041,6 +1042,7 @@ static int __init drm_core_init(void)
drm_connector_ida_init();
idr_init(&drm_minors_idr);
+ drm_memcpy_init_early();
ret = drm_sysfs_init();
if (ret < 0) {
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 9989425e9875..d62fb1a3c916 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -770,8 +770,7 @@ long drm_gem_dma_resv_wait(struct drm_file *filep, u32 handle,
return -EINVAL;
}
- ret = dma_resv_wait_timeout_rcu(obj->resv, wait_all,
- true, timeout);
+ ret = dma_resv_wait_timeout(obj->resv, wait_all, true, timeout);
if (ret == 0)
ret = -ETIME;
else if (ret > 0)
@@ -1375,12 +1374,12 @@ int drm_gem_fence_array_add_implicit(struct xarray *fence_array,
if (!write) {
struct dma_fence *fence =
- dma_resv_get_excl_rcu(obj->resv);
+ dma_resv_get_excl_unlocked(obj->resv);
return drm_gem_fence_array_add(fence_array, fence);
}
- ret = dma_resv_get_fences_rcu(obj->resv, NULL,
+ ret = dma_resv_get_fences(obj->resv, NULL,
&fence_count, &fences);
if (ret || !fence_count)
return ret;
diff --git a/drivers/gpu/drm/drm_gem_atomic_helper.c b/drivers/gpu/drm/drm_gem_atomic_helper.c
index a005c5a0ba46..a27135084ae5 100644
--- a/drivers/gpu/drm/drm_gem_atomic_helper.c
+++ b/drivers/gpu/drm/drm_gem_atomic_helper.c
@@ -147,7 +147,7 @@ int drm_gem_plane_helper_prepare_fb(struct drm_plane *plane, struct drm_plane_st
return 0;
obj = drm_gem_fb_get_obj(state->fb, 0);
- fence = dma_resv_get_excl_rcu(obj->resv);
+ fence = dma_resv_get_excl_unlocked(obj->resv);
drm_atomic_set_fence_for_plane(state, fence);
return 0;
diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c
index 4a24af2c5c43..d53388199f34 100644
--- a/drivers/gpu/drm/drm_gem_cma_helper.c
+++ b/drivers/gpu/drm/drm_gem_cma_helper.c
@@ -104,8 +104,7 @@ error:
* @size: size of the object to allocate
*
* This function creates a CMA GEM object and allocates a contiguous chunk of
- * memory as backing store. The backing memory has the writecombine attribute
- * set.
+ * memory as backing store.
*
* Returns:
* A struct drm_gem_cma_object * on success or an ERR_PTR()-encoded negative
diff --git a/drivers/gpu/drm/drm_gem_ttm_helper.c b/drivers/gpu/drm/drm_gem_ttm_helper.c
index b14bed8be771..ecf3d2a54a98 100644
--- a/drivers/gpu/drm/drm_gem_ttm_helper.c
+++ b/drivers/gpu/drm/drm_gem_ttm_helper.c
@@ -40,12 +40,12 @@ void drm_gem_ttm_print_info(struct drm_printer *p, unsigned int indent,
const struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(gem);
drm_printf_indent(p, indent, "placement=");
- drm_print_bits(p, bo->mem.placement, plname, ARRAY_SIZE(plname));
+ drm_print_bits(p, bo->resource->placement, plname, ARRAY_SIZE(plname));
drm_printf(p, "\n");
- if (bo->mem.bus.is_iomem)
+ if (bo->resource->bus.is_iomem)
drm_printf_indent(p, indent, "bus.offset=%lx\n",
- (unsigned long)bo->mem.bus.offset);
+ (unsigned long)bo->resource->bus.offset);
}
EXPORT_SYMBOL(drm_gem_ttm_print_info);
diff --git a/drivers/gpu/drm/drm_gem_vram_helper.c b/drivers/gpu/drm/drm_gem_vram_helper.c
index 797200315854..2a1229b8364e 100644
--- a/drivers/gpu/drm/drm_gem_vram_helper.c
+++ b/drivers/gpu/drm/drm_gem_vram_helper.c
@@ -17,6 +17,8 @@
#include <drm/drm_prime.h>
#include <drm/drm_simple_kms_helper.h>
+#include <drm/ttm/ttm_range_manager.h>
+
static const struct drm_gem_object_funcs drm_gem_vram_object_funcs;
/**
@@ -248,10 +250,11 @@ EXPORT_SYMBOL(drm_gem_vram_put);
static u64 drm_gem_vram_pg_offset(struct drm_gem_vram_object *gbo)
{
/* Keep TTM behavior for now, remove when drivers are audited */
- if (WARN_ON_ONCE(!gbo->bo.mem.mm_node))
+ if (WARN_ON_ONCE(!gbo->bo.resource ||
+ gbo->bo.resource->mem_type == TTM_PL_SYSTEM))
return 0;
- return gbo->bo.mem.start;
+ return gbo->bo.resource->start;
}
/**
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
index db69f19ab5bc..b8fa6ed3dd73 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -390,14 +390,12 @@ int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
}
if (op & ETNA_PREP_NOSYNC) {
- if (!dma_resv_test_signaled_rcu(obj->resv,
- write))
+ if (!dma_resv_test_signaled(obj->resv, write))
return -EBUSY;
} else {
unsigned long remain = etnaviv_timeout_to_jiffies(timeout);
- ret = dma_resv_wait_timeout_rcu(obj->resv,
- write, true, remain);
+ ret = dma_resv_wait_timeout(obj->resv, write, true, remain);
if (ret <= 0)
return ret == 0 ? -ETIMEDOUT : ret;
}
@@ -461,7 +459,7 @@ static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
off, etnaviv_obj->vaddr, obj->size);
rcu_read_lock();
- fobj = rcu_dereference(robj->fence);
+ fobj = dma_resv_shared_list(robj);
if (fobj) {
unsigned int i, shared_count = fobj->shared_count;
@@ -471,7 +469,7 @@ static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
}
}
- fence = rcu_dereference(robj->fence_excl);
+ fence = dma_resv_excl_fence(robj);
if (fence)
etnaviv_gem_describe_fence(fence, "Exclusive", m);
rcu_read_unlock();
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
index d05c35994579..d53856d7a747 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
@@ -189,13 +189,13 @@ static int submit_fence_sync(struct etnaviv_gem_submit *submit)
continue;
if (bo->flags & ETNA_SUBMIT_BO_WRITE) {
- ret = dma_resv_get_fences_rcu(robj, &bo->excl,
- &bo->nr_shared,
- &bo->shared);
+ ret = dma_resv_get_fences(robj, &bo->excl,
+ &bo->nr_shared,
+ &bo->shared);
if (ret)
return ret;
} else {
- bo->excl = dma_resv_get_excl_rcu(robj);
+ bo->excl = dma_resv_get_excl_unlocked(robj);
}
}
diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
index 431c5d32f9a4..9b5e6f94e558 100644
--- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
@@ -344,7 +344,7 @@ static void decon_win_set_colkey(struct decon_context *ctx, unsigned int win)
}
/**
- * shadow_protect_win() - disable updating values from shadow registers at vsync
+ * decon_shadow_protect_win() - disable updating values from shadow registers at vsync
*
* @ctx: display and enhancement controller context
* @win: window to protect registers for
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
index 4f2b7551b251..9ae868935357 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_ipp.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
@@ -88,7 +88,7 @@ void exynos_drm_ipp_unregister(struct device *dev,
}
/**
- * exynos_drm_ipp_ioctl_get_res_ioctl - enumerate all ipp modules
+ * exynos_drm_ipp_get_res_ioctl - enumerate all ipp modules
* @dev: DRM device
* @data: ioctl data
* @file_priv: DRM file info
@@ -136,7 +136,7 @@ static inline struct exynos_drm_ipp *__ipp_get(uint32_t id)
}
/**
- * exynos_drm_ipp_ioctl_get_caps - get ipp module capabilities and formats
+ * exynos_drm_ipp_get_caps_ioctl - get ipp module capabilities and formats
* @dev: DRM device
* @data: ioctl data
* @file_priv: DRM file info
diff --git a/drivers/gpu/drm/hyperv/Makefile b/drivers/gpu/drm/hyperv/Makefile
new file mode 100644
index 000000000000..265f12f2c660
--- /dev/null
+++ b/drivers/gpu/drm/hyperv/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+hyperv_drm-y := \
+ hyperv_drm_drv.o \
+ hyperv_drm_modeset.o \
+ hyperv_drm_proto.o
+
+obj-$(CONFIG_DRM_HYPERV) += hyperv_drm.o
diff --git a/drivers/gpu/drm/hyperv/hyperv_drm.h b/drivers/gpu/drm/hyperv/hyperv_drm.h
new file mode 100644
index 000000000000..886add4f9cd0
--- /dev/null
+++ b/drivers/gpu/drm/hyperv/hyperv_drm.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2021 Microsoft
+ */
+
+#ifndef _HYPERV_DRM_H_
+#define _HYPERV_DRM_H_
+
+#define VMBUS_MAX_PACKET_SIZE 0x4000
+
+struct hyperv_drm_device {
+ /* drm */
+ struct drm_device dev;
+ struct drm_simple_display_pipe pipe;
+ struct drm_connector connector;
+
+ /* mode */
+ u32 screen_width_max;
+ u32 screen_height_max;
+ u32 preferred_width;
+ u32 preferred_height;
+ u32 screen_depth;
+
+ /* hw */
+ struct resource *mem;
+ void __iomem *vram;
+ unsigned long fb_base;
+ unsigned long fb_size;
+ struct completion wait;
+ u32 synthvid_version;
+ u32 mmio_megabytes;
+ bool dirt_needed;
+
+ u8 init_buf[VMBUS_MAX_PACKET_SIZE];
+ u8 recv_buf[VMBUS_MAX_PACKET_SIZE];
+
+ struct hv_device *hdev;
+};
+
+#define to_hv(_dev) container_of(_dev, struct hyperv_drm_device, dev)
+
+/* hyperv_drm_modeset */
+int hyperv_mode_config_init(struct hyperv_drm_device *hv);
+
+/* hyperv_drm_proto */
+int hyperv_update_vram_location(struct hv_device *hdev, phys_addr_t vram_pp);
+int hyperv_update_situation(struct hv_device *hdev, u8 active, u32 bpp,
+ u32 w, u32 h, u32 pitch);
+int hyperv_update_dirt(struct hv_device *hdev, struct drm_rect *rect);
+int hyperv_connect_vsp(struct hv_device *hdev);
+
+#endif
diff --git a/drivers/gpu/drm/hyperv/hyperv_drm_drv.c b/drivers/gpu/drm/hyperv/hyperv_drm_drv.c
new file mode 100644
index 000000000000..eb06c92c4bfd
--- /dev/null
+++ b/drivers/gpu/drm/hyperv/hyperv_drm_drv.c
@@ -0,0 +1,311 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2021 Microsoft
+ */
+
+#include <linux/efi.h>
+#include <linux/hyperv.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include <drm/drm_aperture.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_gem_shmem_helper.h>
+#include <drm/drm_simple_kms_helper.h>
+
+#include "hyperv_drm.h"
+
+#define DRIVER_NAME "hyperv_drm"
+#define DRIVER_DESC "DRM driver for Hyper-V synthetic video device"
+#define DRIVER_DATE "2020"
+#define DRIVER_MAJOR 1
+#define DRIVER_MINOR 0
+
+#define PCI_VENDOR_ID_MICROSOFT 0x1414
+#define PCI_DEVICE_ID_HYPERV_VIDEO 0x5353
+
+DEFINE_DRM_GEM_FOPS(hv_fops);
+
+static struct drm_driver hyperv_driver = {
+ .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
+
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .date = DRIVER_DATE,
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
+
+ .fops = &hv_fops,
+ DRM_GEM_SHMEM_DRIVER_OPS,
+};
+
+static int hyperv_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ return 0;
+}
+
+static void hyperv_pci_remove(struct pci_dev *pdev)
+{
+}
+
+static const struct pci_device_id hyperv_pci_tbl[] = {
+ {
+ .vendor = PCI_VENDOR_ID_MICROSOFT,
+ .device = PCI_DEVICE_ID_HYPERV_VIDEO,
+ },
+ { /* end of list */ }
+};
+
+/*
+ * PCI stub to support gen1 VM.
+ */
+static struct pci_driver hyperv_pci_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = hyperv_pci_tbl,
+ .probe = hyperv_pci_probe,
+ .remove = hyperv_pci_remove,
+};
+
+static int hyperv_setup_gen1(struct hyperv_drm_device *hv)
+{
+ struct drm_device *dev = &hv->dev;
+ struct pci_dev *pdev;
+ int ret;
+
+ pdev = pci_get_device(PCI_VENDOR_ID_MICROSOFT,
+ PCI_DEVICE_ID_HYPERV_VIDEO, NULL);
+ if (!pdev) {
+ drm_err(dev, "Unable to find PCI Hyper-V video\n");
+ return -ENODEV;
+ }
+
+ ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, "hypervdrmfb");
+ if (ret) {
+ drm_err(dev, "Not able to remove boot fb\n");
+ return ret;
+ }
+
+ if (pci_request_region(pdev, 0, DRIVER_NAME) != 0)
+ drm_warn(dev, "Cannot request framebuffer, boot fb still active?\n");
+
+ if ((pdev->resource[0].flags & IORESOURCE_MEM) == 0) {
+ drm_err(dev, "Resource at bar 0 is not IORESOURCE_MEM\n");
+ ret = -ENODEV;
+ goto error;
+ }
+
+ hv->fb_base = pci_resource_start(pdev, 0);
+ hv->fb_size = pci_resource_len(pdev, 0);
+ if (!hv->fb_base) {
+ drm_err(dev, "Resource not available\n");
+ ret = -ENODEV;
+ goto error;
+ }
+
+ hv->fb_size = min(hv->fb_size,
+ (unsigned long)(hv->mmio_megabytes * 1024 * 1024));
+ hv->vram = devm_ioremap(&pdev->dev, hv->fb_base, hv->fb_size);
+ if (!hv->vram) {
+ drm_err(dev, "Failed to map vram\n");
+ ret = -ENOMEM;
+ }
+
+error:
+ pci_dev_put(pdev);
+ return ret;
+}
+
+static int hyperv_setup_gen2(struct hyperv_drm_device *hv,
+ struct hv_device *hdev)
+{
+ struct drm_device *dev = &hv->dev;
+ int ret;
+
+ drm_aperture_remove_conflicting_framebuffers(screen_info.lfb_base,
+ screen_info.lfb_size,
+ false,
+ "hypervdrmfb");
+
+ hv->fb_size = (unsigned long)hv->mmio_megabytes * 1024 * 1024;
+
+ ret = vmbus_allocate_mmio(&hv->mem, hdev, 0, -1, hv->fb_size, 0x100000,
+ true);
+ if (ret) {
+ drm_err(dev, "Failed to allocate mmio\n");
+ return -ENOMEM;
+ }
+
+ /*
+ * Map the VRAM cacheable for performance. This is also required for the
+ * display to work properly via VM Connect on ARM64 Linux VMs, as the
+ * host also maps the VRAM cacheable.
+ */
+ hv->vram = ioremap_cache(hv->mem->start, hv->fb_size);
+ if (!hv->vram) {
+ drm_err(dev, "Failed to map vram\n");
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ hv->fb_base = hv->mem->start;
+ return 0;
+
+error:
+ vmbus_free_mmio(hv->mem->start, hv->fb_size);
+ return ret;
+}
+
+static int hyperv_vmbus_probe(struct hv_device *hdev,
+ const struct hv_vmbus_device_id *dev_id)
+{
+ struct hyperv_drm_device *hv;
+ struct drm_device *dev;
+ int ret;
+
+ hv = devm_drm_dev_alloc(&hdev->device, &hyperv_driver,
+ struct hyperv_drm_device, dev);
+ if (IS_ERR(hv))
+ return PTR_ERR(hv);
+
+ dev = &hv->dev;
+ init_completion(&hv->wait);
+ hv_set_drvdata(hdev, hv);
+ hv->hdev = hdev;
+
+ ret = hyperv_connect_vsp(hdev);
+ if (ret) {
+ drm_err(dev, "Failed to connect to vmbus.\n");
+ goto err_hv_set_drv_data;
+ }
+
+ if (efi_enabled(EFI_BOOT))
+ ret = hyperv_setup_gen2(hv, hdev);
+ else
+ ret = hyperv_setup_gen1(hv);
+
+ if (ret)
+ goto err_vmbus_close;
+
+ /*
+ * This should be done only once during init and resume. Failing to
+ * update the vram location is not fatal; the device will then only
+ * update the dirty area up to the preferred resolution.
+ */
+ ret = hyperv_update_vram_location(hdev, hv->fb_base);
+ if (ret)
+ drm_warn(dev, "Failed to update vram location.\n");
+
+ hv->dirt_needed = true;
+
+ ret = hyperv_mode_config_init(hv);
+ if (ret)
+ goto err_vmbus_close;
+
+ ret = drm_dev_register(dev, 0);
+ if (ret) {
+ drm_err(dev, "Failed to register drm driver.\n");
+ goto err_vmbus_close;
+ }
+
+ drm_fbdev_generic_setup(dev, 0);
+
+ return 0;
+
+err_vmbus_close:
+ vmbus_close(hdev->channel);
+err_hv_set_drv_data:
+ hv_set_drvdata(hdev, NULL);
+ return ret;
+}
+
+static int hyperv_vmbus_remove(struct hv_device *hdev)
+{
+ struct drm_device *dev = hv_get_drvdata(hdev);
+ struct hyperv_drm_device *hv = to_hv(dev);
+
+ drm_dev_unplug(dev);
+ drm_atomic_helper_shutdown(dev);
+ vmbus_close(hdev->channel);
+ hv_set_drvdata(hdev, NULL);
+ vmbus_free_mmio(hv->mem->start, hv->fb_size);
+
+ return 0;
+}
+
+static int hyperv_vmbus_suspend(struct hv_device *hdev)
+{
+ struct drm_device *dev = hv_get_drvdata(hdev);
+ int ret;
+
+ ret = drm_mode_config_helper_suspend(dev);
+ if (ret)
+ return ret;
+
+ vmbus_close(hdev->channel);
+
+ return 0;
+}
+
+static int hyperv_vmbus_resume(struct hv_device *hdev)
+{
+ struct drm_device *dev = hv_get_drvdata(hdev);
+ struct hyperv_drm_device *hv = to_hv(dev);
+ int ret;
+
+ ret = hyperv_connect_vsp(hdev);
+ if (ret)
+ return ret;
+
+ ret = hyperv_update_vram_location(hdev, hv->fb_base);
+ if (ret)
+ return ret;
+
+ return drm_mode_config_helper_resume(dev);
+}
+
+static const struct hv_vmbus_device_id hyperv_vmbus_tbl[] = {
+ /* Synthetic Video Device GUID */
+ {HV_SYNTHVID_GUID},
+ {}
+};
+
+static struct hv_driver hyperv_hv_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = hyperv_vmbus_tbl,
+ .probe = hyperv_vmbus_probe,
+ .remove = hyperv_vmbus_remove,
+ .suspend = hyperv_vmbus_suspend,
+ .resume = hyperv_vmbus_resume,
+ .driver = {
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ },
+};
+
+static int __init hyperv_init(void)
+{
+ int ret;
+
+ ret = pci_register_driver(&hyperv_pci_driver);
+ if (ret != 0)
+ return ret;
+
+ return vmbus_driver_register(&hyperv_hv_driver);
+}
+
+static void __exit hyperv_exit(void)
+{
+ vmbus_driver_unregister(&hyperv_hv_driver);
+ pci_unregister_driver(&hyperv_pci_driver);
+}
+
+module_init(hyperv_init);
+module_exit(hyperv_exit);
+
+MODULE_DEVICE_TABLE(pci, hyperv_pci_tbl);
+MODULE_DEVICE_TABLE(vmbus, hyperv_vmbus_tbl);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Deepak Rawat <drawat.floss@gmail.com>");
+MODULE_DESCRIPTION("DRM driver for Hyper-V synthetic video device");
diff --git a/drivers/gpu/drm/hyperv/hyperv_drm_modeset.c b/drivers/gpu/drm/hyperv/hyperv_drm_modeset.c
new file mode 100644
index 000000000000..02718e3e859e
--- /dev/null
+++ b/drivers/gpu/drm/hyperv/hyperv_drm_modeset.c
@@ -0,0 +1,231 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2021 Microsoft
+ */
+
+#include <linux/hyperv.h>
+
+#include <drm/drm_damage_helper.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_format_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_gem_atomic_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_gem_shmem_helper.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
+
+#include "hyperv_drm.h"
+
+static int hyperv_blit_to_vram_rect(struct drm_framebuffer *fb,
+ const struct dma_buf_map *map,
+ struct drm_rect *rect)
+{
+ struct hyperv_drm_device *hv = to_hv(fb->dev);
+ void *vmap = map->vaddr; /* TODO: Use mapping abstraction properly */
+ int idx;
+
+ if (!drm_dev_enter(&hv->dev, &idx))
+ return -ENODEV;
+
+ drm_fb_memcpy_dstclip(hv->vram, fb->pitches[0], vmap, fb, rect);
+ drm_dev_exit(idx);
+
+ return 0;
+}
+
+static int hyperv_blit_to_vram_fullscreen(struct drm_framebuffer *fb, const struct dma_buf_map *map)
+{
+ struct drm_rect fullscreen = {
+ .x1 = 0,
+ .x2 = fb->width,
+ .y1 = 0,
+ .y2 = fb->height,
+ };
+ return hyperv_blit_to_vram_rect(fb, map, &fullscreen);
+}
+
+static int hyperv_connector_get_modes(struct drm_connector *connector)
+{
+ struct hyperv_drm_device *hv = to_hv(connector->dev);
+ int count;
+
+ count = drm_add_modes_noedid(connector,
+ connector->dev->mode_config.max_width,
+ connector->dev->mode_config.max_height);
+ drm_set_preferred_mode(connector, hv->preferred_width,
+ hv->preferred_height);
+
+ return count;
+}
+
+static const struct drm_connector_helper_funcs hyperv_connector_helper_funcs = {
+ .get_modes = hyperv_connector_get_modes,
+};
+
+static const struct drm_connector_funcs hyperv_connector_funcs = {
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = drm_connector_cleanup,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static inline int hyperv_conn_init(struct hyperv_drm_device *hv)
+{
+ drm_connector_helper_add(&hv->connector, &hyperv_connector_helper_funcs);
+ return drm_connector_init(&hv->dev, &hv->connector,
+ &hyperv_connector_funcs,
+ DRM_MODE_CONNECTOR_VIRTUAL);
+}
+
+static int hyperv_check_size(struct hyperv_drm_device *hv, int w, int h,
+ struct drm_framebuffer *fb)
+{
+ u32 pitch = w * (hv->screen_depth / 8);
+
+ if (fb)
+ pitch = fb->pitches[0];
+
+ if (pitch * h > hv->fb_size)
+ return -EINVAL;
+
+ return 0;
+}
+
+static void hyperv_pipe_enable(struct drm_simple_display_pipe *pipe,
+ struct drm_crtc_state *crtc_state,
+ struct drm_plane_state *plane_state)
+{
+ struct hyperv_drm_device *hv = to_hv(pipe->crtc.dev);
+ struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
+
+ hyperv_update_situation(hv->hdev, 1, hv->screen_depth,
+ crtc_state->mode.hdisplay,
+ crtc_state->mode.vdisplay,
+ plane_state->fb->pitches[0]);
+ hyperv_blit_to_vram_fullscreen(plane_state->fb, &shadow_plane_state->map[0]);
+}
+
+static int hyperv_pipe_check(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *plane_state,
+ struct drm_crtc_state *crtc_state)
+{
+ struct hyperv_drm_device *hv = to_hv(pipe->crtc.dev);
+ struct drm_framebuffer *fb = plane_state->fb;
+
+ if (fb->format->format != DRM_FORMAT_XRGB8888)
+ return -EINVAL;
+
+ if (fb->pitches[0] * fb->height > hv->fb_size)
+ return -EINVAL;
+
+ return 0;
+}
+
+static void hyperv_pipe_update(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *old_state)
+{
+ struct hyperv_drm_device *hv = to_hv(pipe->crtc.dev);
+ struct drm_plane_state *state = pipe->plane.state;
+ struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(state);
+ struct drm_rect rect;
+
+ if (drm_atomic_helper_damage_merged(old_state, state, &rect)) {
+ hyperv_blit_to_vram_rect(state->fb, &shadow_plane_state->map[0], &rect);
+ hyperv_update_dirt(hv->hdev, &rect);
+ }
+}
+
+static const struct drm_simple_display_pipe_funcs hyperv_pipe_funcs = {
+ .enable = hyperv_pipe_enable,
+ .check = hyperv_pipe_check,
+ .update = hyperv_pipe_update,
+ DRM_GEM_SIMPLE_DISPLAY_PIPE_SHADOW_PLANE_FUNCS,
+};
+
+static const uint32_t hyperv_formats[] = {
+ DRM_FORMAT_XRGB8888,
+};
+
+static const uint64_t hyperv_modifiers[] = {
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID
+};
+
+static inline int hyperv_pipe_init(struct hyperv_drm_device *hv)
+{
+ int ret;
+
+ ret = drm_simple_display_pipe_init(&hv->dev,
+ &hv->pipe,
+ &hyperv_pipe_funcs,
+ hyperv_formats,
+ ARRAY_SIZE(hyperv_formats),
+ NULL,
+ &hv->connector);
+ if (ret)
+ return ret;
+
+ drm_plane_enable_fb_damage_clips(&hv->pipe.plane);
+
+ return 0;
+}
+
+static enum drm_mode_status
+hyperv_mode_valid(struct drm_device *dev,
+ const struct drm_display_mode *mode)
+{
+ struct hyperv_drm_device *hv = to_hv(dev);
+
+ if (hyperv_check_size(hv, mode->hdisplay, mode->vdisplay, NULL))
+ return MODE_BAD;
+
+ return MODE_OK;
+}
+
+static const struct drm_mode_config_funcs hyperv_mode_config_funcs = {
+ .fb_create = drm_gem_fb_create_with_dirty,
+ .mode_valid = hyperv_mode_valid,
+ .atomic_check = drm_atomic_helper_check,
+ .atomic_commit = drm_atomic_helper_commit,
+};
+
+int hyperv_mode_config_init(struct hyperv_drm_device *hv)
+{
+ struct drm_device *dev = &hv->dev;
+ int ret;
+
+ ret = drmm_mode_config_init(dev);
+ if (ret) {
+ drm_err(dev, "Failed to initialize mode setting.\n");
+ return ret;
+ }
+
+ dev->mode_config.min_width = 0;
+ dev->mode_config.min_height = 0;
+ dev->mode_config.max_width = hv->screen_width_max;
+ dev->mode_config.max_height = hv->screen_height_max;
+
+ dev->mode_config.preferred_depth = hv->screen_depth;
+ dev->mode_config.prefer_shadow = 0;
+
+ dev->mode_config.funcs = &hyperv_mode_config_funcs;
+
+ ret = hyperv_conn_init(hv);
+ if (ret) {
+ drm_err(dev, "Failed to initialize connector.\n");
+ return ret;
+ }
+
+ ret = hyperv_pipe_init(hv);
+ if (ret) {
+ drm_err(dev, "Failed to initialize pipe.\n");
+ return ret;
+ }
+
+ drm_mode_config_reset(dev);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/hyperv/hyperv_drm_proto.c b/drivers/gpu/drm/hyperv/hyperv_drm_proto.c
new file mode 100644
index 000000000000..6d4bdccfbd1a
--- /dev/null
+++ b/drivers/gpu/drm/hyperv/hyperv_drm_proto.c
@@ -0,0 +1,485 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2021 Microsoft
+ *
+ * Portions of this code is derived from hyperv_fb.c
+ */
+
+#include <linux/hyperv.h>
+
+#include <drm/drm_print.h>
+#include <drm/drm_simple_kms_helper.h>
+
+#include "hyperv_drm.h"
+
+#define VMBUS_RING_BUFSIZE (256 * 1024)
+#define VMBUS_VSP_TIMEOUT (10 * HZ)
+
+#define SYNTHVID_VERSION(major, minor) ((minor) << 16 | (major))
+#define SYNTHVID_VER_GET_MAJOR(ver) (ver & 0x0000ffff)
+#define SYNTHVID_VER_GET_MINOR(ver) ((ver & 0xffff0000) >> 16)
+#define SYNTHVID_VERSION_WIN7 SYNTHVID_VERSION(3, 0)
+#define SYNTHVID_VERSION_WIN8 SYNTHVID_VERSION(3, 2)
+#define SYNTHVID_VERSION_WIN10 SYNTHVID_VERSION(3, 5)
+
+#define SYNTHVID_DEPTH_WIN7 16
+#define SYNTHVID_DEPTH_WIN8 32
+#define SYNTHVID_FB_SIZE_WIN7 (4 * 1024 * 1024)
+#define SYNTHVID_FB_SIZE_WIN8 (8 * 1024 * 1024)
+#define SYNTHVID_WIDTH_MAX_WIN7 1600
+#define SYNTHVID_HEIGHT_MAX_WIN7 1200
+
+enum pipe_msg_type {
+ PIPE_MSG_INVALID,
+ PIPE_MSG_DATA,
+ PIPE_MSG_MAX
+};
+
+enum synthvid_msg_type {
+ SYNTHVID_ERROR = 0,
+ SYNTHVID_VERSION_REQUEST = 1,
+ SYNTHVID_VERSION_RESPONSE = 2,
+ SYNTHVID_VRAM_LOCATION = 3,
+ SYNTHVID_VRAM_LOCATION_ACK = 4,
+ SYNTHVID_SITUATION_UPDATE = 5,
+ SYNTHVID_SITUATION_UPDATE_ACK = 6,
+ SYNTHVID_POINTER_POSITION = 7,
+ SYNTHVID_POINTER_SHAPE = 8,
+ SYNTHVID_FEATURE_CHANGE = 9,
+ SYNTHVID_DIRT = 10,
+ SYNTHVID_RESOLUTION_REQUEST = 13,
+ SYNTHVID_RESOLUTION_RESPONSE = 14,
+
+ SYNTHVID_MAX = 15
+};
+
+struct pipe_msg_hdr {
+ u32 type;
+ u32 size; /* size of message after this field */
+} __packed;
+
+struct hvd_screen_info {
+ u16 width;
+ u16 height;
+} __packed;
+
+struct synthvid_msg_hdr {
+ u32 type;
+ u32 size; /* size of this header + payload after this field */
+} __packed;
+
+struct synthvid_version_req {
+ u32 version;
+} __packed;
+
+struct synthvid_version_resp {
+ u32 version;
+ u8 is_accepted;
+ u8 max_video_outputs;
+} __packed;
+
+struct synthvid_vram_location {
+ u64 user_ctx;
+ u8 is_vram_gpa_specified;
+ u64 vram_gpa;
+} __packed;
+
+struct synthvid_vram_location_ack {
+ u64 user_ctx;
+} __packed;
+
+struct video_output_situation {
+ u8 active;
+ u32 vram_offset;
+ u8 depth_bits;
+ u32 width_pixels;
+ u32 height_pixels;
+ u32 pitch_bytes;
+} __packed;
+
+struct synthvid_situation_update {
+ u64 user_ctx;
+ u8 video_output_count;
+ struct video_output_situation video_output[1];
+} __packed;
+
+struct synthvid_situation_update_ack {
+ u64 user_ctx;
+} __packed;
+
+struct synthvid_pointer_position {
+ u8 is_visible;
+ u8 video_output;
+ s32 image_x;
+ s32 image_y;
+} __packed;
+
+#define SYNTHVID_CURSOR_MAX_X 96
+#define SYNTHVID_CURSOR_MAX_Y 96
+#define SYNTHVID_CURSOR_ARGB_PIXEL_SIZE 4
+#define SYNTHVID_CURSOR_MAX_SIZE (SYNTHVID_CURSOR_MAX_X * \
+ SYNTHVID_CURSOR_MAX_Y * SYNTHVID_CURSOR_ARGB_PIXEL_SIZE)
+#define SYNTHVID_CURSOR_COMPLETE (-1)
+
+struct synthvid_pointer_shape {
+ u8 part_idx;
+ u8 is_argb;
+ u32 width; /* SYNTHVID_CURSOR_MAX_X at most */
+ u32 height; /* SYNTHVID_CURSOR_MAX_Y at most */
+ u32 hot_x; /* hotspot relative to upper-left of pointer image */
+ u32 hot_y;
+ u8 data[4];
+} __packed;
+
+struct synthvid_feature_change {
+ u8 is_dirt_needed;
+ u8 is_ptr_pos_needed;
+ u8 is_ptr_shape_needed;
+ u8 is_situ_needed;
+} __packed;
+
+struct rect {
+ s32 x1, y1; /* top left corner */
+ s32 x2, y2; /* bottom right corner, exclusive */
+} __packed;
+
+struct synthvid_dirt {
+ u8 video_output;
+ u8 dirt_count;
+ struct rect rect[1];
+} __packed;
+
+#define SYNTHVID_EDID_BLOCK_SIZE 128
+#define SYNTHVID_MAX_RESOLUTION_COUNT 64
+
+struct synthvid_supported_resolution_req {
+ u8 maximum_resolution_count;
+} __packed;
+
+struct synthvid_supported_resolution_resp {
+ u8 edid_block[SYNTHVID_EDID_BLOCK_SIZE];
+ u8 resolution_count;
+ u8 default_resolution_index;
+ u8 is_standard;
+ struct hvd_screen_info supported_resolution[SYNTHVID_MAX_RESOLUTION_COUNT];
+} __packed;
+
+struct synthvid_msg {
+ struct pipe_msg_hdr pipe_hdr;
+ struct synthvid_msg_hdr vid_hdr;
+ union {
+ struct synthvid_version_req ver_req;
+ struct synthvid_version_resp ver_resp;
+ struct synthvid_vram_location vram;
+ struct synthvid_vram_location_ack vram_ack;
+ struct synthvid_situation_update situ;
+ struct synthvid_situation_update_ack situ_ack;
+ struct synthvid_pointer_position ptr_pos;
+ struct synthvid_pointer_shape ptr_shape;
+ struct synthvid_feature_change feature_chg;
+ struct synthvid_dirt dirt;
+ struct synthvid_supported_resolution_req resolution_req;
+ struct synthvid_supported_resolution_resp resolution_resp;
+ };
+} __packed;
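+
+/*
+ * Every packet on the VMBus channel consists of a pipe_msg_hdr followed by a
+ * synthvid_msg_hdr and one payload from the union above; hyperv_sendpacket()
+ * fills in the pipe header before the message is handed to the channel.
+ */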
+
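+/* Return true when ver1 is at least ver2, comparing major first, then minor. */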
+static inline bool hyperv_version_ge(u32 ver1, u32 ver2)
+{
+ if (SYNTHVID_VER_GET_MAJOR(ver1) > SYNTHVID_VER_GET_MAJOR(ver2) ||
+ (SYNTHVID_VER_GET_MAJOR(ver1) == SYNTHVID_VER_GET_MAJOR(ver2) &&
+ SYNTHVID_VER_GET_MINOR(ver1) >= SYNTHVID_VER_GET_MINOR(ver2)))
+ return true;
+
+ return false;
+}
+
+static inline int hyperv_sendpacket(struct hv_device *hdev, struct synthvid_msg *msg)
+{
+ static atomic64_t request_id = ATOMIC64_INIT(0);
+ struct hyperv_drm_device *hv = hv_get_drvdata(hdev);
+ int ret;
+
+ msg->pipe_hdr.type = PIPE_MSG_DATA;
+ msg->pipe_hdr.size = msg->vid_hdr.size;
+
+ ret = vmbus_sendpacket(hdev->channel, msg,
+ msg->vid_hdr.size + sizeof(struct pipe_msg_hdr),
+ atomic64_inc_return(&request_id),
+ VM_PKT_DATA_INBAND, 0);
+
+ if (ret)
+ drm_err(&hv->dev, "Unable to send packet via vmbus\n");
+
+ return ret;
+}
+
+static int hyperv_negotiate_version(struct hv_device *hdev, u32 ver)
+{
+ struct hyperv_drm_device *hv = hv_get_drvdata(hdev);
+ struct synthvid_msg *msg = (struct synthvid_msg *)hv->init_buf;
+ struct drm_device *dev = &hv->dev;
+ unsigned long t;
+
+ memset(msg, 0, sizeof(struct synthvid_msg));
+ msg->vid_hdr.type = SYNTHVID_VERSION_REQUEST;
+ msg->vid_hdr.size = sizeof(struct synthvid_msg_hdr) +
+ sizeof(struct synthvid_version_req);
+ msg->ver_req.version = ver;
+ hyperv_sendpacket(hdev, msg);
+
+ t = wait_for_completion_timeout(&hv->wait, VMBUS_VSP_TIMEOUT);
+ if (!t) {
+ drm_err(dev, "Time out on waiting version response\n");
+ return -ETIMEDOUT;
+ }
+
+ if (!msg->ver_resp.is_accepted) {
+ drm_err(dev, "Version request not accepted\n");
+ return -ENODEV;
+ }
+
+ hv->synthvid_version = ver;
+ drm_info(dev, "Synthvid Version major %d, minor %d\n",
+ SYNTHVID_VER_GET_MAJOR(ver), SYNTHVID_VER_GET_MINOR(ver));
+
+ return 0;
+}
+
+int hyperv_update_vram_location(struct hv_device *hdev, phys_addr_t vram_pp)
+{
+ struct hyperv_drm_device *hv = hv_get_drvdata(hdev);
+ struct synthvid_msg *msg = (struct synthvid_msg *)hv->init_buf;
+ struct drm_device *dev = &hv->dev;
+ unsigned long t;
+
+ memset(msg, 0, sizeof(struct synthvid_msg));
+ msg->vid_hdr.type = SYNTHVID_VRAM_LOCATION;
+ msg->vid_hdr.size = sizeof(struct synthvid_msg_hdr) +
+ sizeof(struct synthvid_vram_location);
+ msg->vram.user_ctx = vram_pp;
+ msg->vram.vram_gpa = vram_pp;
+ msg->vram.is_vram_gpa_specified = 1;
+ hyperv_sendpacket(hdev, msg);
+
+ t = wait_for_completion_timeout(&hv->wait, VMBUS_VSP_TIMEOUT);
+ if (!t) {
+ drm_err(dev, "Time out on waiting vram location ack\n");
+ return -ETIMEDOUT;
+ }
+ if (msg->vram_ack.user_ctx != vram_pp) {
+ drm_err(dev, "Unable to set VRAM location\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+int hyperv_update_situation(struct hv_device *hdev, u8 active, u32 bpp,
+ u32 w, u32 h, u32 pitch)
+{
+ struct synthvid_msg msg;
+
+ memset(&msg, 0, sizeof(struct synthvid_msg));
+
+ msg.vid_hdr.type = SYNTHVID_SITUATION_UPDATE;
+ msg.vid_hdr.size = sizeof(struct synthvid_msg_hdr) +
+ sizeof(struct synthvid_situation_update);
+ msg.situ.user_ctx = 0;
+ msg.situ.video_output_count = 1;
+ msg.situ.video_output[0].active = active;
+ /* vram_offset should always be 0 */
+ msg.situ.video_output[0].vram_offset = 0;
+ msg.situ.video_output[0].depth_bits = bpp;
+ msg.situ.video_output[0].width_pixels = w;
+ msg.situ.video_output[0].height_pixels = h;
+ msg.situ.video_output[0].pitch_bytes = pitch;
+
+ hyperv_sendpacket(hdev, &msg);
+
+ return 0;
+}
+
+int hyperv_update_dirt(struct hv_device *hdev, struct drm_rect *rect)
+{
+ struct hyperv_drm_device *hv = hv_get_drvdata(hdev);
+ struct synthvid_msg msg;
+
+ if (!hv->dirt_needed)
+ return 0;
+
+ memset(&msg, 0, sizeof(struct synthvid_msg));
+
+ msg.vid_hdr.type = SYNTHVID_DIRT;
+ msg.vid_hdr.size = sizeof(struct synthvid_msg_hdr) +
+ sizeof(struct synthvid_dirt);
+ msg.dirt.video_output = 0;
+ msg.dirt.dirt_count = 1;
+ msg.dirt.rect[0].x1 = rect->x1;
+ msg.dirt.rect[0].y1 = rect->y1;
+ msg.dirt.rect[0].x2 = rect->x2;
+ msg.dirt.rect[0].y2 = rect->y2;
+
+ hyperv_sendpacket(hdev, &msg);
+
+ return 0;
+}
+
+static int hyperv_get_supported_resolution(struct hv_device *hdev)
+{
+ struct hyperv_drm_device *hv = hv_get_drvdata(hdev);
+ struct synthvid_msg *msg = (struct synthvid_msg *)hv->init_buf;
+ struct drm_device *dev = &hv->dev;
+ unsigned long t;
+ u8 index;
+ int i;
+
+ memset(msg, 0, sizeof(struct synthvid_msg));
+ msg->vid_hdr.type = SYNTHVID_RESOLUTION_REQUEST;
+ msg->vid_hdr.size = sizeof(struct synthvid_msg_hdr) +
+ sizeof(struct synthvid_supported_resolution_req);
+ msg->resolution_req.maximum_resolution_count =
+ SYNTHVID_MAX_RESOLUTION_COUNT;
+ hyperv_sendpacket(hdev, msg);
+
+ t = wait_for_completion_timeout(&hv->wait, VMBUS_VSP_TIMEOUT);
+ if (!t) {
+ drm_err(dev, "Time out on waiting resolution response\n");
+ return -ETIMEDOUT;
+ }
+
+ if (msg->resolution_resp.resolution_count == 0) {
+ drm_err(dev, "No supported resolutions\n");
+ return -ENODEV;
+ }
+
+ index = msg->resolution_resp.default_resolution_index;
+ if (index >= msg->resolution_resp.resolution_count) {
+ drm_err(dev, "Invalid resolution index: %d\n", index);
+ return -ENODEV;
+ }
+
+ for (i = 0; i < msg->resolution_resp.resolution_count; i++) {
+ hv->screen_width_max = max_t(u32, hv->screen_width_max,
+ msg->resolution_resp.supported_resolution[i].width);
+ hv->screen_height_max = max_t(u32, hv->screen_height_max,
+ msg->resolution_resp.supported_resolution[i].height);
+ }
+
+ hv->preferred_width =
+ msg->resolution_resp.supported_resolution[index].width;
+ hv->preferred_height =
+ msg->resolution_resp.supported_resolution[index].height;
+
+ return 0;
+}
+
+static void hyperv_receive_sub(struct hv_device *hdev)
+{
+ struct hyperv_drm_device *hv = hv_get_drvdata(hdev);
+ struct synthvid_msg *msg;
+
+ if (!hv)
+ return;
+
+ msg = (struct synthvid_msg *)hv->recv_buf;
+
+ /* Complete the wait event */
+ if (msg->vid_hdr.type == SYNTHVID_VERSION_RESPONSE ||
+ msg->vid_hdr.type == SYNTHVID_RESOLUTION_RESPONSE ||
+ msg->vid_hdr.type == SYNTHVID_VRAM_LOCATION_ACK) {
+ memcpy(hv->init_buf, msg, VMBUS_MAX_PACKET_SIZE);
+ complete(&hv->wait);
+ return;
+ }
+
+ if (msg->vid_hdr.type == SYNTHVID_FEATURE_CHANGE)
+ hv->dirt_needed = msg->feature_chg.is_dirt_needed;
+}
+
+static void hyperv_receive(void *ctx)
+{
+ struct hv_device *hdev = ctx;
+ struct hyperv_drm_device *hv = hv_get_drvdata(hdev);
+ struct synthvid_msg *recv_buf;
+ u32 bytes_recvd;
+ u64 req_id;
+ int ret;
+
+ if (!hv)
+ return;
+
+ recv_buf = (struct synthvid_msg *)hv->recv_buf;
+
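+ /* Drain every packet pending in the VMBus ring buffer. */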
+ do {
+ ret = vmbus_recvpacket(hdev->channel, recv_buf,
+ VMBUS_MAX_PACKET_SIZE,
+ &bytes_recvd, &req_id);
+ if (bytes_recvd > 0 &&
+ recv_buf->pipe_hdr.type == PIPE_MSG_DATA)
+ hyperv_receive_sub(hdev);
+ } while (bytes_recvd > 0 && ret == 0);
+}
+
+int hyperv_connect_vsp(struct hv_device *hdev)
+{
+ struct hyperv_drm_device *hv = hv_get_drvdata(hdev);
+ struct drm_device *dev = &hv->dev;
+ int ret;
+
+ ret = vmbus_open(hdev->channel, VMBUS_RING_BUFSIZE, VMBUS_RING_BUFSIZE,
+ NULL, 0, hyperv_receive, hdev);
+ if (ret) {
+ drm_err(dev, "Unable to open vmbus channel\n");
+ return ret;
+ }
+
+ /*
+ * Negotiate the protocol version with the host, starting from the
+ * newest SynthVid version for this VMBus protocol and falling back
+ * to older versions if the host rejects it.
+ */
+ switch (vmbus_proto_version) {
+ case VERSION_WIN10:
+ case VERSION_WIN10_V5:
+ ret = hyperv_negotiate_version(hdev, SYNTHVID_VERSION_WIN10);
+ if (!ret)
+ break;
+ fallthrough;
+ case VERSION_WIN8:
+ case VERSION_WIN8_1:
+ ret = hyperv_negotiate_version(hdev, SYNTHVID_VERSION_WIN8);
+ if (!ret)
+ break;
+ fallthrough;
+ case VERSION_WS2008:
+ case VERSION_WIN7:
+ ret = hyperv_negotiate_version(hdev, SYNTHVID_VERSION_WIN7);
+ break;
+ default:
+ ret = hyperv_negotiate_version(hdev, SYNTHVID_VERSION_WIN10);
+ break;
+ }
+
+ if (ret) {
+ drm_err(dev, "Synthetic video device version not accepted %d\n", ret);
+ goto error;
+ }
+
+ if (hv->synthvid_version == SYNTHVID_VERSION_WIN7)
+ hv->screen_depth = SYNTHVID_DEPTH_WIN7;
+ else
+ hv->screen_depth = SYNTHVID_DEPTH_WIN8;
+
+ if (hyperv_version_ge(hv->synthvid_version, SYNTHVID_VERSION_WIN10)) {
+ ret = hyperv_get_supported_resolution(hdev);
+ if (ret)
+ drm_err(dev, "Failed to get supported resolution from host, use default\n");
+ } else {
+ hv->screen_width_max = SYNTHVID_WIDTH_MAX_WIN7;
+ hv->screen_height_max = SYNTHVID_HEIGHT_MAX_WIN7;
+ }
+
+ hv->mmio_megabytes = hdev->channel->offermsg.offer.mmio_megabytes;
+
+ return 0;
+
+error:
+ vmbus_close(hdev->channel);
+ return ret;
+}
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index d0d936d9137b..6947495bf34b 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -201,10 +201,10 @@ i915-y += \
display/intel_combo_phy.o \
display/intel_connector.o \
display/intel_crtc.o \
- display/intel_csr.o \
display/intel_cursor.o \
display/intel_display.o \
display/intel_display_power.o \
+ display/intel_dmc.o \
display/intel_dpio_phy.o \
display/intel_dpll.o \
display/intel_dpll_mgr.o \
@@ -263,6 +263,7 @@ i915-y += \
display/intel_lvds.o \
display/intel_panel.o \
display/intel_pps.o \
+ display/intel_qp_tables.o \
display/intel_sdvo.o \
display/intel_tv.o \
display/intel_vdsc.o \
diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c
index ce544e20f35c..16812488c5dd 100644
--- a/drivers/gpu/drm/i915/display/icl_dsi.c
+++ b/drivers/gpu/drm/i915/display/icl_dsi.c
@@ -363,10 +363,19 @@ static void gen11_dsi_program_esc_clk_div(struct intel_encoder *encoder,
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
int afe_clk_khz;
- u32 esc_clk_div_m;
+ int theo_word_clk, act_word_clk;
+ u32 esc_clk_div_m, esc_clk_div_m_phy;
afe_clk_khz = afe_clk(encoder, crtc_state);
- esc_clk_div_m = DIV_ROUND_UP(afe_clk_khz, DSI_MAX_ESC_CLK);
+
+ if (IS_ALDERLAKE_S(dev_priv) || IS_ALDERLAKE_P(dev_priv)) {
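+ /*
+ * Round the theoretical word clock up to the next odd value (minimum
+ * 3) and derive both the controller and PHY escape clock dividers
+ * from it.
+ */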
+ theo_word_clk = DIV_ROUND_UP(afe_clk_khz, 8 * DSI_MAX_ESC_CLK);
+ act_word_clk = max(3, theo_word_clk + (theo_word_clk + 1) % 2);
+ esc_clk_div_m = act_word_clk * 8;
+ esc_clk_div_m_phy = (act_word_clk - 1) / 2;
+ } else {
+ esc_clk_div_m = DIV_ROUND_UP(afe_clk_khz, DSI_MAX_ESC_CLK);
+ }
for_each_dsi_port(port, intel_dsi->ports) {
intel_de_write(dev_priv, ICL_DSI_ESC_CLK_DIV(port),
@@ -379,6 +388,14 @@ static void gen11_dsi_program_esc_clk_div(struct intel_encoder *encoder,
esc_clk_div_m & ICL_ESC_CLK_DIV_MASK);
intel_de_posting_read(dev_priv, ICL_DPHY_ESC_CLK_DIV(port));
}
+
+ if (IS_ALDERLAKE_S(dev_priv) || IS_ALDERLAKE_P(dev_priv)) {
+ for_each_dsi_port(port, intel_dsi->ports) {
+ intel_de_write(dev_priv, ADL_MIPIO_DW(port, 8),
+ esc_clk_div_m_phy & TX_ESC_CLK_DIV_PHY);
+ intel_de_posting_read(dev_priv, ADL_MIPIO_DW(port, 8));
+ }
+ }
}
static void get_dsi_io_power_domains(struct drm_i915_private *dev_priv,
diff --git a/drivers/gpu/drm/i915/display/intel_atomic.c b/drivers/gpu/drm/i915/display/intel_atomic.c
index 88f424020a5f..b4e7ac51aa31 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic.c
@@ -187,6 +187,26 @@ intel_connector_needs_modeset(struct intel_atomic_state *state,
new_conn_state->crtc)));
}
+/**
+ * intel_any_crtc_needs_modeset - check if any CRTC needs a modeset
+ * @state: the atomic state corresponding to this modeset
+ *
+ * Returns true if any CRTC in @state needs a modeset.
+ */
+bool intel_any_crtc_needs_modeset(struct intel_atomic_state *state)
+{
+ struct intel_crtc *crtc;
+ struct intel_crtc_state *crtc_state;
+ int i;
+
+ for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
+ if (intel_crtc_needs_modeset(crtc_state))
+ return true;
+ }
+
+ return false;
+}
+
struct intel_digital_connector_state *
intel_atomic_get_digital_connector_state(struct intel_atomic_state *state,
struct intel_connector *connector)
diff --git a/drivers/gpu/drm/i915/display/intel_atomic.h b/drivers/gpu/drm/i915/display/intel_atomic.h
index 62a3365ed5e6..d2700c74c9da 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic.h
+++ b/drivers/gpu/drm/i915/display/intel_atomic.h
@@ -35,6 +35,7 @@ struct drm_connector_state *
intel_digital_connector_duplicate_state(struct drm_connector *connector);
bool intel_connector_needs_modeset(struct intel_atomic_state *state,
struct drm_connector *connector);
+bool intel_any_crtc_needs_modeset(struct intel_atomic_state *state);
struct intel_digital_connector_state *
intel_atomic_get_digital_connector_state(struct intel_atomic_state *state,
struct intel_connector *connector);
diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c
index a35435083b60..bfb398f0432e 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.c
+++ b/drivers/gpu/drm/i915/display/intel_bw.c
@@ -162,7 +162,7 @@ static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel
{
struct intel_qgv_info qi = {};
bool is_y_tile = true; /* assume y tile may be used */
- int num_channels = dev_priv->dram_info.num_channels;
+ int num_channels = max_t(u8, 1, dev_priv->dram_info.num_channels);
int deinterleave;
int ipqdepth, ipqdepthpch;
int dclk_max;
@@ -267,7 +267,7 @@ void intel_bw_init_hw(struct drm_i915_private *dev_priv)
if (!HAS_DISPLAY(dev_priv))
return;
- if (IS_ALDERLAKE_S(dev_priv))
+ if (IS_ALDERLAKE_S(dev_priv) || IS_ALDERLAKE_P(dev_priv))
icl_get_bw_info(dev_priv, &adls_sa_info);
else if (IS_ROCKETLAKE(dev_priv))
icl_get_bw_info(dev_priv, &rkl_sa_info);
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c
index 4656a6edc3be..613ffcc68eba 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.c
@@ -28,6 +28,7 @@
#include "intel_cdclk.h"
#include "intel_de.h"
#include "intel_display_types.h"
+#include "intel_psr.h"
#include "intel_sideband.h"
/**
@@ -1547,6 +1548,35 @@ static void cnl_cdclk_pll_enable(struct drm_i915_private *dev_priv, int vco)
dev_priv->cdclk.hw.vco = vco;
}
+static bool has_cdclk_crawl(struct drm_i915_private *i915)
+{
+ return INTEL_INFO(i915)->has_cdclk_crawl;
+}
+
+static void adlp_cdclk_pll_crawl(struct drm_i915_private *dev_priv, int vco)
+{
+ int ratio = DIV_ROUND_CLOSEST(vco, dev_priv->cdclk.hw.ref);
+ u32 val;
+
+ /* Write PLL ratio without disabling */
+ val = CNL_CDCLK_PLL_RATIO(ratio) | BXT_DE_PLL_PLL_ENABLE;
+ intel_de_write(dev_priv, BXT_DE_PLL_ENABLE, val);
+
+ /* Submit freq change request */
+ val |= BXT_DE_PLL_FREQ_REQ;
+ intel_de_write(dev_priv, BXT_DE_PLL_ENABLE, val);
+
+ /* Timeout 200us */
+ if (intel_de_wait_for_set(dev_priv, BXT_DE_PLL_ENABLE,
+ BXT_DE_PLL_LOCK | BXT_DE_PLL_FREQ_REQ_ACK, 1))
+ DRM_ERROR("timeout waiting for FREQ change request ack\n");
+
+ val &= ~BXT_DE_PLL_FREQ_REQ;
+ intel_de_write(dev_priv, BXT_DE_PLL_ENABLE, val);
+
+ dev_priv->cdclk.hw.vco = vco;
+}
+
static u32 bxt_cdclk_cd2x_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
if (DISPLAY_VER(dev_priv) >= 12) {
@@ -1619,14 +1649,16 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
return;
}
- if (DISPLAY_VER(dev_priv) >= 11 || IS_CANNONLAKE(dev_priv)) {
+ if (has_cdclk_crawl(dev_priv) && dev_priv->cdclk.hw.vco > 0 && vco > 0) {
+ if (dev_priv->cdclk.hw.vco != vco)
+ adlp_cdclk_pll_crawl(dev_priv, vco);
+ } else if (DISPLAY_VER(dev_priv) >= 11 || IS_CANNONLAKE(dev_priv)) {
if (dev_priv->cdclk.hw.vco != 0 &&
dev_priv->cdclk.hw.vco != vco)
cnl_cdclk_pll_disable(dev_priv);
if (dev_priv->cdclk.hw.vco != vco)
cnl_cdclk_pll_enable(dev_priv, vco);
-
} else {
if (dev_priv->cdclk.hw.vco != 0 &&
dev_priv->cdclk.hw.vco != vco)
@@ -1819,6 +1851,28 @@ void intel_cdclk_uninit_hw(struct drm_i915_private *i915)
skl_cdclk_uninit_hw(i915);
}
+static bool intel_cdclk_can_crawl(struct drm_i915_private *dev_priv,
+ const struct intel_cdclk_config *a,
+ const struct intel_cdclk_config *b)
+{
+ int a_div, b_div;
+
+ if (!has_cdclk_crawl(dev_priv))
+ return false;
+
+ /*
+ * The vco and cd2x divider will change independently
+ * from each other, so we disallow cd2x changes when crawling.
+ */
+ a_div = DIV_ROUND_CLOSEST(a->vco, a->cdclk);
+ b_div = DIV_ROUND_CLOSEST(b->vco, b->cdclk);
+
+ return a->vco != 0 && b->vco != 0 &&
+ a->vco != b->vco &&
+ a_div == b_div &&
+ a->ref == b->ref;
+}
+
/**
 * intel_cdclk_needs_modeset - Determine if changing between the CDCLK
* configurations requires a modeset on all pipes
@@ -1908,6 +1962,12 @@ static void intel_set_cdclk(struct drm_i915_private *dev_priv,
intel_dump_cdclk_config(cdclk_config, "Changing CDCLK to");
+ for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ intel_psr_pause(intel_dp);
+ }
+
/*
* Lock aux/gmbus while we change cdclk in case those
* functions use cdclk. Not all platforms/ports do,
@@ -1930,6 +1990,12 @@ static void intel_set_cdclk(struct drm_i915_private *dev_priv,
}
mutex_unlock(&dev_priv->gmbus_mutex);
+ for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ intel_psr_resume(intel_dp);
+ }
+
if (drm_WARN(&dev_priv->drm,
intel_cdclk_changed(&dev_priv->cdclk.hw, cdclk_config),
"cdclk state doesn't match!\n")) {
@@ -2462,7 +2528,7 @@ int intel_modeset_calc_cdclk(struct intel_atomic_state *state)
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
const struct intel_cdclk_state *old_cdclk_state;
struct intel_cdclk_state *new_cdclk_state;
- enum pipe pipe;
+ enum pipe pipe = INVALID_PIPE;
int ret;
new_cdclk_state = intel_atomic_get_cdclk_state(state);
@@ -2514,15 +2580,18 @@ int intel_modeset_calc_cdclk(struct intel_atomic_state *state)
if (drm_atomic_crtc_needs_modeset(&crtc_state->uapi))
pipe = INVALID_PIPE;
- } else {
- pipe = INVALID_PIPE;
}
- if (pipe != INVALID_PIPE) {
+ if (intel_cdclk_can_crawl(dev_priv,
+ &old_cdclk_state->actual,
+ &new_cdclk_state->actual)) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Can change cdclk via crawl\n");
+ } else if (pipe != INVALID_PIPE) {
new_cdclk_state->pipe = pipe;
drm_dbg_kms(&dev_priv->drm,
- "Can change cdclk with pipe %c active\n",
+ "Can change cdclk cd2x divider with pipe %c active\n",
pipe_name(pipe));
} else if (intel_cdclk_needs_modeset(&old_cdclk_state->actual,
&new_cdclk_state->actual)) {
@@ -2531,8 +2600,6 @@ int intel_modeset_calc_cdclk(struct intel_atomic_state *state)
if (ret)
return ret;
- new_cdclk_state->pipe = INVALID_PIPE;
-
drm_dbg_kms(&dev_priv->drm,
"Modeset required for cdclk change\n");
}
diff --git a/drivers/gpu/drm/i915/display/intel_csr.h b/drivers/gpu/drm/i915/display/intel_csr.h
deleted file mode 100644
index 03c64f8af7ab..000000000000
--- a/drivers/gpu/drm/i915/display/intel_csr.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2019 Intel Corporation
- */
-
-#ifndef __INTEL_CSR_H__
-#define __INTEL_CSR_H__
-
-struct drm_i915_private;
-
-#define CSR_VERSION(major, minor) ((major) << 16 | (minor))
-#define CSR_VERSION_MAJOR(version) ((version) >> 16)
-#define CSR_VERSION_MINOR(version) ((version) & 0xffff)
-
-void intel_csr_ucode_init(struct drm_i915_private *i915);
-void intel_csr_load_program(struct drm_i915_private *i915);
-void intel_csr_ucode_fini(struct drm_i915_private *i915);
-void intel_csr_ucode_suspend(struct drm_i915_private *i915);
-void intel_csr_ucode_resume(struct drm_i915_private *i915);
-
-#endif /* __INTEL_CSR_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_cursor.c b/drivers/gpu/drm/i915/display/intel_cursor.c
index 2ab389b38694..966e020331fb 100644
--- a/drivers/gpu/drm/i915/display/intel_cursor.c
+++ b/drivers/gpu/drm/i915/display/intel_cursor.c
@@ -383,6 +383,10 @@ static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
if (plane_state->hw.rotation & DRM_MODE_ROTATE_180)
cntl |= MCURSOR_ROTATE_180;
+ /* Wa_22012358565:adlp */
+ if (DISPLAY_VER(dev_priv) == 13)
+ cntl |= MCURSOR_ARB_SLOTS(1);
+
return cntl;
}
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index eccbdd42d223..390869bd6b63 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -249,15 +249,48 @@ static u32 icl_pll_to_ddi_clk_sel(struct intel_encoder *encoder,
}
}
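+
+/*
+ * Map a DP port clock in kHz to the PHY link rate field programmed into
+ * DDI_BUF_CTL on ADL-P TypeC ports.
+ */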
+static u32 ddi_buf_phy_link_rate(int port_clock)
+{
+ switch (port_clock) {
+ case 162000:
+ return DDI_BUF_PHY_LINK_RATE(0);
+ case 216000:
+ return DDI_BUF_PHY_LINK_RATE(4);
+ case 243000:
+ return DDI_BUF_PHY_LINK_RATE(5);
+ case 270000:
+ return DDI_BUF_PHY_LINK_RATE(1);
+ case 324000:
+ return DDI_BUF_PHY_LINK_RATE(6);
+ case 432000:
+ return DDI_BUF_PHY_LINK_RATE(7);
+ case 540000:
+ return DDI_BUF_PHY_LINK_RATE(2);
+ case 810000:
+ return DDI_BUF_PHY_LINK_RATE(3);
+ default:
+ MISSING_CASE(port_clock);
+ return DDI_BUF_PHY_LINK_RATE(0);
+ }
+}
+
static void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+ enum phy phy = intel_port_to_phy(i915, encoder->port);
intel_dp->DP = dig_port->saved_port_bits |
DDI_BUF_CTL_ENABLE | DDI_BUF_TRANS_SELECT(0);
intel_dp->DP |= DDI_PORT_WIDTH(crtc_state->lane_count);
+
+ if (IS_ALDERLAKE_P(i915) && intel_phy_is_tc(i915, phy)) {
+ intel_dp->DP |= ddi_buf_phy_link_rate(crtc_state->port_clock);
+ if (dig_port->tc_mode != TC_PORT_TBT_ALT)
+ intel_dp->DP |= DDI_BUF_CTL_TC_PHY_OWNERSHIP;
+ }
}
static int icl_calc_tbt_pll_link(struct drm_i915_private *dev_priv,
@@ -979,6 +1012,8 @@ static u8 intel_ddi_dp_voltage_max(struct intel_dp *intel_dp,
if (DISPLAY_VER(dev_priv) >= 12) {
if (intel_phy_is_combo(dev_priv, phy))
tgl_get_combo_buf_trans(encoder, crtc_state, &n_entries);
+ else if (IS_ALDERLAKE_P(dev_priv))
+ adlp_get_dkl_buf_trans(encoder, crtc_state, &n_entries);
else
tgl_get_dkl_buf_trans(encoder, crtc_state, &n_entries);
} else if (DISPLAY_VER(dev_priv) == 11) {
@@ -1425,7 +1460,10 @@ tgl_dkl_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
if (enc_to_dig_port(encoder)->tc_mode == TC_PORT_TBT_ALT)
return;
- ddi_translations = tgl_get_dkl_buf_trans(encoder, crtc_state, &n_entries);
+ if (IS_ALDERLAKE_P(dev_priv))
+ ddi_translations = adlp_get_dkl_buf_trans(encoder, crtc_state, &n_entries);
+ else
+ ddi_translations = tgl_get_dkl_buf_trans(encoder, crtc_state, &n_entries);
if (drm_WARN_ON_ONCE(&dev_priv->drm, !ddi_translations))
return;
@@ -2772,7 +2810,6 @@ static void intel_ddi_pre_enable(struct intel_atomic_state *state,
conn_state);
/* FIXME precompute everything properly */
- /* FIXME how do we turn infoframes off again? */
if (dig_port->lspcon.active && dig_port->dp.has_hdmi_sink)
dig_port->set_infoframes(encoder,
crtc_state->has_infoframe,
@@ -3157,6 +3194,9 @@ static void intel_enable_ddi_hdmi(struct intel_atomic_state *state,
/* In HDMI/DVI mode, the port width, and swing/emphasis values
* are ignored so nothing special needs to be done besides
* enabling the port.
+ *
+ * On ADL_P the PHY link rate and lane count must be programmed but
+ * these are both 0 for HDMI.
*/
intel_de_write(dev_priv, DDI_BUF_CTL(port),
dig_port->saved_port_bits | DDI_BUF_CTL_ENABLE);
@@ -4022,9 +4062,11 @@ static int intel_ddi_compute_config_late(struct intel_encoder *encoder,
static void intel_ddi_encoder_destroy(struct drm_encoder *encoder)
{
+ struct drm_i915_private *i915 = to_i915(encoder->dev);
struct intel_digital_port *dig_port = enc_to_dig_port(to_intel_encoder(encoder));
intel_dp_encoder_flush_work(encoder);
+ intel_display_power_flush_work(i915);
drm_encoder_cleanup(encoder);
if (dig_port)
@@ -4688,9 +4730,12 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
dig_port->hpd_pulse = intel_dp_hpd_pulse;
- /* Splitter enable for eDP MSO is supported for pipe A only. */
- if (dig_port->dp.mso_link_count)
+ /* Splitter enable for eDP MSO is limited to certain pipes. */
+ if (dig_port->dp.mso_link_count) {
encoder->pipe_mask = BIT(PIPE_A);
+ if (IS_ALDERLAKE_P(dev_priv))
+ encoder->pipe_mask |= BIT(PIPE_B);
+ }
}
/* In theory we don't need the encoder->type check, but leave it just in
diff --git a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c
index 7bcdd5c12028..8bfd00f49f2a 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c
@@ -735,6 +735,34 @@ static const struct cnl_ddi_buf_trans rkl_combo_phy_ddi_translations_dp_hbr2_hbr
{ 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */
};
+static const struct tgl_dkl_phy_ddi_buf_trans adlp_dkl_phy_dp_ddi_trans_hbr[] = {
+ /* VS pre-emp Non-trans mV Pre-emph dB */
+ { 0x7, 0x0, 0x01 }, /* 0 0 400mV 0 dB */
+ { 0x5, 0x0, 0x06 }, /* 0 1 400mV 3.5 dB */
+ { 0x2, 0x0, 0x0B }, /* 0 2 400mV 6 dB */
+ { 0x0, 0x0, 0x17 }, /* 0 3 400mV 9.5 dB */
+ { 0x5, 0x0, 0x00 }, /* 1 0 600mV 0 dB */
+ { 0x2, 0x0, 0x08 }, /* 1 1 600mV 3.5 dB */
+ { 0x0, 0x0, 0x14 }, /* 1 2 600mV 6 dB */
+ { 0x2, 0x0, 0x00 }, /* 2 0 800mV 0 dB */
+ { 0x0, 0x0, 0x0B }, /* 2 1 800mV 3.5 dB */
+ { 0x0, 0x0, 0x00 }, /* 3 0 1200mV 0 dB */
+};
+
+static const struct tgl_dkl_phy_ddi_buf_trans adlp_dkl_phy_dp_ddi_trans_hbr2_hbr3[] = {
+ /* VS pre-emp Non-trans mV Pre-emph dB */
+ { 0x7, 0x0, 0x00 }, /* 0 0 400mV 0 dB */
+ { 0x5, 0x0, 0x04 }, /* 0 1 400mV 3.5 dB */
+ { 0x2, 0x0, 0x0A }, /* 0 2 400mV 6 dB */
+ { 0x0, 0x0, 0x18 }, /* 0 3 400mV 9.5 dB */
+ { 0x5, 0x0, 0x00 }, /* 1 0 600mV 0 dB */
+ { 0x2, 0x0, 0x06 }, /* 1 1 600mV 3.5 dB */
+ { 0x0, 0x0, 0x14 }, /* 1 2 600mV 6 dB */
+ { 0x2, 0x0, 0x00 }, /* 2 0 800mV 0 dB */
+ { 0x0, 0x0, 0x09 }, /* 2 1 800mV 3.5 dB */
+ { 0x0, 0x0, 0x00 }, /* 3 0 1200mV 0 dB */
+};
+
bool is_hobl_buf_trans(const struct cnl_ddi_buf_trans *table)
{
return table == tgl_combo_phy_ddi_translations_edp_hbr2_hobl;
@@ -1348,6 +1376,31 @@ tgl_get_dkl_buf_trans(struct intel_encoder *encoder,
return tgl_get_dkl_buf_trans_dp(encoder, crtc_state, n_entries);
}
+static const struct tgl_dkl_phy_ddi_buf_trans *
+adlp_get_dkl_buf_trans_dp(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries)
+{
+ if (crtc_state->port_clock > 270000) {
+ *n_entries = ARRAY_SIZE(adlp_dkl_phy_dp_ddi_trans_hbr2_hbr3);
+ return adlp_dkl_phy_dp_ddi_trans_hbr2_hbr3;
+ }
+
+ *n_entries = ARRAY_SIZE(adlp_dkl_phy_dp_ddi_trans_hbr);
+ return adlp_dkl_phy_dp_ddi_trans_hbr;
+}
+
+const struct tgl_dkl_phy_ddi_buf_trans *
+adlp_get_dkl_buf_trans(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries)
+{
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+ return tgl_get_dkl_buf_trans_hdmi(encoder, crtc_state, n_entries);
+ else
+ return adlp_get_dkl_buf_trans_dp(encoder, crtc_state, n_entries);
+}
+
int intel_ddi_hdmi_num_entries(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
int *default_entry)
diff --git a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.h b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.h
index f8f0ef87e977..4c2efab38642 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.h
+++ b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.h
@@ -67,6 +67,10 @@ bxt_get_buf_trans(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
int *n_entries);
+const struct tgl_dkl_phy_ddi_buf_trans *
+adlp_get_dkl_buf_trans(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries);
const struct cnl_ddi_buf_trans *
tgl_get_combo_buf_trans(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 422b59ebf6dc..362bff9beb5c 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -79,9 +79,9 @@
#include "intel_cdclk.h"
#include "intel_color.h"
#include "intel_crtc.h"
-#include "intel_csr.h"
#include "intel_de.h"
#include "intel_display_types.h"
+#include "intel_dmc.h"
#include "intel_dp_link_training.h"
#include "intel_fbc.h"
#include "intel_fdi.h"
@@ -975,6 +975,11 @@ void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
/* FIXME: assert CPU port conditions for SNB+ */
}
+ /* Wa_22012358565:adlp */
+ if (DISPLAY_VER(dev_priv) == 13)
+ intel_de_rmw(dev_priv, PIPE_ARB_CTL(pipe),
+ 0, PIPE_ARB_USE_PROG_SLOTS);
+
reg = PIPECONF(cpu_transcoder);
val = intel_de_read(dev_priv, reg);
if (val & PIPECONF_ENABLE) {
@@ -1690,7 +1695,8 @@ initial_plane_vma(struct drm_i915_private *i915,
* important and we should probably use that space with FBC or other
* features.
*/
- if (size * 2 > i915->stolen_usable_size)
+ if (IS_ENABLED(CONFIG_FRAMEBUFFER_CONSOLE) &&
+ size * 2 > i915->stolen_usable_size)
return NULL;
obj = i915_gem_object_create_stolen_for_preallocated(i915, base, size);
@@ -2208,6 +2214,21 @@ static void icl_set_pipe_chicken(struct intel_crtc *crtc)
* across pipe
*/
tmp |= PIXEL_ROUNDING_TRUNC_FB_PASSTHRU;
+
+ /*
+ * "The underrun recovery mechanism should be disabled
+ * when the following is enabled for this pipe:
+ * WiDi
+ * Downscaling (this includes YUV420 fullblend)
+ * COG
+ * DSC
+ * PSR2"
+ *
+ * FIXME: enable whenever possible...
+ */
+ if (IS_ALDERLAKE_P(dev_priv))
+ tmp |= UNDERRUN_RECOVERY_DISABLE;
+
intel_de_write(dev_priv, PIPE_CHICKEN(pipe), tmp);
}
@@ -3675,7 +3696,9 @@ bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy)
bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy)
{
- if (IS_TIGERLAKE(dev_priv))
+ if (IS_ALDERLAKE_P(dev_priv))
+ return phy >= PHY_F && phy <= PHY_I;
+ else if (IS_TIGERLAKE(dev_priv))
return phy >= PHY_D && phy <= PHY_I;
else if (IS_ICELAKE(dev_priv))
return phy >= PHY_C && phy <= PHY_F;
@@ -5714,8 +5737,12 @@ static void hsw_set_pipeconf(const struct intel_crtc_state *crtc_state)
static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ const struct intel_crtc_scaler_state *scaler_state =
+ &crtc_state->scaler_state;
+
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 val = 0;
+ int i;
switch (crtc_state->pipe_bpp) {
case 18:
@@ -5754,6 +5781,23 @@ static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state)
if (DISPLAY_VER(dev_priv) >= 12)
val |= PIPEMISC_PIXEL_ROUNDING_TRUNC;
+ if (IS_ALDERLAKE_P(dev_priv)) {
+ bool scaler_in_use = false;
+
+ for (i = 0; i < crtc->num_scalers; i++) {
+ if (!scaler_state->scalers[i].in_use)
+ continue;
+
+ scaler_in_use = true;
+ break;
+ }
+
+ intel_de_rmw(dev_priv, PIPE_MISC2(crtc->pipe),
+ PIPE_MISC2_UNDERRUN_BUBBLE_COUNTER_MASK,
+ scaler_in_use ? PIPE_MISC2_BUBBLE_COUNTER_SCALER_EN :
+ PIPE_MISC2_BUBBLE_COUNTER_SCALER_DIS);
+ }
+
intel_de_write(dev_priv, PIPEMISC(crtc->pipe), val);
}
@@ -7631,10 +7675,11 @@ static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
intel_hdmi_infoframe_enable(DP_SDP_VSC))
intel_dump_dp_vsc_sdp(dev_priv, &pipe_config->infoframes.vsc);
- drm_dbg_kms(&dev_priv->drm, "vrr: %s, vmin: %d, vmax: %d, pipeline full: %d, flipline: %d, vmin vblank: %d, vmax vblank: %d\n",
+ drm_dbg_kms(&dev_priv->drm, "vrr: %s, vmin: %d, vmax: %d, pipeline full: %d, guardband: %d flipline: %d, vmin vblank: %d, vmax vblank: %d\n",
yesno(pipe_config->vrr.enable),
pipe_config->vrr.vmin, pipe_config->vrr.vmax,
- pipe_config->vrr.pipeline_full, pipe_config->vrr.flipline,
+ pipe_config->vrr.pipeline_full, pipe_config->vrr.guardband,
+ pipe_config->vrr.flipline,
intel_vrr_vmin_vblank_start(pipe_config),
intel_vrr_vmax_vblank_start(pipe_config));
@@ -8270,6 +8315,16 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
} \
} while (0)
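+
+/* Like PIPE_CONF_CHECK_X, but compare only the bits selected by @mask. */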
+#define PIPE_CONF_CHECK_X_WITH_MASK(name, mask) do { \
+ if ((current_config->name & (mask)) != (pipe_config->name & (mask))) { \
+ pipe_config_mismatch(fastset, crtc, __stringify(name), \
+ "(expected 0x%08x, found 0x%08x)", \
+ current_config->name & (mask), \
+ pipe_config->name & (mask)); \
+ ret = false; \
+ } \
+} while (0)
+
#define PIPE_CONF_CHECK_I(name) do { \
if (current_config->name != pipe_config->name) { \
pipe_config_mismatch(fastset, crtc, __stringify(name), \
@@ -8558,6 +8613,11 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
bp_gamma = intel_color_get_gamma_bit_precision(pipe_config);
if (bp_gamma)
PIPE_CONF_CHECK_COLOR_LUT(gamma_mode, hw.gamma_lut, bp_gamma);
+
+ PIPE_CONF_CHECK_BOOL(has_psr);
+ PIPE_CONF_CHECK_BOOL(has_psr2);
+ PIPE_CONF_CHECK_BOOL(enable_psr2_sel_fetch);
+ PIPE_CONF_CHECK_I(dc3co_exitline);
}
PIPE_CONF_CHECK_BOOL(double_wide);
@@ -8611,7 +8671,12 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_I(min_voltage_level);
}
- PIPE_CONF_CHECK_X(infoframes.enable);
+ if (fastset && (current_config->has_psr || pipe_config->has_psr))
+ PIPE_CONF_CHECK_X_WITH_MASK(infoframes.enable,
+ ~intel_hdmi_infoframe_enable(DP_SDP_VSC));
+ else
+ PIPE_CONF_CHECK_X(infoframes.enable);
+
PIPE_CONF_CHECK_X(infoframes.gcp);
PIPE_CONF_CHECK_INFOFRAME(avi);
PIPE_CONF_CHECK_INFOFRAME(spd);
@@ -8640,11 +8705,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_I(vrr.vmax);
PIPE_CONF_CHECK_I(vrr.flipline);
PIPE_CONF_CHECK_I(vrr.pipeline_full);
-
- PIPE_CONF_CHECK_BOOL(has_psr);
- PIPE_CONF_CHECK_BOOL(has_psr2);
- PIPE_CONF_CHECK_BOOL(enable_psr2_sel_fetch);
- PIPE_CONF_CHECK_I(dc3co_exitline);
+ PIPE_CONF_CHECK_I(vrr.guardband);
#undef PIPE_CONF_CHECK_X
#undef PIPE_CONF_CHECK_I
@@ -8750,6 +8811,38 @@ static void verify_wm_state(struct intel_crtc *crtc,
hw_wm_level->lines);
}
+ hw_wm_level = &hw->wm.planes[plane->id].sagv.wm0;
+ sw_wm_level = &sw_wm->planes[plane->id].sagv.wm0;
+
+ if (HAS_HW_SAGV_WM(dev_priv) &&
+ !skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
+ drm_err(&dev_priv->drm,
+ "[PLANE:%d:%s] mismatch in SAGV WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
+ plane->base.base.id, plane->base.name,
+ sw_wm_level->enable,
+ sw_wm_level->blocks,
+ sw_wm_level->lines,
+ hw_wm_level->enable,
+ hw_wm_level->blocks,
+ hw_wm_level->lines);
+ }
+
+ hw_wm_level = &hw->wm.planes[plane->id].sagv.trans_wm;
+ sw_wm_level = &sw_wm->planes[plane->id].sagv.trans_wm;
+
+ if (HAS_HW_SAGV_WM(dev_priv) &&
+ !skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
+ drm_err(&dev_priv->drm,
+ "[PLANE:%d:%s] mismatch in SAGV trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
+ plane->base.base.id, plane->base.name,
+ sw_wm_level->enable,
+ sw_wm_level->blocks,
+ sw_wm_level->lines,
+ hw_wm_level->enable,
+ hw_wm_level->blocks,
+ hw_wm_level->lines);
+ }
+
/* DDB */
hw_ddb_entry = &hw->ddb_y[plane->id];
sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[plane->id];
@@ -9923,6 +10016,9 @@ static int intel_atomic_check(struct drm_device *dev,
if (ret)
goto fail;
+ if (intel_any_crtc_needs_modeset(state))
+ any_ms = true;
+
if (any_ms) {
ret = intel_modeset_checks(state);
if (ret)
@@ -11040,7 +11136,7 @@ intel_prepare_plane_fb(struct drm_plane *_plane,
if (ret < 0)
goto unpin_fb;
- fence = dma_resv_get_excl_rcu(obj->base.resv);
+ fence = dma_resv_get_excl_unlocked(obj->base.resv);
if (fence) {
add_rps_boost_after_vblank(new_plane_state->hw.crtc,
fence);
@@ -11219,7 +11315,14 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
if (!HAS_DISPLAY(dev_priv))
return;
- if (IS_ALDERLAKE_S(dev_priv)) {
+ if (IS_ALDERLAKE_P(dev_priv)) {
+ intel_ddi_init(dev_priv, PORT_A);
+ intel_ddi_init(dev_priv, PORT_B);
+ intel_ddi_init(dev_priv, PORT_TC1);
+ intel_ddi_init(dev_priv, PORT_TC2);
+ intel_ddi_init(dev_priv, PORT_TC3);
+ intel_ddi_init(dev_priv, PORT_TC4);
+ } else if (IS_ALDERLAKE_S(dev_priv)) {
intel_ddi_init(dev_priv, PORT_A);
intel_ddi_init(dev_priv, PORT_TC1);
intel_ddi_init(dev_priv, PORT_TC2);
@@ -12192,7 +12295,7 @@ int intel_modeset_init_noirq(struct drm_i915_private *i915)
if (!HAS_DISPLAY(i915))
return 0;
- intel_csr_ucode_init(i915);
+ intel_dmc_ucode_init(i915);
i915->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0);
i915->flip_wq = alloc_workqueue("i915_flip", WQ_HIGHPRI |
@@ -12200,19 +12303,21 @@ int intel_modeset_init_noirq(struct drm_i915_private *i915)
i915->framestart_delay = 1; /* 1-4 */
+ i915->window2_delay = 0; /* No DSB so no window2 delay */
+
intel_mode_config_init(i915);
ret = intel_cdclk_init(i915);
if (ret)
- goto cleanup_vga_client_pw_domain_csr;
+ goto cleanup_vga_client_pw_domain_dmc;
ret = intel_dbuf_init(i915);
if (ret)
- goto cleanup_vga_client_pw_domain_csr;
+ goto cleanup_vga_client_pw_domain_dmc;
ret = intel_bw_init(i915);
if (ret)
- goto cleanup_vga_client_pw_domain_csr;
+ goto cleanup_vga_client_pw_domain_dmc;
init_llist_head(&i915->atomic_helper.free_list);
INIT_WORK(&i915->atomic_helper.free_work,
@@ -12224,8 +12329,8 @@ int intel_modeset_init_noirq(struct drm_i915_private *i915)
return 0;
-cleanup_vga_client_pw_domain_csr:
- intel_csr_ucode_fini(i915);
+cleanup_vga_client_pw_domain_dmc:
+ intel_dmc_ucode_fini(i915);
intel_power_domains_driver_remove(i915);
intel_vga_unregister(i915);
cleanup_bios:
@@ -13304,7 +13409,7 @@ void intel_modeset_driver_remove_noirq(struct drm_i915_private *i915)
/* part #3: call after gem init */
void intel_modeset_driver_remove_nogem(struct drm_i915_private *i915)
{
- intel_csr_ucode_fini(i915);
+ intel_dmc_ucode_fini(i915);
intel_power_domains_driver_remove(i915);
diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
index d77a0ab5cacf..88bb05d5c483 100644
--- a/drivers/gpu/drm/i915/display/intel_display_debugfs.c
+++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
@@ -7,11 +7,11 @@
#include <drm/drm_fourcc.h>
#include "i915_debugfs.h"
-#include "intel_csr.h"
#include "intel_display_debugfs.h"
#include "intel_display_power.h"
#include "intel_de.h"
#include "intel_display_types.h"
+#include "intel_dmc.h"
#include "intel_dp.h"
#include "intel_fbc.h"
#include "intel_hdcp.h"
@@ -532,24 +532,24 @@ static int i915_dmc_info(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
intel_wakeref_t wakeref;
- struct intel_csr *csr;
+ struct intel_dmc *dmc;
i915_reg_t dc5_reg, dc6_reg = {};
- if (!HAS_CSR(dev_priv))
+ if (!HAS_DMC(dev_priv))
return -ENODEV;
- csr = &dev_priv->csr;
+ dmc = &dev_priv->dmc;
wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
- seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL));
- seq_printf(m, "path: %s\n", csr->fw_path);
+ seq_printf(m, "fw loaded: %s\n", yesno(intel_dmc_has_payload(dev_priv)));
+ seq_printf(m, "path: %s\n", dmc->fw_path);
- if (!csr->dmc_payload)
+ if (!intel_dmc_has_payload(dev_priv))
goto out;
- seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
- CSR_VERSION_MINOR(csr->version));
+ seq_printf(m, "version: %d.%d\n", DMC_VERSION_MAJOR(dmc->version),
+ DMC_VERSION_MINOR(dmc->version));
if (DISPLAY_VER(dev_priv) >= 12) {
if (IS_DGFX(dev_priv)) {
@@ -568,10 +568,10 @@ static int i915_dmc_info(struct seq_file *m, void *unused)
seq_printf(m, "DC3CO count: %d\n",
intel_de_read(dev_priv, DMC_DEBUG3));
} else {
- dc5_reg = IS_BROXTON(dev_priv) ? BXT_CSR_DC3_DC5_COUNT :
- SKL_CSR_DC3_DC5_COUNT;
+ dc5_reg = IS_BROXTON(dev_priv) ? BXT_DMC_DC3_DC5_COUNT :
+ SKL_DMC_DC3_DC5_COUNT;
if (!IS_GEMINILAKE(dev_priv) && !IS_BROXTON(dev_priv))
- dc6_reg = SKL_CSR_DC5_DC6_COUNT;
+ dc6_reg = SKL_DMC_DC5_DC6_COUNT;
}
seq_printf(m, "DC3 -> DC5 count: %d\n",
@@ -582,10 +582,10 @@ static int i915_dmc_info(struct seq_file *m, void *unused)
out:
seq_printf(m, "program base: 0x%08x\n",
- intel_de_read(dev_priv, CSR_PROGRAM(0)));
+ intel_de_read(dev_priv, DMC_PROGRAM(0)));
seq_printf(m, "ssp base: 0x%08x\n",
- intel_de_read(dev_priv, CSR_SSP_BASE));
- seq_printf(m, "htp: 0x%08x\n", intel_de_read(dev_priv, CSR_HTP_SKL));
+ intel_de_read(dev_priv, DMC_SSP_BASE));
+ seq_printf(m, "htp: 0x%08x\n", intel_de_read(dev_priv, DMC_HTP_SKL));
intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
index 54c6d65011ee..4298ae684d7d 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power.c
@@ -9,10 +9,10 @@
#include "i915_irq.h"
#include "intel_cdclk.h"
#include "intel_combo_phy.h"
-#include "intel_csr.h"
#include "intel_display_power.h"
#include "intel_de.h"
#include "intel_display_types.h"
+#include "intel_dmc.h"
#include "intel_dpio_phy.h"
#include "intel_hotplug.h"
#include "intel_pm.h"
@@ -291,8 +291,7 @@ static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv,
#define ICL_TBT_AUX_PW_TO_CH(pw_idx) \
((pw_idx) - ICL_PW_CTL_IDX_AUX_TBT1 + AUX_CH_C)
-static enum aux_ch icl_tc_phy_aux_ch(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
+static enum aux_ch icl_aux_pw_to_ch(const struct i915_power_well *power_well)
{
int pw_idx = power_well->desc->hsw.idx;
@@ -327,6 +326,15 @@ aux_ch_to_digital_port(struct drm_i915_private *dev_priv,
return dig_port;
}
+static enum phy icl_aux_pw_to_phy(struct drm_i915_private *i915,
+ const struct i915_power_well *power_well)
+{
+ enum aux_ch aux_ch = icl_aux_pw_to_ch(power_well);
+ struct intel_digital_port *dig_port = aux_ch_to_digital_port(i915, aux_ch);
+
+ return intel_port_to_phy(i915, dig_port->base.port);
+}
+
static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well,
bool timeout_expected)
@@ -468,15 +476,13 @@ static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
hsw_wait_for_power_well_disable(dev_priv, power_well);
}
-#define ICL_AUX_PW_TO_PHY(pw_idx) ((pw_idx) - ICL_PW_CTL_IDX_AUX_A)
-
static void
icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
int pw_idx = power_well->desc->hsw.idx;
- enum phy phy = ICL_AUX_PW_TO_PHY(pw_idx);
+ enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well);
u32 val;
drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv));
@@ -508,7 +514,7 @@ icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
{
const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
int pw_idx = power_well->desc->hsw.idx;
- enum phy phy = ICL_AUX_PW_TO_PHY(pw_idx);
+ enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well);
u32 val;
drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv));
@@ -595,7 +601,7 @@ static void
icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- enum aux_ch aux_ch = icl_tc_phy_aux_ch(dev_priv, power_well);
+ enum aux_ch aux_ch = icl_aux_pw_to_ch(power_well);
struct intel_digital_port *dig_port = aux_ch_to_digital_port(dev_priv, aux_ch);
const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
bool is_tbt = power_well->desc->hsw.is_tc_tbt;
@@ -619,11 +625,9 @@ icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
* or need to enable AUX on a legacy TypeC port as part of the TC-cold
* exit sequence.
*/
- timeout_expected = is_tbt;
- if (DISPLAY_VER(dev_priv) == 11 && dig_port->tc_legacy_port) {
+ timeout_expected = is_tbt || intel_tc_cold_requires_aux_pw(dig_port);
+ if (DISPLAY_VER(dev_priv) == 11 && dig_port->tc_legacy_port)
icl_tc_cold_exit(dev_priv);
- timeout_expected = true;
- }
hsw_wait_for_power_well_enable(dev_priv, power_well, timeout_expected);
@@ -645,7 +649,7 @@ static void
icl_tc_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- enum aux_ch aux_ch = icl_tc_phy_aux_ch(dev_priv, power_well);
+ enum aux_ch aux_ch = icl_aux_pw_to_ch(power_well);
struct intel_digital_port *dig_port = aux_ch_to_digital_port(dev_priv, aux_ch);
icl_tc_port_assert_ref_held(dev_priv, power_well, dig_port);
@@ -657,11 +661,9 @@ static void
icl_aux_power_well_enable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- int pw_idx = power_well->desc->hsw.idx;
- enum phy phy = ICL_AUX_PW_TO_PHY(pw_idx); /* non-TBT only */
- bool is_tbt = power_well->desc->hsw.is_tc_tbt;
+ enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well);
- if (is_tbt || intel_phy_is_tc(dev_priv, phy))
+ if (intel_phy_is_tc(dev_priv, phy))
return icl_tc_phy_aux_power_well_enable(dev_priv, power_well);
else if (IS_ICELAKE(dev_priv))
return icl_combo_phy_aux_power_well_enable(dev_priv,
@@ -674,11 +676,9 @@ static void
icl_aux_power_well_disable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- int pw_idx = power_well->desc->hsw.idx;
- enum phy phy = ICL_AUX_PW_TO_PHY(pw_idx); /* non-TBT only */
- bool is_tbt = power_well->desc->hsw.is_tc_tbt;
+ enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well);
- if (is_tbt || intel_phy_is_tc(dev_priv, phy))
+ if (intel_phy_is_tc(dev_priv, phy))
return icl_tc_phy_aux_power_well_disable(dev_priv, power_well);
else if (IS_ICELAKE(dev_priv))
return icl_combo_phy_aux_power_well_disable(dev_priv,
@@ -829,8 +829,8 @@ static void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv)
drm_dbg_kms(&dev_priv->drm,
"Resetting DC state tracking from %02x to %02x\n",
- dev_priv->csr.dc_state, val);
- dev_priv->csr.dc_state = val;
+ dev_priv->dmc.dc_state, val);
+ dev_priv->dmc.dc_state = val;
}
/**
@@ -865,8 +865,8 @@ static void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state)
return;
if (drm_WARN_ON_ONCE(&dev_priv->drm,
- state & ~dev_priv->csr.allowed_dc_mask))
- state &= dev_priv->csr.allowed_dc_mask;
+ state & ~dev_priv->dmc.allowed_dc_mask))
+ state &= dev_priv->dmc.allowed_dc_mask;
val = intel_de_read(dev_priv, DC_STATE_EN);
mask = gen9_dc_mask(dev_priv);
@@ -874,16 +874,16 @@ static void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state)
val & mask, state);
/* Check if DMC is ignoring our DC state requests */
- if ((val & mask) != dev_priv->csr.dc_state)
+ if ((val & mask) != dev_priv->dmc.dc_state)
drm_err(&dev_priv->drm, "DC state mismatch (0x%x -> 0x%x)\n",
- dev_priv->csr.dc_state, val & mask);
+ dev_priv->dmc.dc_state, val & mask);
val &= ~mask;
val |= state;
gen9_write_dc_state(dev_priv, val);
- dev_priv->csr.dc_state = val & mask;
+ dev_priv->dmc.dc_state = val & mask;
}
static u32
@@ -902,7 +902,7 @@ sanitize_target_dc_state(struct drm_i915_private *dev_priv,
if (target_dc_state != states[i])
continue;
- if (dev_priv->csr.allowed_dc_mask & target_dc_state)
+ if (dev_priv->dmc.allowed_dc_mask & target_dc_state)
break;
target_dc_state = states[i + 1];
@@ -958,15 +958,15 @@ static void bxt_disable_dc9(struct drm_i915_private *dev_priv)
intel_pps_unlock_regs_wa(dev_priv);
}
-static void assert_csr_loaded(struct drm_i915_private *dev_priv)
+static void assert_dmc_loaded(struct drm_i915_private *dev_priv)
{
drm_WARN_ONCE(&dev_priv->drm,
- !intel_de_read(dev_priv, CSR_PROGRAM(0)),
- "CSR program storage start is NULL\n");
- drm_WARN_ONCE(&dev_priv->drm, !intel_de_read(dev_priv, CSR_SSP_BASE),
- "CSR SSP Base Not fine\n");
- drm_WARN_ONCE(&dev_priv->drm, !intel_de_read(dev_priv, CSR_HTP_SKL),
- "CSR HTP Not fine\n");
+ !intel_de_read(dev_priv, DMC_PROGRAM(0)),
+ "DMC program storage start is NULL\n");
+ drm_WARN_ONCE(&dev_priv->drm, !intel_de_read(dev_priv, DMC_SSP_BASE),
+ "DMC SSP Base Not fine\n");
+ drm_WARN_ONCE(&dev_priv->drm, !intel_de_read(dev_priv, DMC_HTP_SKL),
+ "DMC HTP Not fine\n");
}
static struct i915_power_well *
@@ -1016,7 +1016,7 @@ void intel_display_power_set_target_dc_state(struct drm_i915_private *dev_priv,
state = sanitize_target_dc_state(dev_priv, state);
- if (state == dev_priv->csr.target_dc_state)
+ if (state == dev_priv->dmc.target_dc_state)
goto unlock;
dc_off_enabled = power_well->desc->ops->is_enabled(dev_priv,
@@ -1028,7 +1028,7 @@ void intel_display_power_set_target_dc_state(struct drm_i915_private *dev_priv,
if (!dc_off_enabled)
power_well->desc->ops->enable(dev_priv, power_well);
- dev_priv->csr.target_dc_state = state;
+ dev_priv->dmc.target_dc_state = state;
if (!dc_off_enabled)
power_well->desc->ops->disable(dev_priv, power_well);
@@ -1057,7 +1057,7 @@ static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
"DC5 already programmed to be enabled.\n");
assert_rpm_wakelock_held(&dev_priv->runtime_pm);
- assert_csr_loaded(dev_priv);
+ assert_dmc_loaded(dev_priv);
}
static void gen9_enable_dc5(struct drm_i915_private *dev_priv)
@@ -1084,7 +1084,7 @@ static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
DC_STATE_EN_UPTO_DC6),
"DC6 already programmed to be enabled.\n");
- assert_csr_loaded(dev_priv);
+ assert_dmc_loaded(dev_priv);
}
static void skl_enable_dc6(struct drm_i915_private *dev_priv)
@@ -1181,7 +1181,7 @@ static void gen9_disable_dc_states(struct drm_i915_private *dev_priv)
{
struct intel_cdclk_config cdclk_config = {};
- if (dev_priv->csr.target_dc_state == DC_STATE_EN_DC3CO) {
+ if (dev_priv->dmc.target_dc_state == DC_STATE_EN_DC3CO) {
tgl_disable_dc3co(dev_priv);
return;
}
@@ -1220,10 +1220,10 @@ static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- if (!dev_priv->csr.dmc_payload)
+ if (!intel_dmc_has_payload(dev_priv))
return;
- switch (dev_priv->csr.target_dc_state) {
+ switch (dev_priv->dmc.target_dc_state) {
case DC_STATE_EN_DC3CO:
tgl_enable_dc3co(dev_priv);
break;
@@ -2265,6 +2265,12 @@ intel_display_power_put_async_work(struct work_struct *work)
fetch_and_zero(&power_domains->async_put_domains[1]);
queue_async_put_domains_work(power_domains,
fetch_and_zero(&new_work_wakeref));
+ } else {
+ /*
+ * Cancel the work that got queued after this one got dequeued,
+ * since here we released the corresponding async-put reference.
+ */
+ cancel_delayed_work(&power_domains->async_put_work);
}
out_verify:
@@ -3072,7 +3078,6 @@ intel_display_power_put_mask_in_set(struct drm_i915_private *i915,
BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_D_XELPD) | \
BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_E_XELPD) | \
- BIT_ULL(POWER_DOMAIN_AUX_C) | \
BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC1) | \
BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC2) | \
BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC3) | \
@@ -3084,6 +3089,10 @@ intel_display_power_put_mask_in_set(struct drm_i915_private *i915,
BIT_ULL(POWER_DOMAIN_AUX_USBC2) | \
BIT_ULL(POWER_DOMAIN_AUX_USBC3) | \
BIT_ULL(POWER_DOMAIN_AUX_USBC4) | \
+ BIT_ULL(POWER_DOMAIN_AUX_TBT1) | \
+ BIT_ULL(POWER_DOMAIN_AUX_TBT2) | \
+ BIT_ULL(POWER_DOMAIN_AUX_TBT3) | \
+ BIT_ULL(POWER_DOMAIN_AUX_TBT4) | \
BIT_ULL(POWER_DOMAIN_INIT))
/*
@@ -5090,10 +5099,10 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
dev_priv->params.disable_power_well =
sanitize_disable_power_well_option(dev_priv,
dev_priv->params.disable_power_well);
- dev_priv->csr.allowed_dc_mask =
+ dev_priv->dmc.allowed_dc_mask =
get_allowed_dc_mask(dev_priv, dev_priv->params.enable_dc);
- dev_priv->csr.target_dc_state =
+ dev_priv->dmc.target_dc_state =
sanitize_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
BUILD_BUG_ON(POWER_DOMAIN_NUM > 64);
@@ -5245,6 +5254,9 @@ static void gen12_dbuf_slices_config(struct drm_i915_private *dev_priv)
{
enum dbuf_slice slice;
+ if (IS_ALDERLAKE_P(dev_priv))
+ return;
+
for_each_dbuf_slice(dev_priv, slice)
intel_de_rmw(dev_priv, DBUF_CTL_S(slice),
DBUF_TRACKER_STATE_SERVICE_MASK,
@@ -5256,6 +5268,9 @@ static void icl_mbus_init(struct drm_i915_private *dev_priv)
unsigned long abox_regs = INTEL_INFO(dev_priv)->abox_mask;
u32 mask, val, i;
+ if (IS_ALDERLAKE_P(dev_priv))
+ return;
+
mask = MBUS_ABOX_BT_CREDIT_POOL1_MASK |
MBUS_ABOX_BT_CREDIT_POOL2_MASK |
MBUS_ABOX_B_CREDIT_MASK |
@@ -5573,8 +5588,8 @@ static void skl_display_core_init(struct drm_i915_private *dev_priv,
gen9_dbuf_enable(dev_priv);
- if (resume && dev_priv->csr.dmc_payload)
- intel_csr_load_program(dev_priv);
+ if (resume && intel_dmc_has_payload(dev_priv))
+ intel_dmc_load_program(dev_priv);
}
static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
@@ -5640,8 +5655,8 @@ static void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume
gen9_dbuf_enable(dev_priv);
- if (resume && dev_priv->csr.dmc_payload)
- intel_csr_load_program(dev_priv);
+ if (resume && intel_dmc_has_payload(dev_priv))
+ intel_dmc_load_program(dev_priv);
}
static void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
@@ -5706,8 +5721,8 @@ static void cnl_display_core_init(struct drm_i915_private *dev_priv, bool resume
/* 6. Enable DBUF */
gen9_dbuf_enable(dev_priv);
- if (resume && dev_priv->csr.dmc_payload)
- intel_csr_load_program(dev_priv);
+ if (resume && intel_dmc_has_payload(dev_priv))
+ intel_dmc_load_program(dev_priv);
}
static void cnl_display_core_uninit(struct drm_i915_private *dev_priv)
@@ -5863,8 +5878,8 @@ static void icl_display_core_init(struct drm_i915_private *dev_priv,
if (DISPLAY_VER(dev_priv) >= 12)
tgl_bw_buddy_init(dev_priv);
- if (resume && dev_priv->csr.dmc_payload)
- intel_csr_load_program(dev_priv);
+ if (resume && intel_dmc_has_payload(dev_priv))
+ intel_dmc_load_program(dev_priv);
/* Wa_14011508470 */
if (DISPLAY_VER(dev_priv) == 12) {
@@ -6218,13 +6233,13 @@ void intel_power_domains_suspend(struct drm_i915_private *i915,
/*
* In case of suspend-to-idle (aka S0ix) on a DMC platform without DC9
* support don't manually deinit the power domains. This also means the
- * CSR/DMC firmware will stay active, it will power down any HW
+ * DMC firmware will stay active; it will power down any HW
* resources as required and also enable deeper system power states
* that would be blocked if the firmware was inactive.
*/
- if (!(i915->csr.allowed_dc_mask & DC_STATE_EN_DC9) &&
+ if (!(i915->dmc.allowed_dc_mask & DC_STATE_EN_DC9) &&
suspend_mode == I915_DRM_SUSPEND_IDLE &&
- i915->csr.dmc_payload) {
+ intel_dmc_has_payload(i915)) {
intel_display_power_flush_work(i915);
intel_power_domains_verify_state(i915);
return;
@@ -6414,19 +6429,19 @@ void intel_display_power_resume(struct drm_i915_private *i915)
if (DISPLAY_VER(i915) >= 11) {
bxt_disable_dc9(i915);
icl_display_core_init(i915, true);
- if (i915->csr.dmc_payload) {
- if (i915->csr.allowed_dc_mask &
+ if (intel_dmc_has_payload(i915)) {
+ if (i915->dmc.allowed_dc_mask &
DC_STATE_EN_UPTO_DC6)
skl_enable_dc6(i915);
- else if (i915->csr.allowed_dc_mask &
+ else if (i915->dmc.allowed_dc_mask &
DC_STATE_EN_UPTO_DC5)
gen9_enable_dc5(i915);
}
} else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) {
bxt_disable_dc9(i915);
bxt_display_core_init(i915, true);
- if (i915->csr.dmc_payload &&
- (i915->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5))
+ if (intel_dmc_has_payload(i915) &&
+ (i915->dmc.allowed_dc_mask & DC_STATE_EN_UPTO_DC5))
gen9_enable_dc5(i915);
} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
hsw_disable_pc8(i915);
diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
index 9c0adfc60c6f..ee7cbdd7db87 100644
--- a/drivers/gpu/drm/i915/display/intel_display_types.h
+++ b/drivers/gpu/drm/i915/display/intel_display_types.h
@@ -1202,7 +1202,7 @@ struct intel_crtc_state {
struct {
bool enable;
u8 pipeline_full;
- u16 flipline, vmin, vmax;
+ u16 flipline, vmin, vmax, guardband;
} vrr;
/* Stream Splitter for eDP MSO */
@@ -1482,6 +1482,7 @@ struct intel_psr {
bool sink_support;
bool source_support;
bool enabled;
+ bool paused;
enum pipe pipe;
enum transcoder transcoder;
bool active;
@@ -1498,7 +1499,7 @@ struct intel_psr {
bool sink_not_reliable;
bool irq_aux_error;
u16 su_x_granularity;
- bool dc3co_enabled;
+ u32 dc3co_exitline;
u32 dc3co_exit_delay;
struct delayed_work dc3co_work;
struct drm_dp_vsc_sdp vsc;
diff --git a/drivers/gpu/drm/i915/display/intel_csr.c b/drivers/gpu/drm/i915/display/intel_dmc.c
index 26a922d34263..97308da28059 100644
--- a/drivers/gpu/drm/i915/display/intel_csr.c
+++ b/drivers/gpu/drm/i915/display/intel_dmc.c
@@ -26,14 +26,13 @@
#include "i915_drv.h"
#include "i915_reg.h"
-#include "intel_csr.h"
#include "intel_de.h"
+#include "intel_dmc.h"
/**
- * DOC: csr support for dmc
+ * DOC: DMC Firmware Support
*
- * Display Context Save and Restore (CSR) firmware support added from gen9
- * onwards to drive newly added DMC (Display microcontroller) in display
+ * From gen9 onwards we have a newly added DMC (Display microcontroller) in the
* display engine to save and restore the state of the display engine when it
* enters a low-power state and comes back to normal.
*/
@@ -44,55 +43,55 @@
__stringify(major) "_" \
__stringify(minor) ".bin"
-#define GEN12_CSR_MAX_FW_SIZE ICL_CSR_MAX_FW_SIZE
+#define GEN12_DMC_MAX_FW_SIZE ICL_DMC_MAX_FW_SIZE
-#define ADLS_CSR_PATH DMC_PATH(adls, 2, 01)
-#define ADLS_CSR_VERSION_REQUIRED CSR_VERSION(2, 1)
-MODULE_FIRMWARE(ADLS_CSR_PATH);
+#define ADLS_DMC_PATH DMC_PATH(adls, 2, 01)
+#define ADLS_DMC_VERSION_REQUIRED DMC_VERSION(2, 1)
+MODULE_FIRMWARE(ADLS_DMC_PATH);
-#define DG1_CSR_PATH DMC_PATH(dg1, 2, 02)
-#define DG1_CSR_VERSION_REQUIRED CSR_VERSION(2, 2)
-MODULE_FIRMWARE(DG1_CSR_PATH);
+#define DG1_DMC_PATH DMC_PATH(dg1, 2, 02)
+#define DG1_DMC_VERSION_REQUIRED DMC_VERSION(2, 2)
+MODULE_FIRMWARE(DG1_DMC_PATH);
-#define RKL_CSR_PATH DMC_PATH(rkl, 2, 02)
-#define RKL_CSR_VERSION_REQUIRED CSR_VERSION(2, 2)
-MODULE_FIRMWARE(RKL_CSR_PATH);
+#define RKL_DMC_PATH DMC_PATH(rkl, 2, 02)
+#define RKL_DMC_VERSION_REQUIRED DMC_VERSION(2, 2)
+MODULE_FIRMWARE(RKL_DMC_PATH);
-#define TGL_CSR_PATH DMC_PATH(tgl, 2, 08)
-#define TGL_CSR_VERSION_REQUIRED CSR_VERSION(2, 8)
-MODULE_FIRMWARE(TGL_CSR_PATH);
+#define TGL_DMC_PATH DMC_PATH(tgl, 2, 08)
+#define TGL_DMC_VERSION_REQUIRED DMC_VERSION(2, 8)
+MODULE_FIRMWARE(TGL_DMC_PATH);
-#define ICL_CSR_PATH DMC_PATH(icl, 1, 09)
-#define ICL_CSR_VERSION_REQUIRED CSR_VERSION(1, 9)
-#define ICL_CSR_MAX_FW_SIZE 0x6000
-MODULE_FIRMWARE(ICL_CSR_PATH);
+#define ICL_DMC_PATH DMC_PATH(icl, 1, 09)
+#define ICL_DMC_VERSION_REQUIRED DMC_VERSION(1, 9)
+#define ICL_DMC_MAX_FW_SIZE 0x6000
+MODULE_FIRMWARE(ICL_DMC_PATH);
-#define CNL_CSR_PATH DMC_PATH(cnl, 1, 07)
-#define CNL_CSR_VERSION_REQUIRED CSR_VERSION(1, 7)
-#define CNL_CSR_MAX_FW_SIZE GLK_CSR_MAX_FW_SIZE
-MODULE_FIRMWARE(CNL_CSR_PATH);
+#define CNL_DMC_PATH DMC_PATH(cnl, 1, 07)
+#define CNL_DMC_VERSION_REQUIRED DMC_VERSION(1, 7)
+#define CNL_DMC_MAX_FW_SIZE GLK_DMC_MAX_FW_SIZE
+MODULE_FIRMWARE(CNL_DMC_PATH);
-#define GLK_CSR_PATH DMC_PATH(glk, 1, 04)
-#define GLK_CSR_VERSION_REQUIRED CSR_VERSION(1, 4)
-#define GLK_CSR_MAX_FW_SIZE 0x4000
-MODULE_FIRMWARE(GLK_CSR_PATH);
+#define GLK_DMC_PATH DMC_PATH(glk, 1, 04)
+#define GLK_DMC_VERSION_REQUIRED DMC_VERSION(1, 4)
+#define GLK_DMC_MAX_FW_SIZE 0x4000
+MODULE_FIRMWARE(GLK_DMC_PATH);
-#define KBL_CSR_PATH DMC_PATH(kbl, 1, 04)
-#define KBL_CSR_VERSION_REQUIRED CSR_VERSION(1, 4)
-#define KBL_CSR_MAX_FW_SIZE BXT_CSR_MAX_FW_SIZE
-MODULE_FIRMWARE(KBL_CSR_PATH);
+#define KBL_DMC_PATH DMC_PATH(kbl, 1, 04)
+#define KBL_DMC_VERSION_REQUIRED DMC_VERSION(1, 4)
+#define KBL_DMC_MAX_FW_SIZE BXT_DMC_MAX_FW_SIZE
+MODULE_FIRMWARE(KBL_DMC_PATH);
-#define SKL_CSR_PATH DMC_PATH(skl, 1, 27)
-#define SKL_CSR_VERSION_REQUIRED CSR_VERSION(1, 27)
-#define SKL_CSR_MAX_FW_SIZE BXT_CSR_MAX_FW_SIZE
-MODULE_FIRMWARE(SKL_CSR_PATH);
+#define SKL_DMC_PATH DMC_PATH(skl, 1, 27)
+#define SKL_DMC_VERSION_REQUIRED DMC_VERSION(1, 27)
+#define SKL_DMC_MAX_FW_SIZE BXT_DMC_MAX_FW_SIZE
+MODULE_FIRMWARE(SKL_DMC_PATH);
-#define BXT_CSR_PATH DMC_PATH(bxt, 1, 07)
-#define BXT_CSR_VERSION_REQUIRED CSR_VERSION(1, 7)
-#define BXT_CSR_MAX_FW_SIZE 0x3000
-MODULE_FIRMWARE(BXT_CSR_PATH);
+#define BXT_DMC_PATH DMC_PATH(bxt, 1, 07)
+#define BXT_DMC_VERSION_REQUIRED DMC_VERSION(1, 7)
+#define BXT_DMC_MAX_FW_SIZE 0x3000
+MODULE_FIRMWARE(BXT_DMC_PATH);
-#define CSR_DEFAULT_FW_OFFSET 0xFFFFFFFF
+#define DMC_DEFAULT_FW_OFFSET 0xFFFFFFFF
#define PACKAGE_MAX_FW_INFO_ENTRIES 20
#define PACKAGE_V2_MAX_FW_INFO_ENTRIES 32
#define DMC_V1_MAX_MMIO_COUNT 8
@@ -238,6 +237,11 @@ struct stepping_info {
char substepping;
};
+bool intel_dmc_has_payload(struct drm_i915_private *i915)
+{
+ return i915->dmc.dmc_payload;
+}
+
static const struct stepping_info skl_stepping_info[] = {
{'A', '0'}, {'B', '0'}, {'C', '0'},
{'D', '0'}, {'E', '0'}, {'F', '0'},
@@ -303,47 +307,47 @@ static void gen9_set_dc_state_debugmask(struct drm_i915_private *dev_priv)
}
/**
- * intel_csr_load_program() - write the firmware from memory to register.
+ * intel_dmc_load_program() - write the firmware from memory to register.
* @dev_priv: i915 drm device.
*
- * CSR firmware is read from a .bin file and kept in internal memory one time.
+ * DMC firmware is read from a .bin file and kept in internal memory one time.
* Every time the display comes back from a low-power state this function is
* called to copy the firmware from internal memory to registers.
*/
-void intel_csr_load_program(struct drm_i915_private *dev_priv)
+void intel_dmc_load_program(struct drm_i915_private *dev_priv)
{
- u32 *payload = dev_priv->csr.dmc_payload;
+ u32 *payload = dev_priv->dmc.dmc_payload;
u32 i, fw_size;
- if (!HAS_CSR(dev_priv)) {
+ if (!HAS_DMC(dev_priv)) {
drm_err(&dev_priv->drm,
- "No CSR support available for this platform\n");
+ "No DMC support available for this platform\n");
return;
}
- if (!dev_priv->csr.dmc_payload) {
+ if (!intel_dmc_has_payload(dev_priv)) {
drm_err(&dev_priv->drm,
"Tried to program CSR with empty payload\n");
return;
}
- fw_size = dev_priv->csr.dmc_fw_size;
+ fw_size = dev_priv->dmc.dmc_fw_size;
assert_rpm_wakelock_held(&dev_priv->runtime_pm);
preempt_disable();
for (i = 0; i < fw_size; i++)
- intel_uncore_write_fw(&dev_priv->uncore, CSR_PROGRAM(i),
+ intel_uncore_write_fw(&dev_priv->uncore, DMC_PROGRAM(i),
payload[i]);
preempt_enable();
- for (i = 0; i < dev_priv->csr.mmio_count; i++) {
- intel_de_write(dev_priv, dev_priv->csr.mmioaddr[i],
- dev_priv->csr.mmiodata[i]);
+ for (i = 0; i < dev_priv->dmc.mmio_count; i++) {
+ intel_de_write(dev_priv, dev_priv->dmc.mmioaddr[i],
+ dev_priv->dmc.mmiodata[i]);
}
- dev_priv->csr.dc_state = 0;
+ dev_priv->dmc.dc_state = 0;
gen9_set_dc_state_debugmask(dev_priv);
}
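/*
 * Editor's note: a minimal, illustrative sketch (not part of the patch) of
 * the resume-path pattern used by the display core init code in this series:
 * the payload is parsed once at init time and re-programmed into the
 * DMC_PROGRAM() registers whenever the display core is brought back up. The
 * function name below is hypothetical.
 */
static void example_display_core_resume(struct drm_i915_private *i915, bool resume)
{
	/* ...power wells and display core init done first... */
	if (resume && intel_dmc_has_payload(i915))
		intel_dmc_load_program(i915);
}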
@@ -357,7 +361,7 @@ static u32 find_dmc_fw_offset(const struct intel_fw_info *fw_info,
const struct stepping_info *si,
u8 package_ver)
{
- u32 dmc_offset = CSR_DEFAULT_FW_OFFSET;
+ u32 dmc_offset = DMC_DEFAULT_FW_OFFSET;
unsigned int i;
for (i = 0; i < num_entries; i++) {
@@ -392,17 +396,18 @@ static u32 find_dmc_fw_offset(const struct intel_fw_info *fw_info,
return dmc_offset;
}
-static u32 parse_csr_fw_dmc(struct intel_csr *csr,
- const struct intel_dmc_header_base *dmc_header,
- size_t rem_size)
+static u32 parse_dmc_fw_header(struct intel_dmc *dmc,
+ const struct intel_dmc_header_base *dmc_header,
+ size_t rem_size)
{
+ struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), dmc);
unsigned int header_len_bytes, dmc_header_size, payload_size, i;
const u32 *mmioaddr, *mmiodata;
u32 mmio_count, mmio_count_max;
u8 *payload;
- BUILD_BUG_ON(ARRAY_SIZE(csr->mmioaddr) < DMC_V3_MAX_MMIO_COUNT ||
- ARRAY_SIZE(csr->mmioaddr) < DMC_V1_MAX_MMIO_COUNT);
+ BUILD_BUG_ON(ARRAY_SIZE(dmc->mmioaddr) < DMC_V3_MAX_MMIO_COUNT ||
+ ARRAY_SIZE(dmc->mmioaddr) < DMC_V1_MAX_MMIO_COUNT);
/*
* Check if we can access common fields; we will check again below
@@ -440,34 +445,34 @@ static u32 parse_csr_fw_dmc(struct intel_csr *csr,
header_len_bytes = dmc_header->header_len;
dmc_header_size = sizeof(*v1);
} else {
- DRM_ERROR("Unknown DMC fw header version: %u\n",
- dmc_header->header_ver);
+ drm_err(&i915->drm, "Unknown DMC fw header version: %u\n",
+ dmc_header->header_ver);
return 0;
}
if (header_len_bytes != dmc_header_size) {
- DRM_ERROR("DMC firmware has wrong dmc header length "
- "(%u bytes)\n", header_len_bytes);
+ drm_err(&i915->drm, "DMC firmware has wrong dmc header length "
+ "(%u bytes)\n", header_len_bytes);
return 0;
}
/* Cache the dmc header info. */
if (mmio_count > mmio_count_max) {
- DRM_ERROR("DMC firmware has wrong mmio count %u\n", mmio_count);
+ drm_err(&i915->drm, "DMC firmware has wrong mmio count %u\n", mmio_count);
return 0;
}
for (i = 0; i < mmio_count; i++) {
- if (mmioaddr[i] < CSR_MMIO_START_RANGE ||
- mmioaddr[i] > CSR_MMIO_END_RANGE) {
- DRM_ERROR("DMC firmware has wrong mmio address 0x%x\n",
- mmioaddr[i]);
+ if (mmioaddr[i] < DMC_MMIO_START_RANGE ||
+ mmioaddr[i] > DMC_MMIO_END_RANGE) {
+ drm_err(&i915->drm, "DMC firmware has wrong mmio address 0x%x\n",
+ mmioaddr[i]);
return 0;
}
- csr->mmioaddr[i] = _MMIO(mmioaddr[i]);
- csr->mmiodata[i] = mmiodata[i];
+ dmc->mmioaddr[i] = _MMIO(mmioaddr[i]);
+ dmc->mmiodata[i] = mmiodata[i];
}
- csr->mmio_count = mmio_count;
+ dmc->mmio_count = mmio_count;
rem_size -= header_len_bytes;
@@ -476,34 +481,33 @@ static u32 parse_csr_fw_dmc(struct intel_csr *csr,
if (rem_size < payload_size)
goto error_truncated;
- if (payload_size > csr->max_fw_size) {
- DRM_ERROR("DMC FW too big (%u bytes)\n", payload_size);
+ if (payload_size > dmc->max_fw_size) {
+ drm_err(&i915->drm, "DMC FW too big (%u bytes)\n", payload_size);
return 0;
}
- csr->dmc_fw_size = dmc_header->fw_size;
+ dmc->dmc_fw_size = dmc_header->fw_size;
- csr->dmc_payload = kmalloc(payload_size, GFP_KERNEL);
- if (!csr->dmc_payload) {
- DRM_ERROR("Memory allocation failed for dmc payload\n");
+ dmc->dmc_payload = kmalloc(payload_size, GFP_KERNEL);
+ if (!dmc->dmc_payload)
return 0;
- }
payload = (u8 *)(dmc_header) + header_len_bytes;
- memcpy(csr->dmc_payload, payload, payload_size);
+ memcpy(dmc->dmc_payload, payload, payload_size);
return header_len_bytes + payload_size;
error_truncated:
- DRM_ERROR("Truncated DMC firmware, refusing.\n");
+ drm_err(&i915->drm, "Truncated DMC firmware, refusing.\n");
return 0;
}
static u32
-parse_csr_fw_package(struct intel_csr *csr,
+parse_dmc_fw_package(struct intel_dmc *dmc,
const struct intel_package_header *package_header,
const struct stepping_info *si,
size_t rem_size)
{
+ struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), dmc);
u32 package_size = sizeof(struct intel_package_header);
u32 num_entries, max_entries, dmc_offset;
const struct intel_fw_info *fw_info;
@@ -516,8 +520,8 @@ parse_csr_fw_package(struct intel_csr *csr,
} else if (package_header->header_ver == 2) {
max_entries = PACKAGE_V2_MAX_FW_INFO_ENTRIES;
} else {
- DRM_ERROR("DMC firmware has unknown header version %u\n",
- package_header->header_ver);
+ drm_err(&i915->drm, "DMC firmware has unknown header version %u\n",
+ package_header->header_ver);
return 0;
}
@@ -530,8 +534,8 @@ parse_csr_fw_package(struct intel_csr *csr,
goto error_truncated;
if (package_header->header_len * 4 != package_size) {
- DRM_ERROR("DMC firmware has wrong package header length "
- "(%u bytes)\n", package_size);
+ drm_err(&i915->drm, "DMC firmware has wrong package header length "
+ "(%u bytes)\n", package_size);
return 0;
}
@@ -543,9 +547,9 @@ parse_csr_fw_package(struct intel_csr *csr,
((u8 *)package_header + sizeof(*package_header));
dmc_offset = find_dmc_fw_offset(fw_info, num_entries, si,
package_header->header_ver);
- if (dmc_offset == CSR_DEFAULT_FW_OFFSET) {
- DRM_ERROR("DMC firmware not supported for %c stepping\n",
- si->stepping);
+ if (dmc_offset == DMC_DEFAULT_FW_OFFSET) {
+ drm_err(&i915->drm, "DMC firmware not supported for %c stepping\n",
+ si->stepping);
return 0;
}
@@ -553,51 +557,53 @@ parse_csr_fw_package(struct intel_csr *csr,
return package_size + dmc_offset * 4;
error_truncated:
- DRM_ERROR("Truncated DMC firmware, refusing.\n");
+ drm_err(&i915->drm, "Truncated DMC firmware, refusing.\n");
return 0;
}
/* Return number of bytes parsed or 0 on error */
-static u32 parse_csr_fw_css(struct intel_csr *csr,
+static u32 parse_dmc_fw_css(struct intel_dmc *dmc,
struct intel_css_header *css_header,
size_t rem_size)
{
+ struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), dmc);
+
if (rem_size < sizeof(struct intel_css_header)) {
- DRM_ERROR("Truncated DMC firmware, refusing.\n");
+ drm_err(&i915->drm, "Truncated DMC firmware, refusing.\n");
return 0;
}
if (sizeof(struct intel_css_header) !=
(css_header->header_len * 4)) {
- DRM_ERROR("DMC firmware has wrong CSS header length "
- "(%u bytes)\n",
- (css_header->header_len * 4));
+ drm_err(&i915->drm, "DMC firmware has wrong CSS header length "
+ "(%u bytes)\n",
+ (css_header->header_len * 4));
return 0;
}
- if (csr->required_version &&
- css_header->version != csr->required_version) {
- DRM_INFO("Refusing to load DMC firmware v%u.%u,"
+ if (dmc->required_version &&
+ css_header->version != dmc->required_version) {
+ drm_info(&i915->drm, "Refusing to load DMC firmware v%u.%u,"
" please use v%u.%u\n",
- CSR_VERSION_MAJOR(css_header->version),
- CSR_VERSION_MINOR(css_header->version),
- CSR_VERSION_MAJOR(csr->required_version),
- CSR_VERSION_MINOR(csr->required_version));
+ DMC_VERSION_MAJOR(css_header->version),
+ DMC_VERSION_MINOR(css_header->version),
+ DMC_VERSION_MAJOR(dmc->required_version),
+ DMC_VERSION_MINOR(dmc->required_version));
return 0;
}
- csr->version = css_header->version;
+ dmc->version = css_header->version;
return sizeof(struct intel_css_header);
}
-static void parse_csr_fw(struct drm_i915_private *dev_priv,
+static void parse_dmc_fw(struct drm_i915_private *dev_priv,
const struct firmware *fw)
{
struct intel_css_header *css_header;
struct intel_package_header *package_header;
struct intel_dmc_header_base *dmc_header;
- struct intel_csr *csr = &dev_priv->csr;
+ struct intel_dmc *dmc = &dev_priv->dmc;
const struct stepping_info *si = intel_get_stepping_info(dev_priv);
u32 readcount = 0;
u32 r;
@@ -607,7 +613,7 @@ static void parse_csr_fw(struct drm_i915_private *dev_priv,
/* Extract CSS Header information */
css_header = (struct intel_css_header *)fw->data;
- r = parse_csr_fw_css(csr, css_header, fw->size);
+ r = parse_dmc_fw_css(dmc, css_header, fw->size);
if (!r)
return;
@@ -615,7 +621,7 @@ static void parse_csr_fw(struct drm_i915_private *dev_priv,
/* Extract Package Header information */
package_header = (struct intel_package_header *)&fw->data[readcount];
- r = parse_csr_fw_package(csr, package_header, si, fw->size - readcount);
+ r = parse_dmc_fw_package(dmc, package_header, si, fw->size - readcount);
if (!r)
return;
@@ -623,49 +629,49 @@ static void parse_csr_fw(struct drm_i915_private *dev_priv,
/* Extract dmc_header information */
dmc_header = (struct intel_dmc_header_base *)&fw->data[readcount];
- parse_csr_fw_dmc(csr, dmc_header, fw->size - readcount);
+ parse_dmc_fw_header(dmc, dmc_header, fw->size - readcount);
}
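/*
 * Editor's note: illustrative layout sketch only, not part of the patch. The
 * firmware blob walked above is consumed front to back; each stage returns
 * the number of bytes to skip before the next one, or 0 on error, which
 * aborts the parse and leaves dmc->dmc_payload unset:
 *
 *   fw->data: [ CSS header ][ package header + fw_info[] ][ DMC header ][ payload ]
 *                           ^                             ^
 *                           readcount after               readcount after
 *                           parse_dmc_fw_css()            parse_dmc_fw_package()
 */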
-static void intel_csr_runtime_pm_get(struct drm_i915_private *dev_priv)
+static void intel_dmc_runtime_pm_get(struct drm_i915_private *dev_priv)
{
- drm_WARN_ON(&dev_priv->drm, dev_priv->csr.wakeref);
- dev_priv->csr.wakeref =
+ drm_WARN_ON(&dev_priv->drm, dev_priv->dmc.wakeref);
+ dev_priv->dmc.wakeref =
intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
}
-static void intel_csr_runtime_pm_put(struct drm_i915_private *dev_priv)
+static void intel_dmc_runtime_pm_put(struct drm_i915_private *dev_priv)
{
intel_wakeref_t wakeref __maybe_unused =
- fetch_and_zero(&dev_priv->csr.wakeref);
+ fetch_and_zero(&dev_priv->dmc.wakeref);
intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
}
-static void csr_load_work_fn(struct work_struct *work)
+static void dmc_load_work_fn(struct work_struct *work)
{
struct drm_i915_private *dev_priv;
- struct intel_csr *csr;
+ struct intel_dmc *dmc;
const struct firmware *fw = NULL;
- dev_priv = container_of(work, typeof(*dev_priv), csr.work);
- csr = &dev_priv->csr;
+ dev_priv = container_of(work, typeof(*dev_priv), dmc.work);
+ dmc = &dev_priv->dmc;
- request_firmware(&fw, dev_priv->csr.fw_path, dev_priv->drm.dev);
- parse_csr_fw(dev_priv, fw);
+ request_firmware(&fw, dev_priv->dmc.fw_path, dev_priv->drm.dev);
+ parse_dmc_fw(dev_priv, fw);
- if (dev_priv->csr.dmc_payload) {
- intel_csr_load_program(dev_priv);
- intel_csr_runtime_pm_put(dev_priv);
+ if (intel_dmc_has_payload(dev_priv)) {
+ intel_dmc_load_program(dev_priv);
+ intel_dmc_runtime_pm_put(dev_priv);
drm_info(&dev_priv->drm,
"Finished loading DMC firmware %s (v%u.%u)\n",
- dev_priv->csr.fw_path, CSR_VERSION_MAJOR(csr->version),
- CSR_VERSION_MINOR(csr->version));
+ dev_priv->dmc.fw_path, DMC_VERSION_MAJOR(dmc->version),
+ DMC_VERSION_MINOR(dmc->version));
} else {
drm_notice(&dev_priv->drm,
"Failed to load DMC firmware %s."
" Disabling runtime power management.\n",
- csr->fw_path);
+ dmc->fw_path);
drm_notice(&dev_priv->drm, "DMC firmware homepage: %s",
INTEL_UC_FIRMWARE_URL);
}
@@ -674,152 +680,152 @@ static void csr_load_work_fn(struct work_struct *work)
}
/**
- * intel_csr_ucode_init() - initialize the firmware loading.
+ * intel_dmc_ucode_init() - initialize the firmware loading.
* @dev_priv: i915 drm device.
*
* This function is called at the time of loading the display driver to read the
* firmware from a .bin file and copy it into internal memory.
*/
-void intel_csr_ucode_init(struct drm_i915_private *dev_priv)
+void intel_dmc_ucode_init(struct drm_i915_private *dev_priv)
{
- struct intel_csr *csr = &dev_priv->csr;
+ struct intel_dmc *dmc = &dev_priv->dmc;
- INIT_WORK(&dev_priv->csr.work, csr_load_work_fn);
+ INIT_WORK(&dev_priv->dmc.work, dmc_load_work_fn);
- if (!HAS_CSR(dev_priv))
+ if (!HAS_DMC(dev_priv))
return;
/*
- * Obtain a runtime pm reference, until CSR is loaded, to avoid entering
+ * Obtain a runtime pm reference, until DMC is loaded, to avoid entering
* runtime-suspend.
*
* On error, we return with the rpm wakeref held to prevent runtime
- * suspend as runtime suspend *requires* a working CSR for whatever
+ * suspend as runtime suspend *requires* a working DMC for whatever
* reason.
*/
- intel_csr_runtime_pm_get(dev_priv);
+ intel_dmc_runtime_pm_get(dev_priv);
if (IS_ALDERLAKE_S(dev_priv)) {
- csr->fw_path = ADLS_CSR_PATH;
- csr->required_version = ADLS_CSR_VERSION_REQUIRED;
- csr->max_fw_size = GEN12_CSR_MAX_FW_SIZE;
+ dmc->fw_path = ADLS_DMC_PATH;
+ dmc->required_version = ADLS_DMC_VERSION_REQUIRED;
+ dmc->max_fw_size = GEN12_DMC_MAX_FW_SIZE;
} else if (IS_DG1(dev_priv)) {
- csr->fw_path = DG1_CSR_PATH;
- csr->required_version = DG1_CSR_VERSION_REQUIRED;
- csr->max_fw_size = GEN12_CSR_MAX_FW_SIZE;
+ dmc->fw_path = DG1_DMC_PATH;
+ dmc->required_version = DG1_DMC_VERSION_REQUIRED;
+ dmc->max_fw_size = GEN12_DMC_MAX_FW_SIZE;
} else if (IS_ROCKETLAKE(dev_priv)) {
- csr->fw_path = RKL_CSR_PATH;
- csr->required_version = RKL_CSR_VERSION_REQUIRED;
- csr->max_fw_size = GEN12_CSR_MAX_FW_SIZE;
+ dmc->fw_path = RKL_DMC_PATH;
+ dmc->required_version = RKL_DMC_VERSION_REQUIRED;
+ dmc->max_fw_size = GEN12_DMC_MAX_FW_SIZE;
} else if (DISPLAY_VER(dev_priv) >= 12) {
- csr->fw_path = TGL_CSR_PATH;
- csr->required_version = TGL_CSR_VERSION_REQUIRED;
- csr->max_fw_size = GEN12_CSR_MAX_FW_SIZE;
+ dmc->fw_path = TGL_DMC_PATH;
+ dmc->required_version = TGL_DMC_VERSION_REQUIRED;
+ dmc->max_fw_size = GEN12_DMC_MAX_FW_SIZE;
} else if (DISPLAY_VER(dev_priv) == 11) {
- csr->fw_path = ICL_CSR_PATH;
- csr->required_version = ICL_CSR_VERSION_REQUIRED;
- csr->max_fw_size = ICL_CSR_MAX_FW_SIZE;
+ dmc->fw_path = ICL_DMC_PATH;
+ dmc->required_version = ICL_DMC_VERSION_REQUIRED;
+ dmc->max_fw_size = ICL_DMC_MAX_FW_SIZE;
} else if (IS_CANNONLAKE(dev_priv)) {
- csr->fw_path = CNL_CSR_PATH;
- csr->required_version = CNL_CSR_VERSION_REQUIRED;
- csr->max_fw_size = CNL_CSR_MAX_FW_SIZE;
+ dmc->fw_path = CNL_DMC_PATH;
+ dmc->required_version = CNL_DMC_VERSION_REQUIRED;
+ dmc->max_fw_size = CNL_DMC_MAX_FW_SIZE;
} else if (IS_GEMINILAKE(dev_priv)) {
- csr->fw_path = GLK_CSR_PATH;
- csr->required_version = GLK_CSR_VERSION_REQUIRED;
- csr->max_fw_size = GLK_CSR_MAX_FW_SIZE;
+ dmc->fw_path = GLK_DMC_PATH;
+ dmc->required_version = GLK_DMC_VERSION_REQUIRED;
+ dmc->max_fw_size = GLK_DMC_MAX_FW_SIZE;
} else if (IS_KABYLAKE(dev_priv) ||
IS_COFFEELAKE(dev_priv) ||
IS_COMETLAKE(dev_priv)) {
- csr->fw_path = KBL_CSR_PATH;
- csr->required_version = KBL_CSR_VERSION_REQUIRED;
- csr->max_fw_size = KBL_CSR_MAX_FW_SIZE;
+ dmc->fw_path = KBL_DMC_PATH;
+ dmc->required_version = KBL_DMC_VERSION_REQUIRED;
+ dmc->max_fw_size = KBL_DMC_MAX_FW_SIZE;
} else if (IS_SKYLAKE(dev_priv)) {
- csr->fw_path = SKL_CSR_PATH;
- csr->required_version = SKL_CSR_VERSION_REQUIRED;
- csr->max_fw_size = SKL_CSR_MAX_FW_SIZE;
+ dmc->fw_path = SKL_DMC_PATH;
+ dmc->required_version = SKL_DMC_VERSION_REQUIRED;
+ dmc->max_fw_size = SKL_DMC_MAX_FW_SIZE;
} else if (IS_BROXTON(dev_priv)) {
- csr->fw_path = BXT_CSR_PATH;
- csr->required_version = BXT_CSR_VERSION_REQUIRED;
- csr->max_fw_size = BXT_CSR_MAX_FW_SIZE;
+ dmc->fw_path = BXT_DMC_PATH;
+ dmc->required_version = BXT_DMC_VERSION_REQUIRED;
+ dmc->max_fw_size = BXT_DMC_MAX_FW_SIZE;
}
if (dev_priv->params.dmc_firmware_path) {
if (strlen(dev_priv->params.dmc_firmware_path) == 0) {
- csr->fw_path = NULL;
+ dmc->fw_path = NULL;
drm_info(&dev_priv->drm,
- "Disabling CSR firmware and runtime PM\n");
+ "Disabling DMC firmware and runtime PM\n");
return;
}
- csr->fw_path = dev_priv->params.dmc_firmware_path;
+ dmc->fw_path = dev_priv->params.dmc_firmware_path;
/* Bypass version check for firmware override. */
- csr->required_version = 0;
+ dmc->required_version = 0;
}
- if (csr->fw_path == NULL) {
+ if (!dmc->fw_path) {
drm_dbg_kms(&dev_priv->drm,
- "No known CSR firmware for platform, disabling runtime PM\n");
+ "No known DMC firmware for platform, disabling runtime PM\n");
return;
}
- drm_dbg_kms(&dev_priv->drm, "Loading %s\n", csr->fw_path);
- schedule_work(&dev_priv->csr.work);
+ drm_dbg_kms(&dev_priv->drm, "Loading %s\n", dmc->fw_path);
+ schedule_work(&dev_priv->dmc.work);
}
/**
- * intel_csr_ucode_suspend() - prepare CSR firmware before system suspend
+ * intel_dmc_ucode_suspend() - prepare DMC firmware before system suspend
* @dev_priv: i915 drm device
*
* Prepare the DMC firmware before entering system suspend. This includes
* flushing pending work items and releasing any resources acquired during
* init.
*/
-void intel_csr_ucode_suspend(struct drm_i915_private *dev_priv)
+void intel_dmc_ucode_suspend(struct drm_i915_private *dev_priv)
{
- if (!HAS_CSR(dev_priv))
+ if (!HAS_DMC(dev_priv))
return;
- flush_work(&dev_priv->csr.work);
+ flush_work(&dev_priv->dmc.work);
/* Drop the reference held in case DMC isn't loaded. */
- if (!dev_priv->csr.dmc_payload)
- intel_csr_runtime_pm_put(dev_priv);
+ if (!intel_dmc_has_payload(dev_priv))
+ intel_dmc_runtime_pm_put(dev_priv);
}
/**
- * intel_csr_ucode_resume() - init CSR firmware during system resume
+ * intel_dmc_ucode_resume() - init DMC firmware during system resume
* @dev_priv: i915 drm device
*
* Reinitialize the DMC firmware during system resume, reacquiring any
- * resources released in intel_csr_ucode_suspend().
+ * resources released in intel_dmc_ucode_suspend().
*/
-void intel_csr_ucode_resume(struct drm_i915_private *dev_priv)
+void intel_dmc_ucode_resume(struct drm_i915_private *dev_priv)
{
- if (!HAS_CSR(dev_priv))
+ if (!HAS_DMC(dev_priv))
return;
/*
* Reacquire the reference to keep RPM disabled in case DMC isn't
* loaded.
*/
- if (!dev_priv->csr.dmc_payload)
- intel_csr_runtime_pm_get(dev_priv);
+ if (!intel_dmc_has_payload(dev_priv))
+ intel_dmc_runtime_pm_get(dev_priv);
}
/**
- * intel_csr_ucode_fini() - unload the CSR firmware.
+ * intel_dmc_ucode_fini() - unload the DMC firmware.
* @dev_priv: i915 drm device.
*
* Firmware unloading includes freeing the internal memory and resetting the
* firmware loading status.
*/
-void intel_csr_ucode_fini(struct drm_i915_private *dev_priv)
+void intel_dmc_ucode_fini(struct drm_i915_private *dev_priv)
{
- if (!HAS_CSR(dev_priv))
+ if (!HAS_DMC(dev_priv))
return;
- intel_csr_ucode_suspend(dev_priv);
- drm_WARN_ON(&dev_priv->drm, dev_priv->csr.wakeref);
+ intel_dmc_ucode_suspend(dev_priv);
+ drm_WARN_ON(&dev_priv->drm, dev_priv->dmc.wakeref);
- kfree(dev_priv->csr.dmc_payload);
+ kfree(dev_priv->dmc.dmc_payload);
}
diff --git a/drivers/gpu/drm/i915/display/intel_dmc.h b/drivers/gpu/drm/i915/display/intel_dmc.h
new file mode 100644
index 000000000000..4c22f567b61b
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_dmc.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_DMC_H__
+#define __INTEL_DMC_H__
+
+#include "i915_reg.h"
+#include "intel_wakeref.h"
+#include <linux/workqueue.h>
+
+struct drm_i915_private;
+
+#define DMC_VERSION(major, minor) ((major) << 16 | (minor))
+#define DMC_VERSION_MAJOR(version) ((version) >> 16)
+#define DMC_VERSION_MINOR(version) ((version) & 0xffff)
+
+struct intel_dmc {
+ struct work_struct work;
+ const char *fw_path;
+ u32 required_version;
+ u32 max_fw_size; /* bytes */
+ u32 *dmc_payload;
+ u32 dmc_fw_size; /* dwords */
+ u32 version;
+ u32 mmio_count;
+ i915_reg_t mmioaddr[20];
+ u32 mmiodata[20];
+ u32 dc_state;
+ u32 target_dc_state;
+ u32 allowed_dc_mask;
+ intel_wakeref_t wakeref;
+};
+
+void intel_dmc_ucode_init(struct drm_i915_private *i915);
+void intel_dmc_load_program(struct drm_i915_private *i915);
+void intel_dmc_ucode_fini(struct drm_i915_private *i915);
+void intel_dmc_ucode_suspend(struct drm_i915_private *i915);
+void intel_dmc_ucode_resume(struct drm_i915_private *i915);
+bool intel_dmc_has_payload(struct drm_i915_private *i915);
+
+#endif /* __INTEL_DMC_H__ */
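/*
 * Editor's note: illustrative sketch, not part of the patch; the function
 * name is hypothetical. DMC_VERSION() packs the major number into the high
 * 16 bits and the minor into the low 16 bits, so the Tiger Lake requirement
 * DMC_VERSION(2, 8) is 0x00020008.
 */
static inline void example_dmc_version(void)
{
	u32 version = DMC_VERSION(2, 8);	/* 0x00020008 */

	WARN_ON(DMC_VERSION_MAJOR(version) != 2);
	WARN_ON(DMC_VERSION_MINOR(version) != 8);
}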
diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
index 6bf6f1ec13ed..08bceae40aa8 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
@@ -128,49 +128,13 @@ intel_dp_set_lttpr_transparent_mode(struct intel_dp *intel_dp, bool enable)
return drm_dp_dpcd_write(&intel_dp->aux, DP_PHY_REPEATER_MODE, &val, 1) == 1;
}
-/**
- * intel_dp_init_lttpr_and_dprx_caps - detect LTTPR and DPRX caps, init the LTTPR link training mode
- * @intel_dp: Intel DP struct
- *
- * Read the LTTPR common and DPRX capabilities and switch to non-transparent
- * link training mode if any is detected and read the PHY capabilities for all
- * detected LTTPRs. In case of an LTTPR detection error or if the number of
- * LTTPRs is more than is supported (8), fall back to the no-LTTPR,
- * transparent mode link training mode.
- *
- * Returns:
- * >0 if LTTPRs were detected and the non-transparent LT mode was set. The
- * DPRX capabilities are read out.
- * 0 if no LTTPRs or more than 8 LTTPRs were detected or in case of a
- * detection failure and the transparent LT mode was set. The DPRX
- * capabilities are read out.
- * <0 Reading out the DPRX capabilities failed.
- */
-int intel_dp_init_lttpr_and_dprx_caps(struct intel_dp *intel_dp)
+static int intel_dp_init_lttpr(struct intel_dp *intel_dp)
{
int lttpr_count;
- bool ret;
int i;
- ret = intel_dp_read_lttpr_common_caps(intel_dp);
-
- /* The DPTX shall read the DPRX caps after LTTPR detection. */
- if (drm_dp_read_dpcd_caps(&intel_dp->aux, intel_dp->dpcd)) {
- intel_dp_reset_lttpr_common_caps(intel_dp);
- return -EIO;
- }
-
- if (!ret)
- return 0;
-
- /*
- * The 0xF0000-0xF02FF range is only valid if the DPCD revision is
- * at least 1.4.
- */
- if (intel_dp->dpcd[DP_DPCD_REV] < 0x14) {
- intel_dp_reset_lttpr_common_caps(intel_dp);
+ if (!intel_dp_read_lttpr_common_caps(intel_dp))
return 0;
- }
lttpr_count = drm_dp_lttpr_count(intel_dp->lttpr_common_caps);
/*
@@ -211,6 +175,37 @@ int intel_dp_init_lttpr_and_dprx_caps(struct intel_dp *intel_dp)
return lttpr_count;
}
+
+/**
+ * intel_dp_init_lttpr_and_dprx_caps - detect LTTPR and DPRX caps, init the LTTPR link training mode
+ * @intel_dp: Intel DP struct
+ *
+ * Read the LTTPR common and DPRX capabilities and switch to non-transparent
+ * link training mode if any is detected and read the PHY capabilities for all
+ * detected LTTPRs. In case of an LTTPR detection error or if the number of
+ * LTTPRs is more than is supported (8), fall back to the no-LTTPR,
+ * transparent mode link training mode.
+ *
+ * Returns:
+ * >0 if LTTPRs were detected and the non-transparent LT mode was set. The
+ * DPRX capabilities are read out.
+ * 0 if no LTTPRs or more than 8 LTTPRs were detected or in case of a
+ * detection failure and the transparent LT mode was set. The DPRX
+ * capabilities are read out.
+ * <0 Reading out the DPRX capabilities failed.
+ */
+int intel_dp_init_lttpr_and_dprx_caps(struct intel_dp *intel_dp)
+{
+ int lttpr_count = intel_dp_init_lttpr(intel_dp);
+
+ /* The DPTX shall read the DPRX caps after LTTPR detection. */
+ if (drm_dp_read_dpcd_caps(&intel_dp->aux, intel_dp->dpcd)) {
+ intel_dp_reset_lttpr_common_caps(intel_dp);
+ return -EIO;
+ }
+
+ return lttpr_count;
+}
EXPORT_SYMBOL(intel_dp_init_lttpr_and_dprx_caps);
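/*
 * Editor's note: a minimal caller sketch (not part of the patch, hypothetical
 * function name) following the kernel-doc above: a negative return means the
 * DPRX caps could not be read, 0 means transparent-mode link training with no
 * usable LTTPRs, and a positive value is the number of LTTPRs to be trained
 * in non-transparent mode.
 */
static int example_detect_lttprs(struct intel_dp *intel_dp)
{
	int lttpr_count = intel_dp_init_lttpr_and_dprx_caps(intel_dp);

	if (lttpr_count < 0)
		return lttpr_count;	/* reading the DPRX caps failed */

	/* lttpr_count == 0: train the DPRX directly; > 0: train each LTTPR PHY first */
	return 0;
}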
static u8 dp_voltage_max(u8 preemph)
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
index 18bfe8d09277..71ac57670043 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
@@ -149,6 +149,16 @@ void assert_shared_dpll(struct drm_i915_private *dev_priv,
pll->info->name, onoff(state), onoff(cur_state));
}
+static enum tc_port icl_pll_id_to_tc_port(enum intel_dpll_id id)
+{
+ return TC_PORT_1 + id - DPLL_ID_ICL_MGPLL1;
+}
+
+enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port)
+{
+ return tc_port - TC_PORT_1 + DPLL_ID_ICL_MGPLL1;
+}
+
static i915_reg_t
intel_combo_pll_enable_reg(struct drm_i915_private *i915,
struct intel_shared_dpll *pll)
@@ -161,6 +171,19 @@ intel_combo_pll_enable_reg(struct drm_i915_private *i915,
return CNL_DPLL_ENABLE(pll->info->id);
}
+static i915_reg_t
+intel_tc_pll_enable_reg(struct drm_i915_private *i915,
+ struct intel_shared_dpll *pll)
+{
+ const enum intel_dpll_id id = pll->info->id;
+ enum tc_port tc_port = icl_pll_id_to_tc_port(id);
+
+ if (IS_ALDERLAKE_P(i915))
+ return ADLP_PORTTC_PLL_ENABLE(tc_port);
+
+ return MG_PLL_ENABLE(tc_port);
+}
+
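/*
 * Editor's note: illustrative only, not part of the patch; the function name
 * is hypothetical. The two helpers above are plain offset conversions between
 * the ICL+ TC PLL IDs and TC port numbers, and they round-trip cleanly.
 */
static void example_tc_pll_mapping(void)
{
	enum intel_dpll_id id = icl_tc_port_to_pll_id(TC_PORT_2);

	WARN_ON(id != DPLL_ID_ICL_MGPLL2);			/* TC port 2 -> second TC PLL */
	WARN_ON(icl_pll_id_to_tc_port(id) != TC_PORT_2);	/* ...and back again */
}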
/**
* intel_prepare_shared_dpll - call a dpll's prepare hook
* @crtc_state: CRTC, and its state, which has a shared dpll
@@ -3120,16 +3143,6 @@ static void icl_calc_dpll_state(struct drm_i915_private *i915,
pll_state->cfgcr1 |= DPLL_CFGCR1_CENTRAL_FREQ_8400;
}
-static enum tc_port icl_pll_id_to_tc_port(enum intel_dpll_id id)
-{
- return id - DPLL_ID_ICL_MGPLL1;
-}
-
-enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port)
-{
- return tc_port + DPLL_ID_ICL_MGPLL1;
-}
-
static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
u32 *target_dco_khz,
struct intel_dpll_hw_state *state,
@@ -3728,12 +3741,14 @@ static bool mg_pll_get_hw_state(struct drm_i915_private *dev_priv,
bool ret = false;
u32 val;
+ i915_reg_t enable_reg = intel_tc_pll_enable_reg(dev_priv, pll);
+
wakeref = intel_display_power_get_if_enabled(dev_priv,
POWER_DOMAIN_DISPLAY_CORE);
if (!wakeref)
return false;
- val = intel_de_read(dev_priv, MG_PLL_ENABLE(tc_port));
+ val = intel_de_read(dev_priv, enable_reg);
if (!(val & PLL_ENABLE))
goto out;
@@ -3797,7 +3812,7 @@ static bool dkl_pll_get_hw_state(struct drm_i915_private *dev_priv,
if (!wakeref)
return false;
- val = intel_de_read(dev_priv, MG_PLL_ENABLE(tc_port));
+ val = intel_de_read(dev_priv, intel_tc_pll_enable_reg(dev_priv, pll));
if (!(val & PLL_ENABLE))
goto out;
@@ -4169,8 +4184,7 @@ static void tbt_pll_enable(struct drm_i915_private *dev_priv,
static void mg_pll_enable(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
- i915_reg_t enable_reg =
- MG_PLL_ENABLE(icl_pll_id_to_tc_port(pll->info->id));
+ i915_reg_t enable_reg = intel_tc_pll_enable_reg(dev_priv, pll);
icl_pll_power_enable(dev_priv, pll, enable_reg);
@@ -4249,8 +4263,7 @@ static void tbt_pll_disable(struct drm_i915_private *dev_priv,
static void mg_pll_disable(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
- i915_reg_t enable_reg =
- MG_PLL_ENABLE(icl_pll_id_to_tc_port(pll->info->id));
+ i915_reg_t enable_reg = intel_tc_pll_enable_reg(dev_priv, pll);
icl_pll_disable(dev_priv, pll, enable_reg);
}
@@ -4416,6 +4429,26 @@ static const struct intel_dpll_mgr adls_pll_mgr = {
.dump_hw_state = icl_dump_hw_state,
};
+static const struct dpll_info adlp_plls[] = {
+ { "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
+ { "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
+ { "TBT PLL", &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
+ { "TC PLL 1", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
+ { "TC PLL 2", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
+ { "TC PLL 3", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
+ { "TC PLL 4", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
+ { },
+};
+
+static const struct intel_dpll_mgr adlp_pll_mgr = {
+ .dpll_info = adlp_plls,
+ .get_dplls = icl_get_dplls,
+ .put_dplls = icl_put_dplls,
+ .update_active_dpll = icl_update_active_dpll,
+ .update_ref_clks = icl_update_dpll_ref_clks,
+ .dump_hw_state = icl_dump_hw_state,
+};
+
/**
* intel_shared_dpll_init - Initialize shared DPLLs
* @dev: drm device
@@ -4429,7 +4462,9 @@ void intel_shared_dpll_init(struct drm_device *dev)
const struct dpll_info *dpll_info;
int i;
- if (IS_ALDERLAKE_S(dev_priv))
+ if (IS_ALDERLAKE_P(dev_priv))
+ dpll_mgr = &adlp_pll_mgr;
+ else if (IS_ALDERLAKE_S(dev_priv))
dpll_mgr = &adls_pll_mgr;
else if (IS_DG1(dev_priv))
dpll_mgr = &dg1_pll_mgr;
diff --git a/drivers/gpu/drm/i915/display/intel_fb.c b/drivers/gpu/drm/i915/display/intel_fb.c
index a005c68889e7..c60a81a81c09 100644
--- a/drivers/gpu/drm/i915/display/intel_fb.c
+++ b/drivers/gpu/drm/i915/display/intel_fb.c
@@ -355,8 +355,17 @@ static int intel_fb_offset_to_xy(int *x, int *y,
unsigned int height;
u32 alignment;
- if (DISPLAY_VER(i915) >= 12 &&
- is_semiplanar_uv_plane(fb, color_plane))
+ /*
+ * All DPT color planes must be 512*4k aligned (the amount mapped by a
+ * single DPT page). For ADL_P CCS FBs this only works by requiring
+ * the allocated offsets to be 2MB aligned. Once support to remap
+ * such FBs is added we can remove this requirement, as then all the
+ * planes can be remapped to an aligned offset.
+ */
+ if (IS_ALDERLAKE_P(i915) && is_ccs_modifier(fb->modifier))
+ alignment = 512 * 4096;
+ else if (DISPLAY_VER(i915) >= 12 &&
+ is_semiplanar_uv_plane(fb, color_plane))
alignment = intel_tile_row_size(fb, color_plane);
else if (fb->modifier != DRM_FORMAT_MOD_LINEAR)
alignment = intel_tile_size(i915);
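/*
 * Editor's note: illustrative arithmetic only, not part of the patch. A
 * single DPT page maps 512 PTEs of 4KiB each, so the ADL-P CCS alignment
 * requested above works out to 512 * 4096 = 2097152 bytes, i.e. the 2MB
 * alignment mentioned in the comment.
 */
#define EXAMPLE_ADLP_CCS_DPT_ALIGNMENT	(512 * 4096)	/* == 2 MiB */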
diff --git a/drivers/gpu/drm/i915/display/intel_fifo_underrun.c b/drivers/gpu/drm/i915/display/intel_fifo_underrun.c
index 3315aa1d4d5a..eb841960840d 100644
--- a/drivers/gpu/drm/i915/display/intel_fifo_underrun.c
+++ b/drivers/gpu/drm/i915/display/intel_fifo_underrun.c
@@ -185,15 +185,34 @@ static void ivb_set_fifo_underrun_reporting(struct drm_device *dev,
}
}
+static u32
+icl_pipe_status_underrun_mask(struct drm_i915_private *dev_priv)
+{
+ u32 mask = PIPE_STATUS_UNDERRUN;
+
+ if (DISPLAY_VER(dev_priv) >= 13)
+ mask |= PIPE_STATUS_SOFT_UNDERRUN_XELPD |
+ PIPE_STATUS_HARD_UNDERRUN_XELPD |
+ PIPE_STATUS_PORT_UNDERRUN_XELPD;
+
+ return mask;
+}
+
static void bdw_set_fifo_underrun_reporting(struct drm_device *dev,
enum pipe pipe, bool enable)
{
struct drm_i915_private *dev_priv = to_i915(dev);
+ u32 mask = gen8_de_pipe_underrun_mask(dev_priv);
- if (enable)
- bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_FIFO_UNDERRUN);
- else
- bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_FIFO_UNDERRUN);
+ if (enable) {
+ if (DISPLAY_VER(dev_priv) >= 11)
+ intel_de_write(dev_priv, ICL_PIPESTATUS(pipe),
+ icl_pipe_status_underrun_mask(dev_priv));
+
+ bdw_enable_pipe_irq(dev_priv, pipe, mask);
+ } else {
+ bdw_disable_pipe_irq(dev_priv, pipe, mask);
+ }
}
static void ibx_set_fifo_underrun_reporting(struct drm_device *dev,
@@ -373,6 +392,7 @@ void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
+ u32 underruns = 0;
/* We may be called too early in init, thanks BIOS! */
if (crtc == NULL)
@@ -383,10 +403,35 @@ void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
crtc->cpu_fifo_underrun_disabled)
return;
+ /*
+ * Starting with display version 11, the PIPE_STAT register records
+ * whether an underrun has happened, and on XELPD+, it will also record
+ * whether the underrun was soft/hard and whether it was triggered by
+ * the downstream port logic. We should clear these bits (which use
+ * write-1-to-clear logic) too.
+ *
+ * Note that although the IIR gives us the same underrun and soft/hard
+ * information, PIPE_STAT is the only place we can find out whether
+ * the underrun was caused by the downstream port.
+ */
+ if (DISPLAY_VER(dev_priv) >= 11) {
+ underruns = intel_de_read(dev_priv, ICL_PIPESTATUS(pipe)) &
+ icl_pipe_status_underrun_mask(dev_priv);
+ intel_de_write(dev_priv, ICL_PIPESTATUS(pipe), underruns);
+ }
+
if (intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false)) {
trace_intel_cpu_fifo_underrun(dev_priv, pipe);
- drm_err(&dev_priv->drm, "CPU pipe %c FIFO underrun\n",
- pipe_name(pipe));
+
+ if (DISPLAY_VER(dev_priv) >= 11)
+ drm_err(&dev_priv->drm, "CPU pipe %c FIFO underrun: %s%s%s%s\n",
+ pipe_name(pipe),
+ underruns & PIPE_STATUS_SOFT_UNDERRUN_XELPD ? "soft," : "",
+ underruns & PIPE_STATUS_HARD_UNDERRUN_XELPD ? "hard," : "",
+ underruns & PIPE_STATUS_PORT_UNDERRUN_XELPD ? "port," : "",
+ underruns & PIPE_STATUS_UNDERRUN ? "transcoder," : "");
+ else
+ drm_err(&dev_priv->drm, "CPU pipe %c FIFO underrun\n", pipe_name(pipe));
}
intel_fbc_handle_fifo_underrun_irq(dev_priv);
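/*
 * Editor's note: a minimal sketch (not part of the patch, hypothetical
 * function name) of the write-1-to-clear idiom used above on ICL_PIPESTATUS:
 * reading the register reports which underrun bits latched, and writing the
 * same value back clears exactly those bits.
 */
static u32 example_read_and_clear_underruns(struct drm_i915_private *i915, enum pipe pipe)
{
	u32 latched = intel_de_read(i915, ICL_PIPESTATUS(pipe)) &
		      icl_pipe_status_underrun_mask(i915);

	intel_de_write(i915, ICL_PIPESTATUS(pipe), latched);	/* W1C */

	return latched;
}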
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c
index 4a1b2d863b0c..7e51c98c475e 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.c
@@ -1865,28 +1865,69 @@ static int intel_hdmi_port_clock(int clock, int bpc)
return clock * bpc / 8;
}
+static bool intel_hdmi_bpc_possible(struct drm_connector *connector,
+ int bpc, bool has_hdmi_sink, bool ycbcr420_output)
+{
+ struct drm_i915_private *i915 = to_i915(connector->dev);
+ const struct drm_display_info *info = &connector->display_info;
+ const struct drm_hdmi_info *hdmi = &info->hdmi;
+
+ switch (bpc) {
+ case 12:
+ if (HAS_GMCH(i915))
+ return false;
+
+ if (!has_hdmi_sink)
+ return false;
+
+ if (ycbcr420_output)
+ return hdmi->y420_dc_modes & DRM_EDID_YCBCR420_DC_36;
+ else
+ return info->edid_hdmi_dc_modes & DRM_EDID_HDMI_DC_36;
+ case 10:
+ if (DISPLAY_VER(i915) < 11)
+ return false;
+
+ if (!has_hdmi_sink)
+ return false;
+
+ if (ycbcr420_output)
+ return hdmi->y420_dc_modes & DRM_EDID_YCBCR420_DC_30;
+ else
+ return info->edid_hdmi_dc_modes & DRM_EDID_HDMI_DC_30;
+ case 8:
+ return true;
+ default:
+ MISSING_CASE(bpc);
+ return false;
+ }
+}
+
static enum drm_mode_status
-intel_hdmi_mode_clock_valid(struct intel_hdmi *hdmi, int clock, bool has_hdmi_sink)
+intel_hdmi_mode_clock_valid(struct drm_connector *connector, int clock,
+ bool has_hdmi_sink, bool ycbcr420_output)
{
- struct drm_device *dev = intel_hdmi_to_dev(hdmi);
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_hdmi *hdmi = intel_attached_hdmi(to_intel_connector(connector));
enum drm_mode_status status;
+ if (ycbcr420_output)
+ clock /= 2;
+
/* check if we can do 8bpc */
status = hdmi_port_clock_valid(hdmi, intel_hdmi_port_clock(clock, 8),
true, has_hdmi_sink);
- if (has_hdmi_sink) {
- /* if we can't do 8bpc we may still be able to do 12bpc */
- if (status != MODE_OK && !HAS_GMCH(dev_priv))
- status = hdmi_port_clock_valid(hdmi, intel_hdmi_port_clock(clock, 12),
- true, has_hdmi_sink);
+ /* if we can't do 8bpc we may still be able to do 12bpc */
+ if (status != MODE_OK &&
+ intel_hdmi_bpc_possible(connector, 12, has_hdmi_sink, ycbcr420_output))
+ status = hdmi_port_clock_valid(hdmi, intel_hdmi_port_clock(clock, 12),
+ true, has_hdmi_sink);
- /* if we can't do 8,12bpc we may still be able to do 10bpc */
- if (status != MODE_OK && DISPLAY_VER(dev_priv) >= 11)
- status = hdmi_port_clock_valid(hdmi, intel_hdmi_port_clock(clock, 10),
- true, has_hdmi_sink);
- }
+ /* if we can't do 8,12bpc we may still be able to do 10bpc */
+ if (status != MODE_OK &&
+ intel_hdmi_bpc_possible(connector, 10, has_hdmi_sink, ycbcr420_output))
+ status = hdmi_port_clock_valid(hdmi, intel_hdmi_port_clock(clock, 10),
+ true, has_hdmi_sink);
return status;
}
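/*
 * Editor's note: illustrative arithmetic only, not part of the patch. For a
 * 594000 kHz mode (3840x2160@60) the checks above evaluate the port clock as
 * intel_hdmi_port_clock(clock, bpc) = clock * bpc / 8, after halving the
 * clock for YCbCr 4:2:0 output:
 *
 *   RGB/4:4:4,  8 bpc: 594000 *  8 / 8 = 594000 kHz
 *   RGB/4:4:4, 12 bpc: 594000 * 12 / 8 = 891000 kHz
 *   4:2:0,     12 bpc: 297000 * 12 / 8 = 445500 kHz
 */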
@@ -1920,18 +1961,15 @@ intel_hdmi_mode_valid(struct drm_connector *connector,
}
ycbcr_420_only = drm_mode_is_420_only(&connector->display_info, mode);
- if (ycbcr_420_only)
- clock /= 2;
- status = intel_hdmi_mode_clock_valid(hdmi, clock, has_hdmi_sink);
+ status = intel_hdmi_mode_clock_valid(connector, clock, has_hdmi_sink, ycbcr_420_only);
if (status != MODE_OK) {
if (ycbcr_420_only ||
!connector->ycbcr_420_allowed ||
!drm_mode_is_420_also(&connector->display_info, mode))
return status;
- clock /= 2;
- status = intel_hdmi_mode_clock_valid(hdmi, clock, has_hdmi_sink);
+ status = intel_hdmi_mode_clock_valid(connector, clock, has_hdmi_sink, true);
if (status != MODE_OK)
return status;
}
@@ -1950,32 +1988,12 @@ bool intel_hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state,
if (crtc_state->pipe_bpp < bpc * 3)
return false;
- if (!has_hdmi_sink)
- return false;
-
for_each_new_connector_in_state(state, connector, connector_state, i) {
- const struct drm_display_info *info = &connector->display_info;
-
if (connector_state->crtc != crtc_state->uapi.crtc)
continue;
- if (ycbcr420_output) {
- const struct drm_hdmi_info *hdmi = &info->hdmi;
-
- if (bpc == 12 && !(hdmi->y420_dc_modes &
- DRM_EDID_YCBCR420_DC_36))
- return false;
- else if (bpc == 10 && !(hdmi->y420_dc_modes &
- DRM_EDID_YCBCR420_DC_30))
- return false;
- } else {
- if (bpc == 12 && !(info->edid_hdmi_dc_modes &
- DRM_EDID_HDMI_DC_36))
- return false;
- else if (bpc == 10 && !(info->edid_hdmi_dc_modes &
- DRM_EDID_HDMI_DC_30))
- return false;
- }
+ if (!intel_hdmi_bpc_possible(connector, bpc, has_hdmi_sink, ycbcr420_output))
+ return false;
}
return true;
@@ -1989,12 +2007,6 @@ static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state,
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
- if (HAS_GMCH(dev_priv))
- return false;
-
- if (bpc == 10 && DISPLAY_VER(dev_priv) < 11)
- return false;
-
/*
* HDMI deep color affects the clocks, so it's only possible
* when not cloning with other encoder types.
diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
index acaf3d459821..77865cf6641f 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.c
+++ b/drivers/gpu/drm/i915/display/intel_psr.c
@@ -638,7 +638,7 @@ unlock:
static void tgl_disallow_dc3co_on_psr2_exit(struct intel_dp *intel_dp)
{
- if (!intel_dp->psr.dc3co_enabled)
+ if (!intel_dp->psr.dc3co_exitline)
return;
cancel_delayed_work(&intel_dp->psr.dc3co_work);
@@ -646,12 +646,26 @@ static void tgl_disallow_dc3co_on_psr2_exit(struct intel_dp *intel_dp)
tgl_psr2_disable_dc3co(intel_dp);
}
+static bool
+dc3co_is_pipe_port_compatible(struct intel_dp *intel_dp,
+ struct intel_crtc_state *crtc_state)
+{
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ enum port port = dig_port->base.port;
+
+ if (IS_ALDERLAKE_P(dev_priv))
+ return pipe <= PIPE_B && port <= PORT_B;
+ else
+ return pipe == PIPE_A && port == PORT_A;
+}
+
static void
tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state)
{
const u32 crtc_vdisplay = crtc_state->uapi.adjusted_mode.crtc_vdisplay;
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
u32 exit_scanlines;
@@ -669,12 +683,10 @@ tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp,
if (crtc_state->enable_psr2_sel_fetch)
return;
- if (!(dev_priv->csr.allowed_dc_mask & DC_STATE_EN_DC3CO))
+ if (!(dev_priv->dmc.allowed_dc_mask & DC_STATE_EN_DC3CO))
return;
- /* B.Specs:49196 DC3CO only works with pipeA and DDIA.*/
- if (to_intel_crtc(crtc_state->uapi.crtc)->pipe != PIPE_A ||
- dig_port->base.port != PORT_A)
+ if (!dc3co_is_pipe_port_compatible(intel_dp, crtc_state))
return;
/*
@@ -753,6 +765,15 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
return false;
}
+ /*
+ * We are missing the implementation of some workarounds needed to enable PSR2
+ * in Alderlake_P; until they are ready, PSR2 should be kept disabled.
+ */
+ if (IS_ALDERLAKE_P(dev_priv)) {
+ drm_dbg_kms(&dev_priv->drm, "PSR2 is missing the implementation of workarounds\n");
+ return false;
+ }
+
if (!transcoder_has_psr2(dev_priv, crtc_state->cpu_transcoder)) {
drm_dbg_kms(&dev_priv->drm,
"PSR2 not supported in transcoder %s\n",
@@ -969,11 +990,10 @@ static void intel_psr_activate(struct intel_dp *intel_dp)
intel_dp->psr.active = true;
}
-static void intel_psr_enable_source(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state)
+static void intel_psr_enable_source(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+ enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
u32 mask;
/* Only HSW and BDW have PSR AUX registers that need to be setup. SKL+
@@ -1010,7 +1030,7 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
psr_irq_control(intel_dp);
- if (crtc_state->dc3co_exitline) {
+ if (intel_dp->psr.dc3co_exitline) {
u32 val;
/*
@@ -1019,7 +1039,7 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
*/
val = intel_de_read(dev_priv, EXITLINE(cpu_transcoder));
val &= ~EXITLINE_MASK;
- val |= crtc_state->dc3co_exitline << EXITLINE_SHIFT;
+ val |= intel_dp->psr.dc3co_exitline << EXITLINE_SHIFT;
val |= EXITLINE_ENABLE;
intel_de_write(dev_priv, EXITLINE(cpu_transcoder), val);
}
@@ -1030,27 +1050,11 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
IGNORE_PSR2_HW_TRACKING : 0);
}
-static void intel_psr_enable_locked(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state)
+static bool psr_interrupt_error_check(struct intel_dp *intel_dp)
{
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct intel_encoder *encoder = &dig_port->base;
u32 val;
- drm_WARN_ON(&dev_priv->drm, intel_dp->psr.enabled);
-
- intel_dp->psr.psr2_enabled = crtc_state->has_psr2;
- intel_dp->psr.busy_frontbuffer_bits = 0;
- intel_dp->psr.pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
- intel_dp->psr.dc3co_enabled = !!crtc_state->dc3co_exitline;
- intel_dp->psr.transcoder = crtc_state->cpu_transcoder;
- /* DC5/DC6 requires at least 6 idle frames */
- val = usecs_to_jiffies(intel_get_frame_time_us(crtc_state) * 6);
- intel_dp->psr.dc3co_exit_delay = val;
- intel_dp->psr.psr2_sel_fetch_enabled = crtc_state->enable_psr2_sel_fetch;
-
/*
* If a PSR error happened and the driver is reloaded, the EDP_PSR_IIR
* will still keep the error set even after the reset done in the
@@ -1071,17 +1075,45 @@ static void intel_psr_enable_locked(struct intel_dp *intel_dp,
intel_dp->psr.sink_not_reliable = true;
drm_dbg_kms(&dev_priv->drm,
"PSR interruption error set, not enabling PSR\n");
- return;
+ return false;
}
+ return true;
+}
+
+static void intel_psr_enable_locked(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_encoder *encoder = &dig_port->base;
+ u32 val;
+
+ drm_WARN_ON(&dev_priv->drm, intel_dp->psr.enabled);
+
+ intel_dp->psr.psr2_enabled = crtc_state->has_psr2;
+ intel_dp->psr.busy_frontbuffer_bits = 0;
+ intel_dp->psr.pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
+ intel_dp->psr.transcoder = crtc_state->cpu_transcoder;
+ /* DC5/DC6 requires at least 6 idle frames */
+ val = usecs_to_jiffies(intel_get_frame_time_us(crtc_state) * 6);
+ intel_dp->psr.dc3co_exit_delay = val;
+ intel_dp->psr.dc3co_exitline = crtc_state->dc3co_exitline;
+ intel_dp->psr.psr2_sel_fetch_enabled = crtc_state->enable_psr2_sel_fetch;
+
+ if (!psr_interrupt_error_check(intel_dp))
+ return;
+
drm_dbg_kms(&dev_priv->drm, "Enabling PSR%s\n",
intel_dp->psr.psr2_enabled ? "2" : "1");
intel_dp_compute_psr_vsc_sdp(intel_dp, crtc_state, conn_state,
&intel_dp->psr.vsc);
intel_write_dp_vsc_sdp(encoder, crtc_state, &intel_dp->psr.vsc);
intel_psr_enable_sink(intel_dp);
- intel_psr_enable_source(intel_dp, crtc_state);
+ intel_psr_enable_source(intel_dp);
intel_dp->psr.enabled = true;
+ intel_dp->psr.paused = false;
intel_psr_activate(intel_dp);
}
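/*
 * Editor's note: illustrative arithmetic only, not part of the patch. The
 * DC3CO exit delay set above is six full frame times converted to jiffies;
 * for a 60Hz mode intel_get_frame_time_us() is roughly 16667us, so the
 * delayed work fires about 6 * 16667us ~= 100ms after the last flush.
 */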
@@ -1151,22 +1183,12 @@ static void intel_psr_exit(struct intel_dp *intel_dp)
intel_dp->psr.active = false;
}
-static void intel_psr_disable_locked(struct intel_dp *intel_dp)
+static void intel_psr_wait_exit_locked(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
i915_reg_t psr_status;
u32 psr_status_mask;
- lockdep_assert_held(&intel_dp->psr.lock);
-
- if (!intel_dp->psr.enabled)
- return;
-
- drm_dbg_kms(&dev_priv->drm, "Disabling PSR%s\n",
- intel_dp->psr.psr2_enabled ? "2" : "1");
-
- intel_psr_exit(intel_dp);
-
if (intel_dp->psr.psr2_enabled) {
psr_status = EDP_PSR2_STATUS(intel_dp->psr.transcoder);
psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
@@ -1179,6 +1201,22 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
if (intel_de_wait_for_clear(dev_priv, psr_status,
psr_status_mask, 2000))
drm_err(&dev_priv->drm, "Timed out waiting PSR idle state\n");
+}
+
+static void intel_psr_disable_locked(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+ lockdep_assert_held(&intel_dp->psr.lock);
+
+ if (!intel_dp->psr.enabled)
+ return;
+
+ drm_dbg_kms(&dev_priv->drm, "Disabling PSR%s\n",
+ intel_dp->psr.psr2_enabled ? "2" : "1");
+
+ intel_psr_exit(intel_dp);
+ intel_psr_wait_exit_locked(intel_dp);
/* WA 1408330847 */
if (intel_dp->psr.psr2_sel_fetch_enabled &&
@@ -1223,6 +1261,61 @@ void intel_psr_disable(struct intel_dp *intel_dp,
cancel_delayed_work_sync(&intel_dp->psr.dc3co_work);
}
+/**
+ * intel_psr_pause - Pause PSR
+ * @intel_dp: Intel DP
+ *
+ * This function needs to be called after enabling PSR.
+ */
+void intel_psr_pause(struct intel_dp *intel_dp)
+{
+ struct intel_psr *psr = &intel_dp->psr;
+
+ if (!CAN_PSR(intel_dp))
+ return;
+
+ mutex_lock(&psr->lock);
+
+ if (!psr->enabled) {
+ mutex_unlock(&psr->lock);
+ return;
+ }
+
+ intel_psr_exit(intel_dp);
+ intel_psr_wait_exit_locked(intel_dp);
+ psr->paused = true;
+
+ mutex_unlock(&psr->lock);
+
+ cancel_work_sync(&psr->work);
+ cancel_delayed_work_sync(&psr->dc3co_work);
+}
+
+/**
+ * intel_psr_resume - Resume PSR
+ * @intel_dp: Intel DP
+ *
+ * This function needs to be called after pausing PSR.
+ */
+void intel_psr_resume(struct intel_dp *intel_dp)
+{
+ struct intel_psr *psr = &intel_dp->psr;
+
+ if (!CAN_PSR(intel_dp))
+ return;
+
+ mutex_lock(&psr->lock);
+
+ if (!psr->paused)
+ goto unlock;
+
+ psr->paused = false;
+ intel_psr_activate(intel_dp);
+
+unlock:
+ mutex_unlock(&psr->lock);
+}
+
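/*
 * Editor's note: a minimal usage sketch, not part of the patch; the function
 * name is hypothetical. A caller doing work that is incompatible with PSR
 * brackets it with the new helpers; both are no-ops when PSR is unsupported
 * or not enabled, and intel_psr_flush() will not re-activate PSR while
 * psr.paused is set.
 */
static void example_with_psr_paused(struct intel_dp *intel_dp)
{
	intel_psr_pause(intel_dp);
	/* ...perform the PSR-incompatible operation... */
	intel_psr_resume(intel_dp);
}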
static void psr_force_hw_tracking_exit(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
@@ -1818,7 +1911,7 @@ tgl_dc3co_flush(struct intel_dp *intel_dp, unsigned int frontbuffer_bits,
{
mutex_lock(&intel_dp->psr.lock);
- if (!intel_dp->psr.dc3co_enabled)
+ if (!intel_dp->psr.dc3co_exitline)
goto unlock;
if (!intel_dp->psr.psr2_enabled || !intel_dp->psr.active)
@@ -1877,6 +1970,16 @@ void intel_psr_flush(struct drm_i915_private *dev_priv,
INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe);
intel_dp->psr.busy_frontbuffer_bits &= ~pipe_frontbuffer_bits;
+ /*
+ * If the PSR is paused by an explicit intel_psr_pause() call,
+ * we have to ensure that the PSR is not activated until
+ * intel_psr_resume() is called.
+ */
+ if (intel_dp->psr.paused) {
+ mutex_unlock(&intel_dp->psr.lock);
+ continue;
+ }
+
/* By definition flush = invalidate + flush */
if (pipe_frontbuffer_bits)
psr_force_hw_tracking_exit(intel_dp);
diff --git a/drivers/gpu/drm/i915/display/intel_psr.h b/drivers/gpu/drm/i915/display/intel_psr.h
index e3db85e97f4c..641521b101c8 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.h
+++ b/drivers/gpu/drm/i915/display/intel_psr.h
@@ -51,5 +51,7 @@ void intel_psr2_program_plane_sel_fetch(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state,
int color_plane);
+void intel_psr_pause(struct intel_dp *intel_dp);
+void intel_psr_resume(struct intel_dp *intel_dp);
#endif /* __INTEL_PSR_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_qp_tables.c b/drivers/gpu/drm/i915/display/intel_qp_tables.c
new file mode 100644
index 000000000000..c626a24fe98f
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_qp_tables.c
@@ -0,0 +1,309 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#include <drm/drm_dsc.h>
+
+#include "i915_utils.h"
+#include "intel_qp_tables.h"
+
+/* from BPP 6 to 24 in steps of 0.5 */
+#define RC_RANGE_QP444_8BPC_MAX_NUM_BPP 37
+
+/* from BPP 6 to 30 in steps of 0.5 */
+#define RC_RANGE_QP444_10BPC_MAX_NUM_BPP 49
+
+/* from BPP 6 to 36 in steps of 0.5 */
+#define RC_RANGE_QP444_12BPC_MAX_NUM_BPP 61
+
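/*
 * Editor's note: illustrative arithmetic only, not part of the patch. The
 * column counts above follow from the covered bpp range in 0.5 steps, e.g.
 * for 8bpc: (24 - 6) / 0.5 + 1 = 37 entries; the column for an integer bpp
 * is (bpp - 6) * 2, since the fractional steps in between are skipped.
 */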
+/*
+ * These QP tables are as per the C model, with the columns covering bpp
+ * values that increment in steps of 0.5.
+ * We do not support fractional bpps as of today, hence we skip the
+ * fractional bpp entries when doing our lookups for QP calculations.
+ */
+static const u8 rc_range_minqp444_8bpc[DSC_NUM_BUF_RANGES][RC_RANGE_QP444_8BPC_MAX_NUM_BPP] = {
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 3, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 3, 3, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 5, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 5, 4, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0 },
+ { 5, 5, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0 },
+ { 5, 5, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0 },
+ { 5, 5, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 0, 0, 0 },
+ { 6, 5, 5, 4, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 2, 2, 1, 1, 1, 1, 1, 1, 0, 0, 0 },
+ { 6, 6, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4, 4, 3,
+ 3, 3, 3, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 0 },
+ { 6, 6, 6, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4,
+ 4, 4, 4, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0 },
+ { 6, 6, 6, 6, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 4,
+ 4, 4, 4, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0 },
+ { 9, 9, 9, 9, 8, 8, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 6, 6, 5,
+ 5, 5, 5, 4, 4, 3, 3, 3, 3, 2, 2, 1, 1, 1 },
+ { 14, 14, 13, 13, 12, 12, 12, 12, 11, 11, 10, 10, 10, 10, 9, 9, 9, 8, 8,
+ 8, 7, 7, 7, 7, 6, 6, 5, 5, 5, 5, 4, 4, 4, 3, 3, 3, 3 }
+};
+
+static const u8 rc_range_maxqp444_8bpc[DSC_NUM_BUF_RANGES][RC_RANGE_QP444_8BPC_MAX_NUM_BPP] = {
+ { 4, 4, 4, 4, 4, 4, 3, 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 6, 6, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4, 3, 3, 3, 2, 2, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0 },
+ { 8, 7, 7, 6, 5, 5, 5, 5, 5, 5, 5, 5, 4, 4, 4, 3, 2, 2, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0 },
+ { 8, 8, 7, 7, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5, 4, 3, 3, 2, 2, 2, 2, 2,
+ 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0 },
+ { 9, 8, 8, 7, 7, 7, 7, 7, 7, 7, 7, 7, 6, 6, 5, 4, 4, 3, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 0 },
+ { 9, 8, 8, 8, 7, 7, 7, 7, 7, 7, 7, 7, 6, 6, 6, 5, 4, 4, 3, 3, 3, 3, 3,
+ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1 },
+ { 9, 9, 8, 8, 7, 7, 7, 7, 7, 7, 7, 7, 7, 6, 6, 5, 4, 4, 3, 3, 3, 3, 3,
+ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1 },
+ { 10, 10, 9, 9, 8, 8, 8, 8, 8, 8, 8, 8, 8, 7, 7, 6, 5, 5, 4, 4, 4, 4, 3,
+ 3, 3, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1 },
+ { 11, 11, 10, 10, 9, 9, 9, 9, 9, 9, 8, 8, 8, 7, 7, 6, 6, 5, 5, 5, 5, 5,
+ 4, 4, 4, 4, 3, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1 },
+ { 12, 11, 11, 10, 10, 10, 9, 9, 9, 9, 9, 9, 9, 8, 8, 7, 6, 6, 5, 5, 5,
+ 5, 4, 4, 4, 4, 3, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1 },
+ { 12, 12, 11, 11, 10, 10, 10, 10, 10, 10, 9, 9, 9, 8, 8, 7, 7, 6, 6, 6,
+ 5, 5, 4, 4, 4, 4, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 1 },
+ { 12, 12, 12, 11, 11, 11, 10, 10, 10, 10, 9, 9, 9, 9, 8, 8, 8, 7, 7, 7,
+ 6, 6, 5, 5, 5, 5, 4, 4, 3, 3, 3, 3, 2, 2, 2, 2, 1 },
+ { 12, 12, 12, 12, 11, 11, 11, 11, 11, 10, 10, 9, 9, 9, 8, 8, 8, 7, 7, 7,
+ 6, 6, 5, 5, 5, 5, 4, 4, 3, 3, 3, 3, 2, 2, 2, 2, 1 },
+ { 13, 13, 13, 13, 12, 12, 11, 11, 11, 11, 10, 10, 10, 10, 9, 9, 8, 8, 8,
+ 8, 7, 7, 6, 6, 6, 6, 5, 5, 4, 4, 4, 4, 3, 3, 2, 2, 2 },
+ { 15, 15, 14, 14, 13, 13, 13, 13, 12, 12, 11, 11, 11, 11, 10, 10, 10, 9,
+ 9, 9, 8, 8, 8, 8, 7, 7, 6, 6, 6, 6, 5, 5, 5, 4, 4, 4, 4 }
+};
+
+static const u8 rc_range_minqp444_10bpc[DSC_NUM_BUF_RANGES][RC_RANGE_QP444_10BPC_MAX_NUM_BPP] = {
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0 },
+ { 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0 },
+ { 7, 6, 6, 6, 5, 5, 5, 5, 5, 5, 5, 5, 4, 4, 4, 3, 3, 3, 3, 3, 3, 2, 2,
+ 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0 },
+ { 7, 7, 6, 6, 5, 5, 5, 5, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4, 4, 3, 3,
+ 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0 },
+ { 9, 8, 8, 7, 7, 7, 7, 7, 7, 7, 7, 7, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 3,
+ 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0 },
+ { 9, 8, 8, 8, 7, 7, 7, 7, 7, 7, 7, 7, 6, 6, 6, 6, 5, 5, 5, 5, 5, 5, 4,
+ 4, 4, 4, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 0, 0, 0,
+ 0, 0, 0 },
+ { 9, 9, 8, 8, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 6, 6, 6, 5, 5, 5, 5, 5,
+ 4, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 1, 1, 1, 1, 0, 0,
+ 0, 0, 0 },
+ { 9, 9, 8, 8, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 6, 6, 6, 6, 6, 6, 5,
+ 5, 5, 5, 4, 4, 4, 4, 4, 4, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 0,
+ 0, 0, 0 },
+ { 9, 9, 8, 8, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 6, 6, 6, 6, 6, 5,
+ 5, 5, 5, 5, 5, 5, 4, 4, 4, 4, 4, 3, 3, 3, 3, 3, 3, 2, 2, 2, 1, 1, 1,
+ 1, 0, 0 },
+ { 10, 9, 9, 8, 8, 8, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 6, 6, 6, 6, 6,
+ 6, 6, 6, 6, 6, 5, 5, 4, 4, 4, 4, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 1, 1,
+ 1, 1, 0 },
+ { 10, 10, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 8, 8, 8, 8, 7, 7, 7, 7, 7, 6,
+ 6, 6, 6, 6, 6, 5, 5, 5, 5, 5, 5, 5, 5, 4, 4, 4, 3, 3, 3, 2, 2, 2, 1,
+ 1, 1, 1 },
+ { 10, 10, 10, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 8, 8, 8, 8, 8,
+ 7, 7, 7, 7, 7, 7, 6, 6, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 3, 3, 3, 3, 2,
+ 2, 1, 1, 1 },
+ { 10, 10, 10, 10, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 8, 8,
+ 8, 8, 8, 8, 7, 7, 6, 6, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 3, 3, 3, 3, 2,
+ 2, 2, 2, 1 },
+ { 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 11, 11, 11, 11, 11, 11, 11,
+ 11, 11, 11, 10, 10, 9, 9, 9, 9, 8, 8, 7, 7, 7, 7, 6, 6, 5, 5, 5, 5, 4,
+ 4, 3, 3, 3, 3, 2, 2, 2, 2, 1 },
+ { 18, 18, 17, 17, 16, 16, 16, 16, 15, 15, 14, 14, 14, 14, 13, 13, 13,
+ 12, 12, 12, 11, 11, 11, 11, 10, 10, 9, 9, 9, 9, 9, 8, 8, 7, 7, 7, 7,
+ 7, 6, 6, 5, 5, 5, 5, 4, 4, 3, 3, 3 }
+};
+
+static const u8 rc_range_maxqp444_10bpc[DSC_NUM_BUF_RANGES][RC_RANGE_QP444_10BPC_MAX_NUM_BPP] = {
+ { 8, 8, 8, 8, 8, 8, 7, 7, 7, 6, 5, 5, 4, 4, 3, 3, 3, 2, 2, 2, 2, 2, 2,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0 },
+ { 10, 10, 9, 9, 8, 8, 8, 8, 8, 8, 7, 7, 6, 6, 6, 5, 5, 4, 4, 4, 4, 3, 3,
+ 3, 3, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0 },
+ { 12, 11, 11, 10, 9, 9, 9, 9, 9, 9, 9, 9, 8, 8, 8, 7, 6, 6, 5, 5, 5, 4,
+ 4, 4, 4, 4, 3, 3, 3, 3, 3, 3, 2, 2, 2, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0,
+ 0, 0, 0, 0 },
+ { 12, 12, 11, 11, 10, 10, 10, 10, 10, 10, 10, 10, 9, 9, 9, 8, 7, 7, 6,
+ 6, 6, 5, 5, 5, 5, 5, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 1, 1, 1, 1,
+ 1, 0, 0, 0, 0, 0, 0 },
+ { 13, 12, 12, 11, 11, 11, 11, 11, 11, 11, 11, 11, 10, 10, 9, 8, 8, 7,
+ 6, 6, 6, 6, 5, 5, 5, 5, 5, 5, 4, 4, 4, 3, 3, 3, 3, 3, 2, 2, 2, 2, 1,
+ 1, 1, 1, 0, 0, 0, 0, 0 },
+ { 13, 12, 12, 12, 11, 11, 11, 11, 11, 11, 11, 11, 10, 10, 10, 9, 8, 8,
+ 7, 7, 7, 7, 6, 6, 6, 6, 5, 5, 5, 5, 4, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2,
+ 2, 2, 1, 1, 1, 1, 0, 0 },
+ { 13, 13, 12, 12, 11, 11, 11, 11, 11, 11, 11, 11, 11, 10, 10, 9, 8, 8,
+ 7, 7, 7, 7, 7, 6, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 3, 3, 3, 2,
+ 2, 2, 2, 1, 1, 1, 1, 1 },
+ { 14, 14, 13, 13, 12, 12, 12, 12, 12, 12, 12, 12, 12, 11, 11, 10, 9, 9,
+ 8, 8, 8, 8, 7, 7, 7, 7, 6, 6, 6, 5, 5, 5, 4, 4, 4, 4, 4, 4, 3, 3, 3,
+ 3, 2, 2, 2, 1, 1, 1, 1 },
+ { 15, 15, 14, 14, 13, 13, 13, 13, 13, 13, 12, 12, 12, 11, 11, 10, 10, 9,
+ 9, 9, 9, 9, 8, 8, 8, 8, 7, 7, 6, 5, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 3,
+ 3, 3, 2, 2, 2, 2, 1, 1 },
+ { 16, 15, 15, 14, 14, 14, 13, 13, 13, 13, 13, 13, 13, 12, 12, 11, 10,
+ 10, 9, 9, 9, 9, 8, 8, 8, 8, 7, 7, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4,
+ 4, 3, 3, 3, 2, 2, 2, 2, 1 },
+ { 16, 16, 15, 15, 14, 14, 14, 14, 14, 14, 13, 13, 13, 12, 12, 11, 11,
+ 10, 10, 10, 9, 9, 8, 8, 8, 8, 7, 7, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5,
+ 4, 4, 4, 3, 3, 3, 2, 2, 2, 2 },
+ { 16, 16, 16, 15, 15, 15, 14, 14, 14, 14, 13, 13, 13, 13, 12, 12, 12,
+ 11, 11, 11, 10, 10, 9, 9, 9, 9, 8, 8, 7, 7, 7, 7, 6, 6, 6, 6, 5, 5, 5,
+ 5, 4, 4, 4, 4, 3, 3, 2, 2, 2 },
+ { 16, 16, 16, 16, 15, 15, 15, 15, 15, 14, 14, 13, 13, 13, 12, 12, 12,
+ 11, 11, 11, 10, 10, 9, 9, 9, 9, 8, 8, 7, 7, 7, 7, 6, 6, 6, 6, 5, 5, 5,
+ 5, 4, 4, 4, 4, 3, 3, 3, 3, 2 },
+ { 17, 17, 17, 17, 16, 16, 15, 15, 15, 15, 14, 14, 14, 14, 13, 13, 12,
+ 12, 12, 12, 11, 11, 10, 10, 10, 10, 9, 9, 8, 8, 8, 8, 7, 7, 6, 6, 6,
+ 6, 5, 5, 4, 4, 4, 4, 3, 3, 3, 3, 2 },
+ { 19, 19, 18, 18, 17, 17, 17, 17, 16, 16, 15, 15, 15, 15, 14, 14, 14,
+ 13, 13, 13, 12, 12, 12, 12, 11, 11, 10, 10, 10, 10, 10, 9, 9, 8, 8, 8,
+ 8, 8, 7, 7, 6, 6, 6, 6, 5, 5, 4, 4, 4 }
+};
+
+static const u8 rc_range_minqp444_12bpc[DSC_NUM_BUF_RANGES][RC_RANGE_QP444_12BPC_MAX_NUM_BPP] = {
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 11, 10, 10, 9, 8, 8, 8, 8, 8, 8, 8, 8, 7, 7, 7, 6, 5, 5, 4, 4, 4, 3, 3, 3, 3,
+ 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 11, 11, 10, 10, 9, 9, 9, 9, 9, 9, 9, 9, 8, 8, 8, 7, 6, 6, 6, 6, 6, 5, 5, 5,
+ 5, 5, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 13, 12, 12, 11, 11, 11, 11, 11, 11, 11, 11, 11, 10, 10, 9, 9, 9, 8, 7, 7, 7,
+ 7, 5, 5, 5, 5, 5, 5, 4, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 1, 1,
+ 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 13, 12, 12, 12, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 10, 9, 9, 8, 8,
+ 8, 8, 6, 6, 6, 6, 5, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4, 4, 3, 3, 3, 2, 2, 2,
+ 2, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 13, 13, 12, 12, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 10, 9, 9, 9, 9,
+ 9, 9, 9, 8, 8, 8, 7, 7, 7, 6, 5, 5, 5, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 3, 3,
+ 3, 2, 2, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0 },
+ { 13, 13, 12, 12, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 10, 10, 10,
+ 10, 10, 10, 9, 9, 9, 9, 8, 8, 8, 7, 7, 7, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5, 4, 4,
+ 4, 3, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0 },
+ { 13, 13, 12, 12, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
+ 11, 11, 11, 10, 10, 10, 10, 9, 9, 8, 7, 7, 7, 7, 7, 6, 6, 6, 6, 6, 6, 5, 5,
+ 5, 4, 4, 4, 4, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 0, 0 },
+ { 14, 13, 13, 12, 12, 12, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
+ 11, 11, 11, 10, 10, 10, 10, 9, 9, 8, 8, 8, 8, 8, 8, 7, 7, 7, 7, 7, 6, 6, 5,
+ 5, 5, 4, 4, 4, 4, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 1, 1, 0 },
+ { 14, 14, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 12, 12, 12, 12,
+ 12, 11, 11, 11, 11, 11, 11, 10, 10, 9, 9, 9, 9, 9, 9, 9, 9, 8, 8, 8, 7, 7, 7,
+ 6, 6, 6, 5, 5, 5, 5, 4, 4, 3, 3, 3, 2, 2, 2, 2, 1, 1, 1 },
+ { 14, 14, 14, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
+ 13, 12, 12, 11, 11, 11, 11, 11, 11, 10, 10, 10, 10, 9, 9, 9, 9, 8, 8, 8, 8,
+ 7, 7, 7, 7, 6, 6, 5, 5, 5, 4, 4, 4, 3, 3, 3, 3, 2, 2, 1, 1, 1 },
+ { 14, 14, 14, 14, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
+ 13, 12, 12, 12, 12, 12, 12, 11, 11, 10, 10, 10, 10, 9, 9, 9, 9, 8, 8, 8, 8,
+ 7, 7, 7, 7, 6, 6, 6, 6, 5, 4, 4, 4, 3, 3, 3, 3, 2, 2, 1, 1, 1 },
+ { 17, 17, 17, 17, 16, 16, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
+ 15, 14, 14, 13, 13, 13, 13, 12, 12, 11, 11, 11, 11, 10, 10, 9, 9, 9, 9, 8, 8,
+ 7, 7, 7, 7, 7, 6, 6, 6, 5, 5, 5, 5, 4, 4, 3, 3, 3, 3, 2, 2, 1 },
+ { 22, 22, 21, 21, 20, 20, 20, 20, 19, 19, 18, 18, 18, 18, 17, 17, 17, 16, 16,
+ 16, 15, 15, 15, 15, 14, 14, 13, 13, 13, 13, 13, 12, 12, 11, 11, 11, 11, 11,
+ 10, 10, 9, 9, 9, 9, 9, 8, 8, 7, 7, 7, 7, 7, 6, 6, 5, 5, 5, 5, 4, 4, 3 }
+};
+
+static const u8 rc_range_maxqp444_12bpc[DSC_NUM_BUF_RANGES][RC_RANGE_QP444_12BPC_MAX_NUM_BPP] = {
+ { 12, 12, 12, 12, 12, 12, 11, 11, 11, 10, 9, 9, 6, 6, 5, 5, 5, 4, 4, 4, 4, 4,
+ 4, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 14, 14, 13, 13, 12, 12, 12, 12, 12, 12, 11, 11, 9, 9, 9, 8, 8, 7, 7, 7, 7, 5,
+ 5, 5, 5, 5, 4, 4, 4, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 16, 15, 15, 14, 13, 13, 13, 13, 13, 13, 13, 13, 12, 12, 12, 11, 10, 10, 9, 9,
+ 9, 7, 7, 7, 7, 7, 6, 6, 5, 5, 5, 5, 4, 4, 4, 3, 3, 3, 3, 3, 3, 2, 2, 2, 1, 1,
+ 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 16, 16, 15, 15, 14, 14, 14, 14, 14, 14, 14, 14, 13, 13, 13, 12, 11, 11, 10,
+ 10, 10, 8, 8, 8, 8, 8, 7, 7, 6, 5, 5, 5, 5, 5, 5, 5, 4, 4, 3, 3, 3, 3, 3, 2,
+ 2, 2, 2, 2, 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 17, 16, 16, 15, 15, 15, 15, 15, 15, 15, 15, 15, 14, 14, 13, 12, 12, 11, 10,
+ 10, 10, 10, 8, 8, 8, 8, 8, 8, 7, 7, 7, 6, 6, 5, 5, 5, 4, 4, 4, 4, 3, 3, 3, 3,
+ 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0 },
+ { 17, 16, 16, 16, 15, 15, 15, 15, 15, 15, 15, 15, 14, 14, 14, 13, 12, 12, 11,
+ 11, 11, 11, 9, 9, 9, 9, 8, 8, 8, 8, 7, 6, 6, 6, 6, 6, 5, 5, 5, 5, 4, 4, 4, 3,
+ 3, 3, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 0 },
+ { 17, 17, 16, 16, 15, 15, 15, 15, 15, 15, 15, 15, 15, 14, 14, 13, 12, 12, 11,
+ 11, 11, 11, 11, 10, 10, 10, 9, 9, 9, 8, 7, 7, 7, 7, 7, 7, 7, 6, 6, 6, 5, 5,
+ 5, 5, 4, 4, 4, 3, 3, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 0 },
+ { 18, 18, 17, 17, 16, 16, 16, 16, 16, 16, 16, 16, 16, 15, 15, 14, 13, 13, 12,
+ 12, 12, 12, 11, 11, 11, 11, 10, 10, 10, 8, 8, 8, 7, 7, 7, 7, 7, 7, 6, 6, 6,
+ 6, 5, 5, 5, 4, 4, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1 },
+ { 19, 19, 18, 18, 17, 17, 17, 17, 17, 17, 16, 16, 16, 15, 15, 14, 14, 13, 13,
+ 13, 13, 13, 12, 12, 12, 12, 11, 11, 10, 9, 8, 8, 8, 8, 7, 7, 7, 7, 7, 7, 6,
+ 6, 6, 5, 5, 5, 5, 4, 4, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 1, 1 },
+ { 20, 19, 19, 18, 18, 18, 17, 17, 17, 17, 17, 17, 17, 16, 16, 15, 14, 14, 13,
+ 13, 13, 13, 12, 12, 12, 12, 11, 11, 10, 10, 9, 9, 9, 9, 8, 8, 8, 8, 8, 7, 7,
+ 6, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 4, 3, 3, 3, 3, 3, 2, 2, 1 },
+ { 20, 20, 19, 19, 18, 18, 18, 18, 18, 18, 17, 17, 17, 16, 16, 15, 15, 14, 14,
+ 14, 13, 13, 12, 12, 12, 12, 11, 11, 10, 10, 10, 10, 10, 10, 10, 10, 9, 9, 9,
+ 8, 8, 8, 7, 7, 7, 6, 6, 6, 6, 5, 5, 4, 4, 4, 3, 3, 3, 3, 2, 2, 2 },
+ { 20, 20, 20, 19, 19, 19, 18, 18, 18, 18, 17, 17, 17, 17, 16, 16, 16, 15, 15,
+ 15, 14, 14, 13, 13, 13, 13, 12, 12, 11, 11, 11, 11, 10, 10, 10, 10, 9, 9, 9,
+ 9, 8, 8, 8, 8, 7, 7, 6, 6, 6, 5, 5, 5, 4, 4, 4, 4, 3, 3, 2, 2, 2 },
+ { 20, 20, 20, 20, 19, 19, 19, 19, 19, 18, 18, 17, 17, 17, 16, 16, 16, 15, 15,
+ 15, 14, 14, 13, 13, 13, 13, 12, 12, 11, 11, 11, 11, 10, 10, 10, 10, 9, 9, 9,
+ 9, 8, 8, 8, 8, 7, 7, 7, 7, 6, 5, 5, 5, 4, 4, 4, 4, 3, 3, 2, 2, 2 },
+ { 21, 21, 21, 21, 20, 20, 19, 19, 19, 19, 18, 18, 18, 18, 17, 17, 16, 16, 16,
+ 16, 15, 15, 14, 14, 14, 14, 13, 13, 12, 12, 12, 12, 11, 11, 10, 10, 10, 10,
+ 9, 9, 8, 8, 8, 8, 8, 7, 7, 7, 6, 6, 6, 6, 5, 5, 4, 4, 4, 4, 3, 3, 2 },
+ { 23, 23, 22, 22, 21, 21, 21, 21, 20, 20, 19, 19, 19, 19, 18, 18, 18, 17, 17,
+ 17, 16, 16, 16, 16, 15, 15, 14, 14, 14, 14, 14, 13, 13, 12, 12, 12, 12, 12,
+ 11, 11, 10, 10, 10, 10, 10, 9, 9, 8, 8, 8, 8, 8, 7, 7, 6, 6, 6, 6, 5, 5, 4 }
+};
+
+#define PARAM_TABLE(_minmax, _bpc, _row, _col) do { \
+ if (bpc == (_bpc)) \
+ return rc_range_##_minmax##qp444_##_bpc##bpc[_row][_col]; \
+} while (0)
+
+u8 intel_lookup_range_min_qp(int bpc, int buf_i, int bpp_i)
+{
+ PARAM_TABLE(min, 8, buf_i, bpp_i);
+ PARAM_TABLE(min, 10, buf_i, bpp_i);
+ PARAM_TABLE(min, 12, buf_i, bpp_i);
+
+ MISSING_CASE(bpc);
+ return 0;
+}
+
+u8 intel_lookup_range_max_qp(int bpc, int buf_i, int bpp_i)
+{
+ PARAM_TABLE(max, 8, buf_i, bpp_i);
+ PARAM_TABLE(max, 10, buf_i, bpp_i);
+ PARAM_TABLE(max, 12, buf_i, bpp_i);
+
+ MISSING_CASE(bpc);
+ return 0;
+}
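
As a side note, the lookup above is only a bpc dispatch into the static tables: the row is the RC buffer range (0..DSC_NUM_BUF_RANGES-1) and the column is a 0.5-bpp step starting at 6 bpp. Below is a standalone sketch of the same pattern, reduced to dummy two-entry tables so it compiles on its own; the table contents here are illustrative placeholders, not the real QP values.

#include <stdio.h>

/* dummy stand-ins for the rc_range_minqp444_*bpc tables above */
static const unsigned char rc_min_8bpc[2][2]  = { { 0, 0 }, { 1, 1 } };
static const unsigned char rc_min_10bpc[2][2] = { { 0, 0 }, { 4, 4 } };

#define PARAM_TABLE(_bpc, _row, _col) do { \
	if (bpc == (_bpc)) \
		return rc_min_##_bpc##bpc[_row][_col]; \
} while (0)

static unsigned char lookup_min_qp(int bpc, int buf_i, int bpp_i)
{
	PARAM_TABLE(8, buf_i, bpp_i);
	PARAM_TABLE(10, buf_i, bpp_i);
	return 0;	/* unknown bpc, mirrors the MISSING_CASE() fallback */
}

int main(void)
{
	/* column 0 corresponds to 6 bpp, column 1 to 6.5 bpp, and so on */
	printf("min qp @ 10bpc, range 1, col 0: %u\n", lookup_min_qp(10, 1, 0));
	return 0;
}
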
diff --git a/drivers/gpu/drm/i915/display/intel_qp_tables.h b/drivers/gpu/drm/i915/display/intel_qp_tables.h
new file mode 100644
index 000000000000..9fb3c36bd7c6
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_qp_tables.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#ifndef _INTEL_QP_TABLES_H_
+#define _INTEL_QP_TABLES_H_
+
+#include <linux/types.h>
+
+u8 intel_lookup_range_min_qp(int bpc, int buf_i, int bpp_i);
+u8 intel_lookup_range_max_qp(int bpc, int buf_i, int bpp_i);
+
+#endif
diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c
index e325463acddd..c23c210a55f5 100644
--- a/drivers/gpu/drm/i915/display/intel_tc.c
+++ b/drivers/gpu/drm/i915/display/intel_tc.c
@@ -26,9 +26,7 @@ static const char *tc_port_mode_name(enum tc_port_mode mode)
static enum intel_display_power_domain
tc_cold_get_power_domain(struct intel_digital_port *dig_port)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
-
- if (DISPLAY_VER(i915) == 11)
+ if (intel_tc_cold_requires_aux_pw(dig_port))
return intel_legacy_aux_to_power_domain(dig_port->aux_ch);
else
return POWER_DOMAIN_TC_COLD_OFF;
@@ -205,7 +203,7 @@ static void tc_port_fixup_legacy_flag(struct intel_digital_port *dig_port,
dig_port->tc_legacy_port = !dig_port->tc_legacy_port;
}
-static u32 tc_port_live_status_mask(struct intel_digital_port *dig_port)
+static u32 icl_tc_port_live_status_mask(struct intel_digital_port *dig_port)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
struct intel_uncore *uncore = &i915->uncore;
@@ -238,6 +236,40 @@ static u32 tc_port_live_status_mask(struct intel_digital_port *dig_port)
return mask;
}
+static u32 adl_tc_port_live_status_mask(struct intel_digital_port *dig_port)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port);
+ u32 isr_bit = i915->hotplug.pch_hpd[dig_port->base.hpd_pin];
+ struct intel_uncore *uncore = &i915->uncore;
+ u32 val, mask = 0;
+
+ val = intel_uncore_read(uncore, TCSS_DDI_STATUS(tc_port));
+ if (val & TCSS_DDI_STATUS_HPD_LIVE_STATUS_ALT)
+ mask |= BIT(TC_PORT_DP_ALT);
+ if (val & TCSS_DDI_STATUS_HPD_LIVE_STATUS_TBT)
+ mask |= BIT(TC_PORT_TBT_ALT);
+
+ if (intel_uncore_read(uncore, SDEISR) & isr_bit)
+ mask |= BIT(TC_PORT_LEGACY);
+
+ /* The sink can be connected only in a single mode. */
+ if (!drm_WARN_ON(&i915->drm, hweight32(mask) > 1))
+ tc_port_fixup_legacy_flag(dig_port, mask);
+
+ return mask;
+}
+
+static u32 tc_port_live_status_mask(struct intel_digital_port *dig_port)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+
+ if (IS_ALDERLAKE_P(i915))
+ return adl_tc_port_live_status_mask(dig_port);
+
+ return icl_tc_port_live_status_mask(dig_port);
+}
+
static bool icl_tc_phy_status_complete(struct intel_digital_port *dig_port)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
@@ -256,6 +288,33 @@ static bool icl_tc_phy_status_complete(struct intel_digital_port *dig_port)
return val & DP_PHY_MODE_STATUS_COMPLETED(dig_port->tc_phy_fia_idx);
}
+static bool adl_tc_phy_status_complete(struct intel_digital_port *dig_port)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_uncore *uncore = &i915->uncore;
+ u32 val;
+
+ val = intel_uncore_read(uncore, TCSS_DDI_STATUS(dig_port->tc_phy_fia_idx));
+ if (val == 0xffffffff) {
+ drm_dbg_kms(&i915->drm,
+ "Port %s: PHY in TCCOLD, assuming not complete\n",
+ dig_port->tc_port_name);
+ return false;
+ }
+
+ return val & TCSS_DDI_STATUS_READY;
+}
+
+static bool tc_phy_status_complete(struct intel_digital_port *dig_port)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+
+ if (IS_ALDERLAKE_P(i915))
+ return adl_tc_phy_status_complete(dig_port);
+
+ return icl_tc_phy_status_complete(dig_port);
+}
+
static bool icl_tc_phy_take_ownership(struct intel_digital_port *dig_port,
bool take)
{
@@ -280,7 +339,7 @@ static bool icl_tc_phy_take_ownership(struct intel_digital_port *dig_port,
intel_uncore_write(uncore,
PORT_TX_DFLEXDPCSSS(dig_port->tc_phy_fia), val);
- if (!take && wait_for(!icl_tc_phy_status_complete(dig_port), 10))
+ if (!take && wait_for(!tc_phy_status_complete(dig_port), 10))
drm_dbg_kms(&i915->drm,
"Port %s: PHY complete clear timed out\n",
dig_port->tc_port_name);
@@ -288,6 +347,34 @@ static bool icl_tc_phy_take_ownership(struct intel_digital_port *dig_port,
return true;
}
+static bool adl_tc_phy_take_ownership(struct intel_digital_port *dig_port,
+ bool take)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_uncore *uncore = &i915->uncore;
+ enum port port = dig_port->base.port;
+ u32 val;
+
+ val = intel_uncore_read(uncore, DDI_BUF_CTL(port));
+ if (take)
+ val |= DDI_BUF_CTL_TC_PHY_OWNERSHIP;
+ else
+ val &= ~DDI_BUF_CTL_TC_PHY_OWNERSHIP;
+ intel_uncore_write(uncore, DDI_BUF_CTL(port), val);
+
+ return true;
+}
+
+static bool tc_phy_take_ownership(struct intel_digital_port *dig_port, bool take)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+
+ if (IS_ALDERLAKE_P(i915))
+ return adl_tc_phy_take_ownership(dig_port, take);
+
+ return icl_tc_phy_take_ownership(dig_port, take);
+}
+
static bool icl_tc_phy_is_owned(struct intel_digital_port *dig_port)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
@@ -306,6 +393,27 @@ static bool icl_tc_phy_is_owned(struct intel_digital_port *dig_port)
return val & DP_PHY_MODE_STATUS_NOT_SAFE(dig_port->tc_phy_fia_idx);
}
+static bool adl_tc_phy_is_owned(struct intel_digital_port *dig_port)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_uncore *uncore = &i915->uncore;
+ enum port port = dig_port->base.port;
+ u32 val;
+
+ val = intel_uncore_read(uncore, DDI_BUF_CTL(port));
+ return val & DDI_BUF_CTL_TC_PHY_OWNERSHIP;
+}
+
+static bool tc_phy_is_owned(struct intel_digital_port *dig_port)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+
+ if (IS_ALDERLAKE_P(i915))
+ return adl_tc_phy_is_owned(dig_port);
+
+ return icl_tc_phy_is_owned(dig_port);
+}
+
/*
* This function implements the first part of the Connect Flow described by our
* specification, Gen11 TypeC Programming chapter. The rest of the flow (reading
@@ -323,13 +431,13 @@ static void icl_tc_phy_connect(struct intel_digital_port *dig_port,
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
int max_lanes;
- if (!icl_tc_phy_status_complete(dig_port)) {
+ if (!tc_phy_status_complete(dig_port)) {
drm_dbg_kms(&i915->drm, "Port %s: PHY not ready\n",
dig_port->tc_port_name);
goto out_set_tbt_alt_mode;
}
- if (!icl_tc_phy_take_ownership(dig_port, true) &&
+ if (!tc_phy_take_ownership(dig_port, true) &&
!drm_WARN_ON(&i915->drm, dig_port->tc_legacy_port))
goto out_set_tbt_alt_mode;
@@ -364,7 +472,7 @@ static void icl_tc_phy_connect(struct intel_digital_port *dig_port,
return;
out_release_phy:
- icl_tc_phy_take_ownership(dig_port, false);
+ tc_phy_take_ownership(dig_port, false);
out_set_tbt_alt_mode:
dig_port->tc_mode = TC_PORT_TBT_ALT;
}
@@ -380,7 +488,7 @@ static void icl_tc_phy_disconnect(struct intel_digital_port *dig_port)
/* Nothing to do, we never disconnect from legacy mode */
break;
case TC_PORT_DP_ALT:
- icl_tc_phy_take_ownership(dig_port, false);
+ tc_phy_take_ownership(dig_port, false);
dig_port->tc_mode = TC_PORT_TBT_ALT;
break;
case TC_PORT_TBT_ALT:
@@ -395,13 +503,13 @@ static bool icl_tc_phy_is_connected(struct intel_digital_port *dig_port)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
- if (!icl_tc_phy_status_complete(dig_port)) {
+ if (!tc_phy_status_complete(dig_port)) {
drm_dbg_kms(&i915->drm, "Port %s: PHY status not complete\n",
dig_port->tc_port_name);
return dig_port->tc_mode == TC_PORT_TBT_ALT;
}
- if (!icl_tc_phy_is_owned(dig_port)) {
+ if (!tc_phy_is_owned(dig_port)) {
drm_dbg_kms(&i915->drm, "Port %s: PHY not owned\n",
dig_port->tc_port_name);
@@ -419,8 +527,8 @@ intel_tc_port_get_current_mode(struct intel_digital_port *dig_port)
u32 live_status_mask = tc_port_live_status_mask(dig_port);
enum tc_port_mode mode;
- if (!icl_tc_phy_is_owned(dig_port) ||
- drm_WARN_ON(&i915->drm, !icl_tc_phy_status_complete(dig_port)))
+ if (!tc_phy_is_owned(dig_port) ||
+ drm_WARN_ON(&i915->drm, !tc_phy_status_complete(dig_port)))
return TC_PORT_TBT_ALT;
mode = dig_port->tc_legacy_port ? TC_PORT_LEGACY : TC_PORT_DP_ALT;
@@ -442,7 +550,7 @@ intel_tc_port_get_target_mode(struct intel_digital_port *dig_port)
if (live_status_mask)
return fls(live_status_mask) - 1;
- return icl_tc_phy_status_complete(dig_port) &&
+ return tc_phy_status_complete(dig_port) &&
dig_port->tc_legacy_port ? TC_PORT_LEGACY :
TC_PORT_TBT_ALT;
}
@@ -454,7 +562,7 @@ static void intel_tc_port_reset_mode(struct intel_digital_port *dig_port,
enum tc_port_mode old_tc_mode = dig_port->tc_mode;
intel_display_power_flush_work(i915);
- if (DISPLAY_VER(i915) != 11 || !dig_port->tc_legacy_port) {
+ if (!intel_tc_cold_requires_aux_pw(dig_port)) {
enum intel_display_power_domain aux_domain;
bool aux_powered;
@@ -624,13 +732,11 @@ tc_has_modular_fia(struct drm_i915_private *i915, struct intel_digital_port *dig
if (!INTEL_INFO(i915)->display.has_modular_fia)
return false;
- /* TODO: check if in real HW MODULAR_FIA_MASK is set, if so remove this block */
- if (IS_ALDERLAKE_P(i915))
- return true;
-
+ mutex_lock(&dig_port->tc_lock);
wakeref = tc_cold_block(dig_port);
val = intel_uncore_read(&i915->uncore, PORT_TX_DFLEXDPSP(FIA1));
tc_cold_unblock(dig_port, wakeref);
+ mutex_unlock(&dig_port->tc_lock);
drm_WARN_ON(&i915->drm, val == 0xffffffff);
@@ -673,3 +779,11 @@ void intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy)
dig_port->tc_link_refcount = 0;
tc_port_load_fia_params(i915, dig_port);
}
+
+bool intel_tc_cold_requires_aux_pw(struct intel_digital_port *dig_port)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+
+ return (DISPLAY_VER(i915) == 11 && dig_port->tc_legacy_port) ||
+ IS_ALDERLAKE_P(i915);
+}
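
The intel_tc.c hunks above all follow one shape: a small wrapper picks the Alderlake-P variant and otherwise falls back to the ICL path. A standalone sketch of that dispatch shape is below; is_adlp and the two backends are made-up placeholders, not driver API.

#include <stdbool.h>
#include <stdio.h>

/* placeholder backends standing in for the icl/adl function pairs above */
static bool adl_phy_status_complete(void) { return true; }
static bool icl_phy_status_complete(void) { return false; }

static bool phy_status_complete(bool is_adlp)
{
	if (is_adlp)
		return adl_phy_status_complete();

	return icl_phy_status_complete();
}

int main(void)
{
	printf("adlp: %d, icl: %d\n",
	       phy_status_complete(true), phy_status_complete(false));
	return 0;
}
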
diff --git a/drivers/gpu/drm/i915/display/intel_tc.h b/drivers/gpu/drm/i915/display/intel_tc.h
index b619e4736f85..0eacbd76ec15 100644
--- a/drivers/gpu/drm/i915/display/intel_tc.h
+++ b/drivers/gpu/drm/i915/display/intel_tc.h
@@ -29,4 +29,6 @@ bool intel_tc_port_ref_held(struct intel_digital_port *dig_port);
void intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy);
+bool intel_tc_cold_requires_aux_pw(struct intel_digital_port *dig_port);
+
#endif /* __INTEL_TC_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_tv.c b/drivers/gpu/drm/i915/display/intel_tv.c
index ce73ebdfc669..aa52af7891f0 100644
--- a/drivers/gpu/drm/i915/display/intel_tv.c
+++ b/drivers/gpu/drm/i915/display/intel_tv.c
@@ -1307,7 +1307,7 @@ intel_tv_compute_config(struct intel_encoder *encoder,
* the active portion. Hence following this formula seems
* more trouble than it's worth.
*
- * if (IS_GEN(dev_priv, 4)) {
+ * if (GRAPHICS_VER(dev_priv) == 4) {
* num = cdclk * (tv_mode->oversample >> !tv_mode->progressive);
* den = tv_mode->clock;
* } else {
diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.c b/drivers/gpu/drm/i915/display/intel_vdsc.c
index efc3184d8315..7121b66bf96d 100644
--- a/drivers/gpu/drm/i915/display/intel_vdsc.c
+++ b/drivers/gpu/drm/i915/display/intel_vdsc.c
@@ -5,12 +5,13 @@
* Author: Gaurav K Singh <gaurav.k.singh@intel.com>
* Manasi Navare <manasi.d.navare@intel.com>
*/
-
+#include <linux/limits.h>
#include "i915_drv.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_dsi.h"
#include "intel_vdsc.h"
+#include "intel_qp_tables.h"
enum ROW_INDEX_BPP {
ROW_INDEX_6BPP = 0,
@@ -373,12 +374,81 @@ static bool is_pipe_dsc(const struct intel_crtc_state *crtc_state)
return true;
}
+static void
+calculate_rc_params(struct rc_parameters *rc,
+ struct drm_dsc_config *vdsc_cfg)
+{
+ int bpc = vdsc_cfg->bits_per_component;
+ int bpp = vdsc_cfg->bits_per_pixel >> 4;
+ int ofs_und6[] = { 0, -2, -2, -4, -6, -6, -8, -8, -8, -10, -10, -12, -12, -12, -12 };
+ int ofs_und8[] = { 2, 0, 0, -2, -4, -6, -8, -8, -8, -10, -10, -10, -12, -12, -12 };
+ int ofs_und12[] = { 2, 0, 0, -2, -4, -6, -8, -8, -8, -10, -10, -10, -12, -12, -12 };
+ int ofs_und15[] = { 10, 8, 6, 4, 2, 0, -2, -4, -6, -8, -10, -10, -12, -12, -12 };
+ int qp_bpc_modifier = (bpc - 8) * 2;
+ u32 res, buf_i, bpp_i;
+
+ if (vdsc_cfg->slice_height >= 8)
+ rc->first_line_bpg_offset =
+ 12 + DIV_ROUND_UP((9 * min(34, vdsc_cfg->slice_height - 8)), 100);
+ else
+ rc->first_line_bpg_offset = 2 * (vdsc_cfg->slice_height - 1);
+
+ /* Our hw supports only 444 modes as of today */
+ if (bpp >= 12)
+ rc->initial_offset = 2048;
+ else if (bpp >= 10)
+ rc->initial_offset = 5632 - DIV_ROUND_UP(((bpp - 10) * 3584), 2);
+ else if (bpp >= 8)
+ rc->initial_offset = 6144 - DIV_ROUND_UP(((bpp - 8) * 512), 2);
+ else
+ rc->initial_offset = 6144;
+
+ /* initial_xmit_delay = rc_model_size/2/compression_bpp */
+ rc->initial_xmit_delay = DIV_ROUND_UP(DSC_RC_MODEL_SIZE_CONST, 2 * bpp);
+
+ rc->flatness_min_qp = 3 + qp_bpc_modifier;
+ rc->flatness_max_qp = 12 + qp_bpc_modifier;
+
+ rc->rc_quant_incr_limit0 = 11 + qp_bpc_modifier;
+ rc->rc_quant_incr_limit1 = 11 + qp_bpc_modifier;
+
+ bpp_i = (2 * (bpp - 6));
+ for (buf_i = 0; buf_i < DSC_NUM_BUF_RANGES; buf_i++) {
+ /* Read range_min_qp and range_max_qp from the qp tables */
+ rc->rc_range_params[buf_i].range_min_qp =
+ intel_lookup_range_min_qp(bpc, buf_i, bpp_i);
+ rc->rc_range_params[buf_i].range_max_qp =
+ intel_lookup_range_max_qp(bpc, buf_i, bpp_i);
+
+ /* Calculate range_bpg_offset */
+ if (bpp <= 6) {
+ rc->rc_range_params[buf_i].range_bpg_offset = ofs_und6[buf_i];
+ } else if (bpp <= 8) {
+ res = DIV_ROUND_UP(((bpp - 6) * (ofs_und8[buf_i] - ofs_und6[buf_i])), 2);
+ rc->rc_range_params[buf_i].range_bpg_offset =
+ ofs_und6[buf_i] + res;
+ } else if (bpp <= 12) {
+ rc->rc_range_params[buf_i].range_bpg_offset =
+ ofs_und8[buf_i];
+ } else if (bpp <= 15) {
+ res = DIV_ROUND_UP(((bpp - 12) * (ofs_und15[buf_i] - ofs_und12[buf_i])), 3);
+ rc->rc_range_params[buf_i].range_bpg_offset =
+ ofs_und12[buf_i] + res;
+ } else {
+ rc->rc_range_params[buf_i].range_bpg_offset =
+ ofs_und15[buf_i];
+ }
+ }
+}
+
int intel_dsc_compute_params(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct drm_dsc_config *vdsc_cfg = &pipe_config->dsc.config;
u16 compressed_bpp = pipe_config->dsc.compressed_bpp;
const struct rc_parameters *rc_params;
+ struct rc_parameters *rc = NULL;
u8 i = 0;
vdsc_cfg->pic_width = pipe_config->hw.adjusted_mode.crtc_hdisplay;
@@ -413,9 +483,24 @@ int intel_dsc_compute_params(struct intel_encoder *encoder,
vdsc_cfg->rc_buf_thresh[13] = 0x7D;
}
- rc_params = get_rc_params(compressed_bpp, vdsc_cfg->bits_per_component);
- if (!rc_params)
- return -EINVAL;
+ /*
+ * From XE_LPD onwards we support compression bpps in steps of 1
+ * up to uncompressed bpp-1, hence calculate all the rc
+ * parameters
+ */
+ if (DISPLAY_VER(dev_priv) >= 13) {
+ rc = kmalloc(sizeof(*rc), GFP_KERNEL);
+ if (!rc)
+ return -ENOMEM;
+
+ calculate_rc_params(rc, vdsc_cfg);
+ rc_params = rc;
+ } else {
+ rc_params = get_rc_params(compressed_bpp,
+ vdsc_cfg->bits_per_component);
+ if (!rc_params)
+ return -EINVAL;
+ }
vdsc_cfg->first_line_bpg_offset = rc_params->first_line_bpg_offset;
vdsc_cfg->initial_xmit_delay = rc_params->initial_xmit_delay;
@@ -441,20 +526,20 @@ int intel_dsc_compute_params(struct intel_encoder *encoder,
/*
* BitsPerComponent value determines mux_word_size:
- * When BitsPerComponent is 12bpc, muxWordSize will be equal to 64 bits
- * When BitsPerComponent is 8 or 10bpc, muxWordSize will be equal to
- * 48 bits
+ * When BitsPerComponent is 10bpc or less, muxWordSize will be equal to
+ * 48 bits, otherwise 64 bits
*/
- if (vdsc_cfg->bits_per_component == 8 ||
- vdsc_cfg->bits_per_component == 10)
+ if (vdsc_cfg->bits_per_component <= 10)
vdsc_cfg->mux_word_size = DSC_MUX_WORD_SIZE_8_10_BPC;
- else if (vdsc_cfg->bits_per_component == 12)
+ else
vdsc_cfg->mux_word_size = DSC_MUX_WORD_SIZE_12_BPC;
/* InitialScaleValue is a 6 bit value with 3 fractional bits (U3.3) */
vdsc_cfg->initial_scale_value = (vdsc_cfg->rc_model_size << 3) /
(vdsc_cfg->rc_model_size - vdsc_cfg->initial_offset);
+ kfree(rc);
+
return 0;
}
@@ -1076,12 +1161,12 @@ void intel_dsc_disable(const struct intel_crtc_state *old_crtc_state)
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- if (!(old_crtc_state->dsc.compression_enable &&
- old_crtc_state->bigjoiner))
- return;
-
- intel_de_write(dev_priv, dss_ctl1_reg(old_crtc_state), 0);
- intel_de_write(dev_priv, dss_ctl2_reg(old_crtc_state), 0);
+ /* Disable only if either of them is enabled */
+ if (old_crtc_state->dsc.compression_enable ||
+ old_crtc_state->bigjoiner) {
+ intel_de_write(dev_priv, dss_ctl1_reg(old_crtc_state), 0);
+ intel_de_write(dev_priv, dss_ctl2_reg(old_crtc_state), 0);
+ }
}
void intel_uncompressed_joiner_get_config(struct intel_crtc_state *crtc_state)
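
For reference, the DISPLAY_VER >= 13 path added in calculate_rc_params() above boils down to a handful of integer formulas. A standalone arithmetic sketch follows, with assumed example inputs (8 bpc, 8 bpp compressed, slice_height 108); only the formulas come from the patch, and RC_MODEL_SIZE stands in for DSC_RC_MODEL_SIZE_CONST.

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define RC_MODEL_SIZE		8192	/* stand-in for DSC_RC_MODEL_SIZE_CONST */

int main(void)
{
	int bpc = 8, bpp = 8, slice_height = 108;	/* assumed example inputs */
	int qp_bpc_modifier = (bpc - 8) * 2;
	int m = slice_height - 8 < 34 ? slice_height - 8 : 34;

	/* slice_height >= 8 branch of the patch */
	int first_line_bpg_offset = 12 + DIV_ROUND_UP(9 * m, 100);
	/* bpp >= 8 branch */
	int initial_offset = 6144 - DIV_ROUND_UP((bpp - 8) * 512, 2);
	int initial_xmit_delay = DIV_ROUND_UP(RC_MODEL_SIZE, 2 * bpp);
	int bpp_i = 2 * (bpp - 6);	/* column into the QP tables */

	printf("first_line_bpg_offset = %d\n", first_line_bpg_offset);	/* 16 */
	printf("initial_offset        = %d\n", initial_offset);	/* 6144 */
	printf("initial_xmit_delay    = %d\n", initial_xmit_delay);	/* 512 */
	printf("flatness qp range     = %d..%d\n",
	       3 + qp_bpc_modifier, 12 + qp_bpc_modifier);	/* 3..12 */
	printf("qp table column       = %d\n", bpp_i);	/* 4 */
	return 0;
}
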
diff --git a/drivers/gpu/drm/i915/display/intel_vrr.c b/drivers/gpu/drm/i915/display/intel_vrr.c
index 046210ae1de0..c335b1dbafcf 100644
--- a/drivers/gpu/drm/i915/display/intel_vrr.c
+++ b/drivers/gpu/drm/i915/display/intel_vrr.c
@@ -68,7 +68,10 @@ static int intel_vrr_vblank_exit_length(const struct intel_crtc_state *crtc_stat
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
/* The hw imposes the extra scanline before frame start */
- return crtc_state->vrr.pipeline_full + i915->framestart_delay + 1;
+ if (DISPLAY_VER(i915) >= 13)
+ return crtc_state->vrr.guardband + i915->framestart_delay + 1;
+ else
+ return crtc_state->vrr.pipeline_full + i915->framestart_delay + 1;
}
int intel_vrr_vmin_vblank_start(const struct intel_crtc_state *crtc_state)
@@ -86,6 +89,8 @@ void
intel_vrr_compute_config(struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
struct intel_connector *connector =
to_intel_connector(conn_state->connector);
struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
@@ -124,17 +129,26 @@ intel_vrr_compute_config(struct intel_crtc_state *crtc_state,
crtc_state->vrr.flipline = crtc_state->vrr.vmin + 1;
/*
- * FIXME: s/4/framestart_delay+1/ to get consistent
- * earliest/latest points for register latching regardless
- * of the framestart_delay used?
- *
- * FIXME: this really needs the extra scanline to provide consistent
- * behaviour for all framestart_delay values. Otherwise with
- * framestart_delay==3 we will end up extending the min vblank by
- * one extra line.
+ * For XE_LPD+, we use the guardband; the pipeline-full override
+ * is deprecated.
*/
- crtc_state->vrr.pipeline_full =
- min(255, crtc_state->vrr.vmin - adjusted_mode->crtc_vdisplay - 4 - 1);
+ if (DISPLAY_VER(i915) >= 13)
+ crtc_state->vrr.guardband =
+ crtc_state->vrr.vmin - adjusted_mode->crtc_vdisplay -
+ i915->window2_delay;
+ else
+ /*
+ * FIXME: s/4/framestart_delay+1/ to get consistent
+ * earliest/latest points for register latching regardless
+ * of the framestart_delay used?
+ *
+ * FIXME: this really needs the extra scanline to provide consistent
+ * behaviour for all framestart_delay values. Otherwise with
+ * framestart_delay==3 we will end up extending the min vblank by
+ * one extra line.
+ */
+ crtc_state->vrr.pipeline_full =
+ min(255, crtc_state->vrr.vmin - adjusted_mode->crtc_vdisplay - 4 - 1);
crtc_state->mode_flags |= I915_MODE_FLAG_VRR;
}
@@ -149,10 +163,15 @@ void intel_vrr_enable(struct intel_encoder *encoder,
if (!crtc_state->vrr.enable)
return;
- trans_vrr_ctl = VRR_CTL_VRR_ENABLE |
- VRR_CTL_IGN_MAX_SHIFT | VRR_CTL_FLIP_LINE_EN |
- VRR_CTL_PIPELINE_FULL(crtc_state->vrr.pipeline_full) |
- VRR_CTL_PIPELINE_FULL_OVERRIDE;
+ if (DISPLAY_VER(dev_priv) >= 13)
+ trans_vrr_ctl = VRR_CTL_VRR_ENABLE |
+ VRR_CTL_IGN_MAX_SHIFT | VRR_CTL_FLIP_LINE_EN |
+ XELPD_VRR_CTL_VRR_GUARDBAND(crtc_state->vrr.guardband);
+ else
+ trans_vrr_ctl = VRR_CTL_VRR_ENABLE |
+ VRR_CTL_IGN_MAX_SHIFT | VRR_CTL_FLIP_LINE_EN |
+ VRR_CTL_PIPELINE_FULL(crtc_state->vrr.pipeline_full) |
+ VRR_CTL_PIPELINE_FULL_OVERRIDE;
intel_de_write(dev_priv, TRANS_VRR_VMIN(cpu_transcoder), crtc_state->vrr.vmin - 1);
intel_de_write(dev_priv, TRANS_VRR_VMAX(cpu_transcoder), crtc_state->vrr.vmax - 1);
@@ -199,8 +218,13 @@ void intel_vrr_get_config(struct intel_crtc *crtc,
if (!crtc_state->vrr.enable)
return;
- if (trans_vrr_ctl & VRR_CTL_PIPELINE_FULL_OVERRIDE)
- crtc_state->vrr.pipeline_full = REG_FIELD_GET(VRR_CTL_PIPELINE_FULL_MASK, trans_vrr_ctl);
+ if (DISPLAY_VER(dev_priv) >= 13)
+ crtc_state->vrr.guardband =
+ REG_FIELD_GET(XELPD_VRR_CTL_VRR_GUARDBAND_MASK, trans_vrr_ctl);
+ else
+ if (trans_vrr_ctl & VRR_CTL_PIPELINE_FULL_OVERRIDE)
+ crtc_state->vrr.pipeline_full =
+ REG_FIELD_GET(VRR_CTL_PIPELINE_FULL_MASK, trans_vrr_ctl);
if (trans_vrr_ctl & VRR_CTL_FLIP_LINE_EN)
crtc_state->vrr.flipline = intel_de_read(dev_priv, TRANS_VRR_FLIPLINE(cpu_transcoder)) + 1;
crtc_state->vrr.vmax = intel_de_read(dev_priv, TRANS_VRR_VMAX(cpu_transcoder)) + 1;
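
The two intel_vrr_compute_config() formulas above differ only in which register field bounds the vblank exit. A standalone sketch with assumed numbers (vmin 2250, vdisplay 2160, window2_delay 0, framestart_delay 1); in the driver these come from the adjusted mode and platform configuration.

#include <stdio.h>

static int min_int(int a, int b) { return a < b ? a : b; }

int main(void)
{
	int vmin = 2250, vdisplay = 2160;		/* assumed example mode */
	int window2_delay = 0, framestart_delay = 1;	/* assumed platform values */

	/* DISPLAY_VER >= 13: guardband, no pipeline-full override */
	int guardband = vmin - vdisplay - window2_delay;
	/* older platforms: pipeline-full override, capped at 255 */
	int pipeline_full = min_int(255, vmin - vdisplay - 4 - 1);

	printf("guardband     = %d -> vblank exit %d lines\n",
	       guardband, guardband + framestart_delay + 1);
	printf("pipeline_full = %d -> vblank exit %d lines\n",
	       pipeline_full, pipeline_full + framestart_delay + 1);
	return 0;
}
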
diff --git a/drivers/gpu/drm/i915/display/skl_universal_plane.c b/drivers/gpu/drm/i915/display/skl_universal_plane.c
index 59e032f3687a..92a4fd508e92 100644
--- a/drivers/gpu/drm/i915/display/skl_universal_plane.c
+++ b/drivers/gpu/drm/i915/display/skl_universal_plane.c
@@ -850,6 +850,29 @@ static u32 cnl_plane_ctl_flip(unsigned int reflect)
return 0;
}
+static u32 adlp_plane_ctl_arb_slots(const struct intel_plane_state *plane_state)
+{
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+
+ if (intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier)) {
+ switch (fb->format->cpp[0]) {
+ case 2:
+ return PLANE_CTL_ARB_SLOTS(1);
+ default:
+ return PLANE_CTL_ARB_SLOTS(0);
+ }
+ } else {
+ switch (fb->format->cpp[0]) {
+ case 8:
+ return PLANE_CTL_ARB_SLOTS(3);
+ case 4:
+ return PLANE_CTL_ARB_SLOTS(1);
+ default:
+ return PLANE_CTL_ARB_SLOTS(0);
+ }
+ }
+}
+
static u32 skl_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
@@ -903,6 +926,10 @@ static u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
else if (key->flags & I915_SET_COLORKEY_SOURCE)
plane_ctl |= PLANE_CTL_KEY_ENABLE_SOURCE;
+ /* Wa_22012358565:adlp */
+ if (DISPLAY_VER(dev_priv) == 13)
+ plane_ctl |= adlp_plane_ctl_arb_slots(plane_state);
+
return plane_ctl;
}
diff --git a/drivers/gpu/drm/i915/dma_resv_utils.c b/drivers/gpu/drm/i915/dma_resv_utils.c
index 9e508e7d4629..7df91b7e4ca8 100644
--- a/drivers/gpu/drm/i915/dma_resv_utils.c
+++ b/drivers/gpu/drm/i915/dma_resv_utils.c
@@ -10,7 +10,7 @@
void dma_resv_prune(struct dma_resv *resv)
{
if (dma_resv_trylock(resv)) {
- if (dma_resv_test_signaled_rcu(resv, true))
+ if (dma_resv_test_signaled(resv, true))
dma_resv_add_excl_fence(resv, NULL);
dma_resv_unlock(resv);
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_busy.c b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
index 25235ef630c1..6234e17259c1 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_busy.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
@@ -105,7 +105,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
* Alternatively, we can trade that extra information on read/write
* activity with
* args->busy =
- * !dma_resv_test_signaled_rcu(obj->resv, true);
+ * !dma_resv_test_signaled(obj->resv, true);
* to report the overall busyness. This is what the wait-ioctl does.
*
*/
@@ -113,11 +113,10 @@ retry:
seq = raw_read_seqcount(&obj->base.resv->seq);
/* Translate the exclusive fence to the READ *and* WRITE engine */
- args->busy =
- busy_check_writer(rcu_dereference(obj->base.resv->fence_excl));
+ args->busy = busy_check_writer(dma_resv_excl_fence(obj->base.resv));
/* Translate shared fences to READ set of engines */
- list = rcu_dereference(obj->base.resv->fence);
+ list = dma_resv_shared_list(obj->base.resv);
if (list) {
unsigned int shared_count = list->shared_count, i;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index 297143511f99..66789111a24b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -1481,7 +1481,7 @@ static inline bool use_reloc_gpu(struct i915_vma *vma)
if (DBG_FORCE_RELOC)
return false;
- return !dma_resv_test_signaled_rcu(vma->resv, true);
+ return !dma_resv_test_signaled(vma->resv, true);
}
static unsigned long vma_phys_addr(struct i915_vma *vma, u32 offset)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index 2ebd79537aea..7c0eb425cb3b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -500,7 +500,7 @@ i915_gem_object_last_write_engine(struct drm_i915_gem_object *obj)
struct dma_fence *fence;
rcu_read_lock();
- fence = dma_resv_get_excl_rcu(obj->base.resv);
+ fence = dma_resv_get_excl_unlocked(obj->base.resv);
rcu_read_unlock();
if (fence && dma_fence_is_i915(fence) && !dma_fence_is_signaled(fence))
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
index a657b99ec760..b5cbbe659a77 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
@@ -85,8 +85,8 @@ static bool i915_gem_userptr_invalidate(struct mmu_interval_notifier *mni,
return true;
/* we will unbind on next submission, still have userptr pins */
- r = dma_resv_wait_timeout_rcu(obj->base.resv, true, false,
- MAX_SCHEDULE_TIMEOUT);
+ r = dma_resv_wait_timeout(obj->base.resv, true, false,
+ MAX_SCHEDULE_TIMEOUT);
if (r <= 0)
drm_err(&i915->drm, "(%ld) failed to wait for idle\n", r);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_wait.c b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
index 4b9856d5ba14..1e97520c62b2 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_wait.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
@@ -45,7 +45,7 @@ i915_gem_object_wait_reservation(struct dma_resv *resv,
unsigned int count, i;
int ret;
- ret = dma_resv_get_fences_rcu(resv, &excl, &count, &shared);
+ ret = dma_resv_get_fences(resv, &excl, &count, &shared);
if (ret)
return ret;
@@ -73,7 +73,7 @@ i915_gem_object_wait_reservation(struct dma_resv *resv,
*/
prune_fences = count && timeout >= 0;
} else {
- excl = dma_resv_get_excl_rcu(resv);
+ excl = dma_resv_get_excl_unlocked(resv);
}
if (excl && timeout >= 0)
@@ -158,8 +158,8 @@ i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
unsigned int count, i;
int ret;
- ret = dma_resv_get_fences_rcu(obj->base.resv,
- &excl, &count, &shared);
+ ret = dma_resv_get_fences(obj->base.resv, &excl, &count,
+ &shared);
if (ret)
return ret;
@@ -170,7 +170,7 @@ i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
kfree(shared);
} else {
- excl = dma_resv_get_excl_rcu(obj->base.resv);
+ excl = dma_resv_get_excl_unlocked(obj->base.resv);
}
if (excl) {
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.h b/drivers/gpu/drm/i915/gt/intel_gtt.h
index ca00b45827b7..50a98ce39f74 100644
--- a/drivers/gpu/drm/i915/gt/intel_gtt.h
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.h
@@ -359,6 +359,7 @@ struct i915_ppgtt {
#define i915_is_ggtt(vm) ((vm)->is_ggtt)
#define i915_is_dpt(vm) ((vm)->is_dpt)
+#define i915_is_ggtt_or_dpt(vm) (i915_is_ggtt(vm) || i915_is_dpt(vm))
int __must_check
i915_vm_lock_objects(struct i915_address_space *vm, struct i915_gem_ww_ctx *ww);
@@ -393,7 +394,7 @@ static inline struct i915_ppgtt *
i915_vm_to_ppgtt(struct i915_address_space *vm)
{
BUILD_BUG_ON(offsetof(struct i915_ppgtt, vm));
- GEM_BUG_ON(i915_is_ggtt(vm));
+ GEM_BUG_ON(i915_is_ggtt_or_dpt(vm));
return container_of(vm, struct i915_ppgtt, vm);
}
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index ca9c9e27a43d..c4118b808268 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -1006,7 +1006,7 @@ static int cmd_reg_handler(struct parser_exec_state *s,
* update reg values in it into vregs, so LRIs in workload with
* inhibit context will restore with correct values
*/
- if (IS_GEN(s->engine->i915, 9) &&
+ if (GRAPHICS_VER(s->engine->i915) == 9 &&
intel_gvt_mmio_is_sr_in_ctx(gvt, offset) &&
!strncmp(cmd, "lri", 3)) {
intel_gvt_hypervisor_read_gpa(s->vgpu,
@@ -1390,7 +1390,7 @@ static int gen8_check_mi_display_flip(struct parser_exec_state *s,
if (!info->async_flip)
return 0;
- if (INTEL_GEN(s->engine->i915) >= 9) {
+ if (GRAPHICS_VER(s->engine->i915) >= 9) {
stride = vgpu_vreg_t(s->vgpu, info->stride_reg) & GENMASK(9, 0);
tile = (vgpu_vreg_t(s->vgpu, info->ctrl_reg) &
GENMASK(12, 10)) >> 10;
@@ -1418,7 +1418,7 @@ static int gen8_update_plane_mmio_from_mi_display_flip(
set_mask_bits(&vgpu_vreg_t(vgpu, info->surf_reg), GENMASK(31, 12),
info->surf_val << 12);
- if (INTEL_GEN(dev_priv) >= 9) {
+ if (GRAPHICS_VER(dev_priv) >= 9) {
set_mask_bits(&vgpu_vreg_t(vgpu, info->stride_reg), GENMASK(9, 0),
info->stride_val);
set_mask_bits(&vgpu_vreg_t(vgpu, info->ctrl_reg), GENMASK(12, 10),
@@ -1446,7 +1446,7 @@ static int decode_mi_display_flip(struct parser_exec_state *s,
{
if (IS_BROADWELL(s->engine->i915))
return gen8_decode_mi_display_flip(s, info);
- if (INTEL_GEN(s->engine->i915) >= 9)
+ if (GRAPHICS_VER(s->engine->i915) >= 9)
return skl_decode_mi_display_flip(s, info);
return -ENODEV;
diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c
index d4f883f35b95..8e65cd8258b9 100644
--- a/drivers/gpu/drm/i915/gvt/dmabuf.c
+++ b/drivers/gpu/drm/i915/gvt/dmabuf.c
@@ -223,7 +223,7 @@ static struct drm_i915_gem_object *vgpu_create_gem(struct drm_device *dev,
obj->read_domains = I915_GEM_DOMAIN_GTT;
obj->write_domain = 0;
- if (INTEL_GEN(dev_priv) >= 9) {
+ if (GRAPHICS_VER(dev_priv) >= 9) {
unsigned int tiling_mode = 0;
unsigned int stride = 0;
diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.c b/drivers/gpu/drm/i915/gvt/fb_decoder.c
index 0889ad8291b0..11a8baba6822 100644
--- a/drivers/gpu/drm/i915/gvt/fb_decoder.c
+++ b/drivers/gpu/drm/i915/gvt/fb_decoder.c
@@ -151,7 +151,7 @@ static u32 intel_vgpu_get_stride(struct intel_vgpu *vgpu, int pipe,
u32 stride_reg = vgpu_vreg_t(vgpu, DSPSTRIDE(pipe)) & stride_mask;
u32 stride = stride_reg;
- if (INTEL_GEN(dev_priv) >= 9) {
+ if (GRAPHICS_VER(dev_priv) >= 9) {
switch (tiled) {
case PLANE_CTL_TILED_LINEAR:
stride = stride_reg * 64;
@@ -215,7 +215,7 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
if (!plane->enabled)
return -ENODEV;
- if (INTEL_GEN(dev_priv) >= 9) {
+ if (GRAPHICS_VER(dev_priv) >= 9) {
plane->tiled = val & PLANE_CTL_TILED_MASK;
fmt = skl_format_to_drm(
val & PLANE_CTL_FORMAT_MASK,
@@ -256,9 +256,9 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
}
plane->stride = intel_vgpu_get_stride(vgpu, pipe, plane->tiled,
- (INTEL_GEN(dev_priv) >= 9) ?
- (_PRI_PLANE_STRIDE_MASK >> 6) :
- _PRI_PLANE_STRIDE_MASK, plane->bpp);
+ (GRAPHICS_VER(dev_priv) >= 9) ?
+ (_PRI_PLANE_STRIDE_MASK >> 6) :
+ _PRI_PLANE_STRIDE_MASK, plane->bpp);
plane->width = (vgpu_vreg_t(vgpu, PIPESRC(pipe)) & _PIPE_H_SRCSZ_MASK) >>
_PIPE_H_SRCSZ_SHIFT;
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 9478c132d7b6..cc2c05e18206 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -1055,12 +1055,12 @@ static bool vgpu_ips_enabled(struct intel_vgpu *vgpu)
{
struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
- if (INTEL_GEN(dev_priv) == 9 || INTEL_GEN(dev_priv) == 10) {
+ if (GRAPHICS_VER(dev_priv) == 9 || GRAPHICS_VER(dev_priv) == 10) {
u32 ips = vgpu_vreg_t(vgpu, GEN8_GAMW_ECO_DEV_RW_IA) &
GAMW_ECO_ENABLE_64K_IPS_FIELD;
return ips == GAMW_ECO_ENABLE_64K_IPS_FIELD;
- } else if (INTEL_GEN(dev_priv) >= 11) {
+ } else if (GRAPHICS_VER(dev_priv) >= 11) {
/* 64K paging only controlled by IPS bit in PTE now. */
return true;
} else
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index dda320749c65..98eb48c24c46 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -220,7 +220,7 @@ static int gamw_echo_dev_rw_ia_write(struct intel_vgpu *vgpu,
{
u32 ips = (*(u32 *)p_data) & GAMW_ECO_ENABLE_64K_IPS_FIELD;
- if (INTEL_GEN(vgpu->gvt->gt->i915) <= 10) {
+ if (GRAPHICS_VER(vgpu->gvt->gt->i915) <= 10) {
if (ips == GAMW_ECO_ENABLE_64K_IPS_FIELD)
gvt_dbg_core("vgpu%d: ips enabled\n", vgpu->id);
else if (!ips)
@@ -286,7 +286,7 @@ static int mul_force_wake_write(struct intel_vgpu *vgpu,
old = vgpu_vreg(vgpu, offset);
new = CALC_MODE_MASK_REG(old, *(u32 *)p_data);
- if (INTEL_GEN(vgpu->gvt->gt->i915) >= 9) {
+ if (GRAPHICS_VER(vgpu->gvt->gt->i915) >= 9) {
switch (offset) {
case FORCEWAKE_RENDER_GEN9_REG:
ack_reg_offset = FORCEWAKE_ACK_RENDER_GEN9_REG;
@@ -1174,7 +1174,7 @@ static int dp_aux_ch_ctl_mmio_write(struct intel_vgpu *vgpu,
write_vreg(vgpu, offset, p_data, bytes);
data = vgpu_vreg(vgpu, offset);
- if ((INTEL_GEN(vgpu->gvt->gt->i915) >= 9)
+ if ((GRAPHICS_VER(vgpu->gvt->gt->i915) >= 9)
&& offset != _REG_SKL_DP_AUX_CH_CTL(port_index)) {
/* SKL DPB/C/D aux ctl register changed */
return 0;
@@ -3342,9 +3342,9 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_D(_MMIO(_PLANE_SURF_3_A), D_SKL_PLUS);
MMIO_D(_MMIO(_PLANE_SURF_3_B), D_SKL_PLUS);
- MMIO_D(CSR_SSP_BASE, D_SKL_PLUS);
- MMIO_D(CSR_HTP_SKL, D_SKL_PLUS);
- MMIO_D(CSR_LAST_WRITE, D_SKL_PLUS);
+ MMIO_D(DMC_SSP_BASE, D_SKL_PLUS);
+ MMIO_D(DMC_HTP_SKL, D_SKL_PLUS);
+ MMIO_D(DMC_LAST_WRITE, D_SKL_PLUS);
MMIO_DFH(BDW_SCRATCH1, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
@@ -3655,7 +3655,7 @@ void intel_gvt_clean_mmio_info(struct intel_gvt *gvt)
* otherwise, need to update cmd_reg_handler in cmd_parser.c
*/
static struct gvt_mmio_block mmio_blocks[] = {
- {D_SKL_PLUS, _MMIO(CSR_MMIO_START_RANGE), 0x3000, NULL, NULL},
+ {D_SKL_PLUS, _MMIO(DMC_MMIO_START_RANGE), 0x3000, NULL, NULL},
{D_ALL, _MMIO(MCHBAR_MIRROR_BASE_SNB), 0x40000, NULL, NULL},
{D_ALL, _MMIO(VGT_PVINFO_PAGE), VGT_PVINFO_SIZE,
pvinfo_mmio_read, pvinfo_mmio_write},
diff --git a/drivers/gpu/drm/i915/gvt/interrupt.c b/drivers/gpu/drm/i915/gvt/interrupt.c
index 497d28ce47df..614b951d919f 100644
--- a/drivers/gpu/drm/i915/gvt/interrupt.c
+++ b/drivers/gpu/drm/i915/gvt/interrupt.c
@@ -585,7 +585,7 @@ static void gen8_init_irq(
SET_BIT_INFO(irq, 4, PRIMARY_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C);
SET_BIT_INFO(irq, 5, SPRITE_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C);
- } else if (INTEL_GEN(gvt->gt->i915) >= 9) {
+ } else if (GRAPHICS_VER(gvt->gt->i915) >= 9) {
SET_BIT_INFO(irq, 25, AUX_CHANNEL_B, INTEL_GVT_IRQ_INFO_DE_PORT);
SET_BIT_INFO(irq, 26, AUX_CHANNEL_C, INTEL_GVT_IRQ_INFO_DE_PORT);
SET_BIT_INFO(irq, 27, AUX_CHANNEL_D, INTEL_GVT_IRQ_INFO_DE_PORT);
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c
index c9589e26af93..b8ac80765461 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.c
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.c
@@ -373,7 +373,7 @@ static void handle_tlb_pending_event(struct intel_vgpu *vgpu,
*/
fw = intel_uncore_forcewake_for_reg(uncore, reg,
FW_REG_READ | FW_REG_WRITE);
- if (engine->id == RCS0 && INTEL_GEN(engine->i915) >= 9)
+ if (engine->id == RCS0 && GRAPHICS_VER(engine->i915) >= 9)
fw |= FORCEWAKE_RENDER;
intel_uncore_forcewake_get(uncore, fw);
@@ -409,7 +409,7 @@ static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next,
if (drm_WARN_ON(&engine->i915->drm, engine->id >= ARRAY_SIZE(regs)))
return;
- if (engine->id == RCS0 && IS_GEN(engine->i915, 9))
+ if (engine->id == RCS0 && GRAPHICS_VER(engine->i915) == 9)
return;
if (!pre && !gen9_render_mocs.initialized)
@@ -474,7 +474,7 @@ static void switch_mmio(struct intel_vgpu *pre,
struct engine_mmio *mmio;
u32 old_v, new_v;
- if (INTEL_GEN(engine->i915) >= 9)
+ if (GRAPHICS_VER(engine->i915) >= 9)
switch_mocs(pre, next, engine);
for (mmio = engine->i915->gvt->engine_mmio_list.mmio;
@@ -486,7 +486,7 @@ static void switch_mmio(struct intel_vgpu *pre,
* state image on gen9, it's initialized by lri command and
* save or restore with context together.
*/
- if (IS_GEN(engine->i915, 9) && mmio->in_context)
+ if (GRAPHICS_VER(engine->i915) == 9 && mmio->in_context)
continue;
// save
@@ -580,7 +580,7 @@ void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt)
{
struct engine_mmio *mmio;
- if (INTEL_GEN(gvt->gt->i915) >= 9) {
+ if (GRAPHICS_VER(gvt->gt->i915) >= 9) {
gvt->engine_mmio_list.mmio = gen9_engine_mmio_list;
gvt->engine_mmio_list.tlb_mmio_offset_list = gen8_tlb_mmio_offset_list;
gvt->engine_mmio_list.tlb_mmio_offset_list_cnt = ARRAY_SIZE(gen8_tlb_mmio_offset_list);
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index fc735692f21f..734c37c5e347 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -364,7 +364,7 @@ static int copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload)
u32 *cs;
int err;
- if (IS_GEN(req->engine->i915, 9) && is_inhibit_context(req->context))
+ if (GRAPHICS_VER(req->engine->i915) == 9 && is_inhibit_context(req->context))
intel_vgpu_restore_inhibit_context(vgpu, req);
/*
@@ -1148,7 +1148,7 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
static int workload_thread(void *arg)
{
struct intel_engine_cs *engine = arg;
- const bool need_force_wake = INTEL_GEN(engine->i915) >= 9;
+ const bool need_force_wake = GRAPHICS_VER(engine->i915) >= 9;
struct intel_gvt *gvt = engine->i915->gvt;
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
struct intel_vgpu_workload *workload = NULL;
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index 9039787f123a..fa6b92615799 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -149,10 +149,10 @@ int intel_gvt_init_vgpu_types(struct intel_gvt *gvt)
gvt->types[i].avail_instance = min(low_avail / vgpu_types[i].low_mm,
high_avail / vgpu_types[i].high_mm);
- if (IS_GEN(gvt->gt->i915, 8))
+ if (GRAPHICS_VER(gvt->gt->i915) == 8)
sprintf(gvt->types[i].name, "GVTg_V4_%s",
vgpu_types[i].name);
- else if (IS_GEN(gvt->gt->i915, 9))
+ else if (GRAPHICS_VER(gvt->gt->i915) == 9)
sprintf(gvt->types[i].name, "GVTg_V5_%s",
vgpu_types[i].name);
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index 5b4b2bd46e7c..3992c25a191d 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -946,8 +946,8 @@ int intel_engine_init_cmd_parser(struct intel_engine_cs *engine)
int cmd_table_count;
int ret;
- if (!IS_GEN(engine->i915, 7) && !(IS_GEN(engine->i915, 9) &&
- engine->class == COPY_ENGINE_CLASS))
+ if (GRAPHICS_VER(engine->i915) != 7 && !(GRAPHICS_VER(engine->i915) == 9 &&
+ engine->class == COPY_ENGINE_CLASS))
return 0;
switch (engine->class) {
@@ -977,7 +977,7 @@ int intel_engine_init_cmd_parser(struct intel_engine_cs *engine)
break;
case COPY_ENGINE_CLASS:
engine->get_cmd_length_mask = gen7_blt_get_cmd_length_mask;
- if (IS_GEN(engine->i915, 9)) {
+ if (GRAPHICS_VER(engine->i915) == 9) {
cmd_tables = gen9_blt_cmd_table;
cmd_table_count = ARRAY_SIZE(gen9_blt_cmd_table);
engine->get_cmd_length_mask =
@@ -993,7 +993,7 @@ int intel_engine_init_cmd_parser(struct intel_engine_cs *engine)
cmd_table_count = ARRAY_SIZE(gen7_blt_cmd_table);
}
- if (IS_GEN(engine->i915, 9)) {
+ if (GRAPHICS_VER(engine->i915) == 9) {
engine->reg_tables = gen9_blt_reg_tables;
engine->reg_table_count =
ARRAY_SIZE(gen9_blt_reg_tables);
@@ -1537,7 +1537,7 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
if (IS_HASWELL(engine->i915))
flags = MI_BATCH_NON_SECURE_HSW;
- GEM_BUG_ON(!IS_GEN_RANGE(engine->i915, 6, 7));
+ GEM_BUG_ON(!IS_GRAPHICS_VER(engine->i915, 6, 7));
__gen6_emit_bb_start(batch_end,
batch_addr,
flags);
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 8dd374691102..cc745751ac53 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -124,6 +124,17 @@ stringify_page_sizes(unsigned int page_sizes, char *buf, size_t len)
}
}
+static const char *stringify_vma_type(const struct i915_vma *vma)
+{
+ if (i915_vma_is_ggtt(vma))
+ return "ggtt";
+
+ if (i915_vma_is_dpt(vma))
+ return "dpt";
+
+ return "ppgtt";
+}
+
void
i915_debugfs_describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
@@ -156,11 +167,11 @@ i915_debugfs_describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
if (i915_vma_is_pinned(vma))
pin_count++;
- seq_printf(m, " (%sgtt offset: %08llx, size: %08llx, pages: %s",
- i915_vma_is_ggtt(vma) ? "g" : "pp",
+ seq_printf(m, " (%s offset: %08llx, size: %08llx, pages: %s",
+ stringify_vma_type(vma),
vma->node.start, vma->node.size,
stringify_page_sizes(vma->page_sizes.gtt, NULL, 0));
- if (i915_vma_is_ggtt(vma)) {
+ if (i915_vma_is_ggtt(vma) || i915_vma_is_dpt(vma)) {
switch (vma->ggtt_view.type) {
case I915_GGTT_VIEW_NORMAL:
seq_puts(m, ", normal");
@@ -350,7 +361,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
- if (IS_GEN(dev_priv, 5)) {
+ if (GRAPHICS_VER(dev_priv) == 5) {
u16 rgvswctl = intel_uncore_read16(uncore, MEMSWCTL);
u16 rgvstat = intel_uncore_read16(uncore, MEMSTAT_ILK);
@@ -397,7 +408,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
seq_printf(m,
"efficient (RPe) frequency: %d MHz\n",
intel_gpu_freq(rps, rps->efficient_freq));
- } else if (INTEL_GEN(dev_priv) >= 6) {
+ } else if (GRAPHICS_VER(dev_priv) >= 6) {
u32 rp_state_limits;
u32 gt_perf_status;
u32 rp_state_cap;
@@ -421,7 +432,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
reqf = intel_uncore_read(&dev_priv->uncore, GEN6_RPNSWREQ);
- if (INTEL_GEN(dev_priv) >= 9)
+ if (GRAPHICS_VER(dev_priv) >= 9)
reqf >>= 23;
else {
reqf &= ~GEN6_TURBO_DISABLE;
@@ -447,7 +458,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
- if (INTEL_GEN(dev_priv) >= 11) {
+ if (GRAPHICS_VER(dev_priv) >= 11) {
pm_ier = intel_uncore_read(&dev_priv->uncore, GEN11_GPM_WGBOXPERF_INTR_ENABLE);
pm_imr = intel_uncore_read(&dev_priv->uncore, GEN11_GPM_WGBOXPERF_INTR_MASK);
/*
@@ -456,7 +467,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
*/
pm_isr = 0;
pm_iir = 0;
- } else if (INTEL_GEN(dev_priv) >= 8) {
+ } else if (GRAPHICS_VER(dev_priv) >= 8) {
pm_ier = intel_uncore_read(&dev_priv->uncore, GEN8_GT_IER(2));
pm_imr = intel_uncore_read(&dev_priv->uncore, GEN8_GT_IMR(2));
pm_isr = intel_uncore_read(&dev_priv->uncore, GEN8_GT_ISR(2));
@@ -479,14 +490,14 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
seq_printf(m, "PM IER=0x%08x IMR=0x%08x, MASK=0x%08x\n",
pm_ier, pm_imr, pm_mask);
- if (INTEL_GEN(dev_priv) <= 10)
+ if (GRAPHICS_VER(dev_priv) <= 10)
seq_printf(m, "PM ISR=0x%08x IIR=0x%08x\n",
pm_isr, pm_iir);
seq_printf(m, "pm_intrmsk_mbz: 0x%08x\n",
rps->pm_intrmsk_mbz);
seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
seq_printf(m, "Render p-state ratio: %d\n",
- (gt_perf_status & (INTEL_GEN(dev_priv) >= 9 ? 0x1ff00 : 0xff00)) >> 8);
+ (gt_perf_status & (GRAPHICS_VER(dev_priv) >= 9 ? 0x1ff00 : 0xff00)) >> 8);
seq_printf(m, "Render p-state VID: %d\n",
gt_perf_status & 0xff);
seq_printf(m, "Render p-state limit: %d\n",
@@ -527,20 +538,20 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 0 :
rp_state_cap >> 16) & 0xff;
max_freq *= (IS_GEN9_BC(dev_priv) ||
- INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
+ GRAPHICS_VER(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
intel_gpu_freq(rps, max_freq));
max_freq = (rp_state_cap & 0xff00) >> 8;
max_freq *= (IS_GEN9_BC(dev_priv) ||
- INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
+ GRAPHICS_VER(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
intel_gpu_freq(rps, max_freq));
max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 16 :
rp_state_cap >> 0) & 0xff;
max_freq *= (IS_GEN9_BC(dev_priv) ||
- INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
+ GRAPHICS_VER(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
intel_gpu_freq(rps, max_freq));
seq_printf(m, "Max overclocked frequency: %dMHz\n",
@@ -611,12 +622,12 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
seq_puts(m, "L-shaped memory detected\n");
/* On BDW+, swizzling is not used. See detect_bit_6_swizzle() */
- if (INTEL_GEN(dev_priv) >= 8 || IS_VALLEYVIEW(dev_priv))
+ if (GRAPHICS_VER(dev_priv) >= 8 || IS_VALLEYVIEW(dev_priv))
return 0;
wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
- if (IS_GEN_RANGE(dev_priv, 3, 4)) {
+ if (IS_GRAPHICS_VER(dev_priv, 3, 4)) {
seq_printf(m, "DDC = 0x%08x\n",
intel_uncore_read(uncore, DCC));
seq_printf(m, "DDC2 = 0x%08x\n",
@@ -634,7 +645,7 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
intel_uncore_read(uncore, MAD_DIMM_C2));
seq_printf(m, "TILECTL = 0x%08x\n",
intel_uncore_read(uncore, TILECTL));
- if (INTEL_GEN(dev_priv) >= 8)
+ if (GRAPHICS_VER(dev_priv) >= 8)
seq_printf(m, "GAMTARBMODE = 0x%08x\n",
intel_uncore_read(uncore, GAMTARBMODE));
else
@@ -945,7 +956,7 @@ static int i915_forcewake_open(struct inode *inode, struct file *file)
atomic_inc(&gt->user_wakeref);
intel_gt_pm_get(gt);
- if (INTEL_GEN(i915) >= 6)
+ if (GRAPHICS_VER(i915) >= 6)
intel_uncore_forcewake_user_get(gt->uncore);
return 0;
@@ -956,7 +967,7 @@ static int i915_forcewake_release(struct inode *inode, struct file *file)
struct drm_i915_private *i915 = inode->i_private;
struct intel_gt *gt = &i915->gt;
- if (INTEL_GEN(i915) >= 6)
+ if (GRAPHICS_VER(i915) >= 6)
intel_uncore_forcewake_user_put(&i915->uncore);
intel_gt_pm_put(gt);
atomic_dec(&gt->user_wakeref);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 5118dc8386b2..d5b3c5ba6bd2 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -49,7 +49,7 @@
#include "display/intel_acpi.h"
#include "display/intel_bw.h"
#include "display/intel_cdclk.h"
-#include "display/intel_csr.h"
+#include "display/intel_dmc.h"
#include "display/intel_display_types.h"
#include "display/intel_dp.h"
#include "display/intel_fbdev.h"
@@ -106,12 +106,12 @@ static int i915_get_bridge_dev(struct drm_i915_private *dev_priv)
static int
intel_alloc_mchbar_resource(struct drm_i915_private *dev_priv)
{
- int reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
+ int reg = GRAPHICS_VER(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
u32 temp_lo, temp_hi = 0;
u64 mchbar_addr;
int ret;
- if (INTEL_GEN(dev_priv) >= 4)
+ if (GRAPHICS_VER(dev_priv) >= 4)
pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
mchbar_addr = ((u64)temp_hi << 32) | temp_lo;
@@ -138,7 +138,7 @@ intel_alloc_mchbar_resource(struct drm_i915_private *dev_priv)
return ret;
}
- if (INTEL_GEN(dev_priv) >= 4)
+ if (GRAPHICS_VER(dev_priv) >= 4)
pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
upper_32_bits(dev_priv->mch_res.start));
@@ -151,7 +151,7 @@ intel_alloc_mchbar_resource(struct drm_i915_private *dev_priv)
static void
intel_setup_mchbar(struct drm_i915_private *dev_priv)
{
- int mchbar_reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
+ int mchbar_reg = GRAPHICS_VER(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
u32 temp;
bool enabled;
@@ -190,7 +190,7 @@ intel_setup_mchbar(struct drm_i915_private *dev_priv)
static void
intel_teardown_mchbar(struct drm_i915_private *dev_priv)
{
- int mchbar_reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
+ int mchbar_reg = GRAPHICS_VER(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
if (dev_priv->mchbar_need_disable) {
if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
@@ -475,7 +475,7 @@ static int i915_set_dma_info(struct drm_i915_private *i915)
goto mask_err;
/* overlay on gen2 is broken and can't address above 1G */
- if (IS_GEN(i915, 2))
+ if (GRAPHICS_VER(i915) == 2)
mask_size = 30;
/*
@@ -601,7 +601,7 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
* device. The kernel then disables that interrupt source and so
* prevents the other device from working properly.
*/
- if (INTEL_GEN(dev_priv) >= 5) {
+ if (GRAPHICS_VER(dev_priv) >= 5) {
if (pci_enable_msi(pdev) < 0)
drm_dbg(&dev_priv->drm, "can't enable MSI");
}
@@ -729,7 +729,7 @@ static void i915_welcome_messages(struct drm_i915_private *dev_priv)
intel_platform_name(INTEL_INFO(dev_priv)->platform),
intel_subplatform(RUNTIME_INFO(dev_priv),
INTEL_INFO(dev_priv)->platform),
- INTEL_GEN(dev_priv));
+ GRAPHICS_VER(dev_priv));
intel_device_info_print_static(INTEL_INFO(dev_priv), &p);
intel_device_info_print_runtime(RUNTIME_INFO(dev_priv), &p);
@@ -803,7 +803,7 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
*/
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
if (IS_ENABLED(CONFIG_DRM_I915_UNSTABLE_FAKE_LMEM)) {
- if (INTEL_GEN(i915) >= 9 && i915_selftest.live < 0 &&
+ if (GRAPHICS_VER(i915) >= 9 && i915_selftest.live < 0 &&
i915->params.fake_lmem_start) {
mkwrite_device_info(i915)->memory_regions =
REGION_SMEM | REGION_LMEM | REGION_STOLEN_SMEM;
@@ -1043,7 +1043,7 @@ void i915_driver_shutdown(struct drm_i915_private *i915)
intel_suspend_encoders(i915);
intel_shutdown_encoders(i915);
- intel_csr_ucode_suspend(i915);
+ intel_dmc_ucode_suspend(i915);
/*
* The only requirement is to reboot with display DC states disabled,
@@ -1124,7 +1124,7 @@ static int i915_drm_suspend(struct drm_device *dev)
dev_priv->suspend_count++;
- intel_csr_ucode_suspend(dev_priv);
+ intel_dmc_ucode_suspend(dev_priv);
enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
@@ -1182,7 +1182,7 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
* Fujitsu FSC S7110
* Acer Aspire 1830T
*/
- if (!(hibernation && INTEL_GEN(dev_priv) < 6))
+ if (!(hibernation && GRAPHICS_VER(dev_priv) < 6))
pci_set_power_state(pdev, PCI_D3hot);
out:
@@ -1226,7 +1226,7 @@ static int i915_drm_resume(struct drm_device *dev)
i915_ggtt_resume(&dev_priv->ggtt);
- intel_csr_ucode_resume(dev_priv);
+ intel_dmc_ucode_resume(dev_priv);
i915_restore_display(dev_priv);
intel_pps_unlock_regs_wa(dev_priv);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 9aee6a045590..7b7918f72c41 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -67,6 +67,7 @@
#include "display/intel_bios.h"
#include "display/intel_display.h"
#include "display/intel_display_power.h"
+#include "display/intel_dmc.h"
#include "display/intel_dpll_mgr.h"
#include "display/intel_dsb.h"
#include "display/intel_frontbuffer.h"
@@ -328,23 +329,6 @@ struct drm_i915_display_funcs {
void (*read_luts)(struct intel_crtc_state *crtc_state);
};
-struct intel_csr {
- struct work_struct work;
- const char *fw_path;
- u32 required_version;
- u32 max_fw_size; /* bytes */
- u32 *dmc_payload;
- u32 dmc_fw_size; /* dwords */
- u32 version;
- u32 mmio_count;
- i915_reg_t mmioaddr[20];
- u32 mmiodata[20];
- u32 dc_state;
- u32 target_dc_state;
- u32 allowed_dc_mask;
- intel_wakeref_t wakeref;
-};
-
enum i915_cache_level {
I915_CACHE_NONE = 0,
I915_CACHE_LLC, /* also used for snoopable memory on non-LLC */
@@ -589,6 +573,8 @@ i915_fence_timeout(const struct drm_i915_private *i915)
/* Amount of SAGV/QGV points, BSpec precisely defines this */
#define I915_NUM_QGV_POINTS 8
+#define HAS_HW_SAGV_WM(i915) (DISPLAY_VER(i915) >= 13 && !IS_DGFX(i915))
+
struct ddi_vbt_port_info {
/* Non-NULL if port present. */
struct intel_bios_encoder_data *devdata;
@@ -824,7 +810,7 @@ struct drm_i915_private {
struct intel_wopcm wopcm;
- struct intel_csr csr;
+ struct intel_dmc dmc;
struct intel_gmbus gmbus[GMBUS_NUM_PINS];
@@ -1138,6 +1124,9 @@ struct drm_i915_private {
u8 framestart_delay;
+ /* Window2 specifies the time required to program the DSB, in number of scan lines */
+ u8 window2_delay;
+
u8 pch_ssc_use;
/* For i915gm/i945gm vblank irq workaround */
@@ -1558,9 +1547,9 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
(IS_ALDERLAKE_P(__i915) && \
IS_GT_STEP(__i915, since, until))
-#define IS_LP(dev_priv) (INTEL_INFO(dev_priv)->is_lp)
-#define IS_GEN9_LP(dev_priv) (IS_GEN(dev_priv, 9) && IS_LP(dev_priv))
-#define IS_GEN9_BC(dev_priv) (IS_GEN(dev_priv, 9) && !IS_LP(dev_priv))
+#define IS_LP(dev_priv) (INTEL_INFO(dev_priv)->is_lp)
+#define IS_GEN9_LP(dev_priv) (GRAPHICS_VER(dev_priv) == 9 && IS_LP(dev_priv))
+#define IS_GEN9_BC(dev_priv) (GRAPHICS_VER(dev_priv) == 9 && !IS_LP(dev_priv))
#define __HAS_ENGINE(engine_mask, id) ((engine_mask) & BIT(id))
#define HAS_ENGINE(gt, id) __HAS_ENGINE((gt)->info.engine_mask, id)
@@ -1580,12 +1569,12 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
* The Gen7 cmdparser copies the scanned buffer to the ggtt for execution
* All later gens can run the final buffer from the ppgtt
*/
-#define CMDPARSER_USES_GGTT(dev_priv) IS_GEN(dev_priv, 7)
+#define CMDPARSER_USES_GGTT(dev_priv) (GRAPHICS_VER(dev_priv) == 7)
#define HAS_LLC(dev_priv) (INTEL_INFO(dev_priv)->has_llc)
#define HAS_SNOOP(dev_priv) (INTEL_INFO(dev_priv)->has_snoop)
#define HAS_EDRAM(dev_priv) ((dev_priv)->edram_size_mb)
-#define HAS_SECURE_BATCHES(dev_priv) (INTEL_GEN(dev_priv) < 6)
+#define HAS_SECURE_BATCHES(dev_priv) (GRAPHICS_VER(dev_priv) < 6)
#define HAS_WT(dev_priv) HAS_EDRAM(dev_priv)
#define HWS_NEEDS_PHYSICAL(dev_priv) (INTEL_INFO(dev_priv)->hws_needs_physical)
@@ -1618,7 +1607,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define HAS_BROKEN_CS_TLB(dev_priv) (IS_I830(dev_priv) || IS_I845G(dev_priv))
#define NEEDS_RC6_CTX_CORRUPTION_WA(dev_priv) \
- (IS_BROADWELL(dev_priv) || IS_GEN(dev_priv, 9))
+ (IS_BROADWELL(dev_priv) || GRAPHICS_VER(dev_priv) == 9)
/* WaRsDisableCoarsePowerGating:skl,cnl */
#define NEEDS_WaRsDisableCoarsePowerGating(dev_priv) \
@@ -1626,23 +1615,22 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
IS_SKL_GT3(dev_priv) || \
IS_SKL_GT4(dev_priv))
-#define HAS_GMBUS_IRQ(dev_priv) (INTEL_GEN(dev_priv) >= 4)
-#define HAS_GMBUS_BURST_READ(dev_priv) (INTEL_GEN(dev_priv) >= 10 || \
+#define HAS_GMBUS_IRQ(dev_priv) (GRAPHICS_VER(dev_priv) >= 4)
+#define HAS_GMBUS_BURST_READ(dev_priv) (GRAPHICS_VER(dev_priv) >= 10 || \
IS_GEMINILAKE(dev_priv) || \
IS_KABYLAKE(dev_priv))
/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
* rows, which changed the alignment requirements and fence programming.
*/
-#define HAS_128_BYTE_Y_TILING(dev_priv) (!IS_GEN(dev_priv, 2) && \
- !(IS_I915G(dev_priv) || \
- IS_I915GM(dev_priv)))
+#define HAS_128_BYTE_Y_TILING(dev_priv) (GRAPHICS_VER(dev_priv) != 2 && \
+ !(IS_I915G(dev_priv) || IS_I915GM(dev_priv)))
#define SUPPORTS_TV(dev_priv) (INTEL_INFO(dev_priv)->display.supports_tv)
#define I915_HAS_HOTPLUG(dev_priv) (INTEL_INFO(dev_priv)->display.has_hotplug)
-#define HAS_FW_BLC(dev_priv) (INTEL_GEN(dev_priv) > 2)
+#define HAS_FW_BLC(dev_priv) (GRAPHICS_VER(dev_priv) > 2)
#define HAS_FBC(dev_priv) (INTEL_INFO(dev_priv)->display.has_fbc)
-#define HAS_CUR_FBC(dev_priv) (!HAS_GMCH(dev_priv) && INTEL_GEN(dev_priv) >= 7)
+#define HAS_CUR_FBC(dev_priv) (!HAS_GMCH(dev_priv) && GRAPHICS_VER(dev_priv) >= 7)
#define HAS_IPS(dev_priv) (IS_HSW_ULT(dev_priv) || IS_BROADWELL(dev_priv))
@@ -1653,7 +1641,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define HAS_PSR(dev_priv) (INTEL_INFO(dev_priv)->display.has_psr)
#define HAS_PSR_HW_TRACKING(dev_priv) \
(INTEL_INFO(dev_priv)->display.has_psr_hw_tracking)
-#define HAS_PSR2_SEL_FETCH(dev_priv) (INTEL_GEN(dev_priv) >= 12)
+#define HAS_PSR2_SEL_FETCH(dev_priv) (GRAPHICS_VER(dev_priv) >= 12)
#define HAS_TRANSCODER(dev_priv, trans) ((INTEL_INFO(dev_priv)->cpu_transcoder_mask & BIT(trans)) != 0)
#define HAS_RC6(dev_priv) (INTEL_INFO(dev_priv)->has_rc6)
@@ -1662,9 +1650,9 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define HAS_RPS(dev_priv) (INTEL_INFO(dev_priv)->has_rps)
-#define HAS_CSR(dev_priv) (INTEL_INFO(dev_priv)->display.has_csr)
+#define HAS_DMC(dev_priv) (INTEL_INFO(dev_priv)->display.has_dmc)
-#define HAS_MSO(i915) (INTEL_GEN(i915) >= 12)
+#define HAS_MSO(i915) (GRAPHICS_VER(i915) >= 12)
#define HAS_RUNTIME_PM(dev_priv) (INTEL_INFO(dev_priv)->has_runtime_pm)
#define HAS_64BIT_RELOC(dev_priv) (INTEL_INFO(dev_priv)->has_64bit_reloc)
@@ -1683,7 +1671,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define HAS_GMCH(dev_priv) (INTEL_INFO(dev_priv)->display.has_gmch)
-#define HAS_LSPCON(dev_priv) (IS_GEN_RANGE(dev_priv, 9, 10))
+#define HAS_LSPCON(dev_priv) (IS_GRAPHICS_VER(dev_priv, 9, 10))
/* DPF == dynamic parity feature */
#define HAS_L3_DPF(dev_priv) (INTEL_INFO(dev_priv)->has_l3_dpf)
@@ -1697,7 +1685,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define HAS_DISPLAY(dev_priv) (INTEL_INFO(dev_priv)->pipe_mask != 0)
-#define HAS_VRR(i915) (INTEL_GEN(i915) >= 12)
+#define HAS_VRR(i915) (GRAPHICS_VER(i915) >= 12)
/* Only valid when HAS_DISPLAY() is true */
#define INTEL_DISPLAY_ENABLED(dev_priv) \
@@ -1724,7 +1712,7 @@ static inline bool intel_vtd_active(void)
static inline bool intel_scanout_needs_vtd_wa(struct drm_i915_private *dev_priv)
{
- return INTEL_GEN(dev_priv) >= 6 && intel_vtd_active();
+ return GRAPHICS_VER(dev_priv) >= 6 && intel_vtd_active();
}
static inline bool
@@ -1942,7 +1930,7 @@ int remap_io_sg(struct vm_area_struct *vma,
static inline int intel_hws_csb_write_index(struct drm_i915_private *i915)
{
- if (INTEL_GEN(i915) >= 10)
+ if (GRAPHICS_VER(i915) >= 10)
return CNL_HWS_CSB_WRITE_INDEX;
else
return I915_HWS_CSB_WRITE_INDEX;
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index cffd7f4f87dc..1aadc021d92e 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -442,7 +442,7 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
/* PREAD is disallowed for all platforms after TGL-LP. This also
* covers all platforms with local memory.
*/
- if (INTEL_GEN(i915) >= 12 && !IS_TIGERLAKE(i915))
+ if (GRAPHICS_VER(i915) >= 12 && !IS_TIGERLAKE(i915))
return -EOPNOTSUPP;
if (args->size == 0)
@@ -722,7 +722,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
/* PWRITE is disallowed for all platforms after TGL-LP. This also
* covers all platforms with local memory.
*/
- if (INTEL_GEN(i915) >= 12 && !IS_TIGERLAKE(i915))
+ if (GRAPHICS_VER(i915) >= 12 && !IS_TIGERLAKE(i915))
return -EOPNOTSUPP;
if (args->size == 0)
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 99ca242ec13b..35c97c39f125 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -36,7 +36,7 @@
#include <drm/drm_print.h>
-#include "display/intel_csr.h"
+#include "display/intel_dmc.h"
#include "display/intel_overlay.h"
#include "gem/i915_gem_context.h"
@@ -435,13 +435,13 @@ static void error_print_instdone(struct drm_i915_error_state_buf *m,
err_printf(m, " INSTDONE: 0x%08x\n",
ee->instdone.instdone);
- if (ee->engine->class != RENDER_CLASS || INTEL_GEN(m->i915) <= 3)
+ if (ee->engine->class != RENDER_CLASS || GRAPHICS_VER(m->i915) <= 3)
return;
err_printf(m, " SC_INSTDONE: 0x%08x\n",
ee->instdone.slice_common);
- if (INTEL_GEN(m->i915) <= 6)
+ if (GRAPHICS_VER(m->i915) <= 6)
return;
for_each_instdone_slice_subslice(m->i915, sseu, slice, subslice)
@@ -454,7 +454,7 @@ static void error_print_instdone(struct drm_i915_error_state_buf *m,
slice, subslice,
ee->instdone.row[slice][subslice]);
- if (INTEL_GEN(m->i915) < 12)
+ if (GRAPHICS_VER(m->i915) < 12)
return;
err_printf(m, " SC_INSTDONE_EXTRA: 0x%08x\n",
@@ -543,7 +543,7 @@ static void error_print_engine(struct drm_i915_error_state_buf *m,
upper_32_bits(start), lower_32_bits(start),
upper_32_bits(end), lower_32_bits(end));
}
- if (INTEL_GEN(m->i915) >= 4) {
+ if (GRAPHICS_VER(m->i915) >= 4) {
err_printf(m, " BBADDR: 0x%08x_%08x\n",
(u32)(ee->bbaddr>>32), (u32)ee->bbaddr);
err_printf(m, " BB_STATE: 0x%08x\n", ee->bbstate);
@@ -552,14 +552,14 @@ static void error_print_engine(struct drm_i915_error_state_buf *m,
err_printf(m, " INSTPM: 0x%08x\n", ee->instpm);
err_printf(m, " FADDR: 0x%08x %08x\n", upper_32_bits(ee->faddr),
lower_32_bits(ee->faddr));
- if (INTEL_GEN(m->i915) >= 6) {
+ if (GRAPHICS_VER(m->i915) >= 6) {
err_printf(m, " RC PSMI: 0x%08x\n", ee->rc_psmi);
err_printf(m, " FAULT_REG: 0x%08x\n", ee->fault_reg);
}
if (HAS_PPGTT(m->i915)) {
err_printf(m, " GFX_MODE: 0x%08x\n", ee->vm_info.gfx_mode);
- if (INTEL_GEN(m->i915) >= 8) {
+ if (GRAPHICS_VER(m->i915) >= 8) {
int i;
for (i = 0; i < 4; i++)
err_printf(m, " PDP%d: 0x%016llx\n",
@@ -706,25 +706,25 @@ static void err_print_gt(struct drm_i915_error_state_buf *m,
for (i = 0; i < gt->nfence; i++)
err_printf(m, " fence[%d] = %08llx\n", i, gt->fence[i]);
- if (IS_GEN_RANGE(m->i915, 6, 11)) {
+ if (IS_GRAPHICS_VER(m->i915, 6, 11)) {
err_printf(m, "ERROR: 0x%08x\n", gt->error);
err_printf(m, "DONE_REG: 0x%08x\n", gt->done_reg);
}
- if (INTEL_GEN(m->i915) >= 8)
+ if (GRAPHICS_VER(m->i915) >= 8)
err_printf(m, "FAULT_TLB_DATA: 0x%08x 0x%08x\n",
gt->fault_data1, gt->fault_data0);
- if (IS_GEN(m->i915, 7))
+ if (GRAPHICS_VER(m->i915) == 7)
err_printf(m, "ERR_INT: 0x%08x\n", gt->err_int);
- if (IS_GEN_RANGE(m->i915, 8, 11))
+ if (IS_GRAPHICS_VER(m->i915, 8, 11))
err_printf(m, "GTT_CACHE_EN: 0x%08x\n", gt->gtt_cache);
- if (IS_GEN(m->i915, 12))
+ if (GRAPHICS_VER(m->i915) == 12)
err_printf(m, "AUX_ERR_DBG: 0x%08x\n", gt->aux_err);
- if (INTEL_GEN(m->i915) >= 12) {
+ if (GRAPHICS_VER(m->i915) >= 12) {
int i;
for (i = 0; i < GEN12_SFC_DONE_MAX; i++)
@@ -788,14 +788,14 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
err_printf(m, "IOMMU enabled?: %d\n", error->iommu);
- if (HAS_CSR(m->i915)) {
- struct intel_csr *csr = &m->i915->csr;
+ if (HAS_DMC(m->i915)) {
+ struct intel_dmc *dmc = &m->i915->dmc;
err_printf(m, "DMC loaded: %s\n",
- yesno(csr->dmc_payload != NULL));
+ yesno(intel_dmc_has_payload(m->i915) != 0));
err_printf(m, "DMC fw version: %d.%d\n",
- CSR_VERSION_MAJOR(csr->version),
- CSR_VERSION_MINOR(csr->version));
+ DMC_VERSION_MAJOR(dmc->version),
+ DMC_VERSION_MINOR(dmc->version));
}
err_printf(m, "RPM wakelock: %s\n", yesno(error->wakelock));
@@ -1092,12 +1092,12 @@ static void gt_record_fences(struct intel_gt_coredump *gt)
struct intel_uncore *uncore = gt->_gt->uncore;
int i;
- if (INTEL_GEN(uncore->i915) >= 6) {
+ if (GRAPHICS_VER(uncore->i915) >= 6) {
for (i = 0; i < ggtt->num_fences; i++)
gt->fence[i] =
intel_uncore_read64(uncore,
FENCE_REG_GEN6_LO(i));
- } else if (INTEL_GEN(uncore->i915) >= 4) {
+ } else if (GRAPHICS_VER(uncore->i915) >= 4) {
for (i = 0; i < ggtt->num_fences; i++)
gt->fence[i] =
intel_uncore_read64(uncore,
@@ -1115,20 +1115,20 @@ static void engine_record_registers(struct intel_engine_coredump *ee)
const struct intel_engine_cs *engine = ee->engine;
struct drm_i915_private *i915 = engine->i915;
- if (INTEL_GEN(i915) >= 6) {
+ if (GRAPHICS_VER(i915) >= 6) {
ee->rc_psmi = ENGINE_READ(engine, RING_PSMI_CTL);
- if (INTEL_GEN(i915) >= 12)
+ if (GRAPHICS_VER(i915) >= 12)
ee->fault_reg = intel_uncore_read(engine->uncore,
GEN12_RING_FAULT_REG);
- else if (INTEL_GEN(i915) >= 8)
+ else if (GRAPHICS_VER(i915) >= 8)
ee->fault_reg = intel_uncore_read(engine->uncore,
GEN8_RING_FAULT_REG);
else
ee->fault_reg = GEN6_RING_FAULT_REG_READ(engine);
}
- if (INTEL_GEN(i915) >= 4) {
+ if (GRAPHICS_VER(i915) >= 4) {
ee->esr = ENGINE_READ(engine, RING_ESR);
ee->faddr = ENGINE_READ(engine, RING_DMA_FADD);
ee->ipeir = ENGINE_READ(engine, RING_IPEIR);
@@ -1136,7 +1136,7 @@ static void engine_record_registers(struct intel_engine_coredump *ee)
ee->instps = ENGINE_READ(engine, RING_INSTPS);
ee->bbaddr = ENGINE_READ(engine, RING_BBADDR);
ee->ccid = ENGINE_READ(engine, CCID);
- if (INTEL_GEN(i915) >= 8) {
+ if (GRAPHICS_VER(i915) >= 8) {
ee->faddr |= (u64)ENGINE_READ(engine, RING_DMA_FADD_UDW) << 32;
ee->bbaddr |= (u64)ENGINE_READ(engine, RING_BBADDR_UDW) << 32;
}
@@ -1155,13 +1155,13 @@ static void engine_record_registers(struct intel_engine_coredump *ee)
ee->head = ENGINE_READ(engine, RING_HEAD);
ee->tail = ENGINE_READ(engine, RING_TAIL);
ee->ctl = ENGINE_READ(engine, RING_CTL);
- if (INTEL_GEN(i915) > 2)
+ if (GRAPHICS_VER(i915) > 2)
ee->mode = ENGINE_READ(engine, RING_MI_MODE);
if (!HWS_NEEDS_PHYSICAL(i915)) {
i915_reg_t mmio;
- if (IS_GEN(i915, 7)) {
+ if (GRAPHICS_VER(i915) == 7) {
switch (engine->id) {
default:
MISSING_CASE(engine->id);
@@ -1179,7 +1179,7 @@ static void engine_record_registers(struct intel_engine_coredump *ee)
mmio = VEBOX_HWS_PGA_GEN7;
break;
}
- } else if (IS_GEN(engine->i915, 6)) {
+ } else if (GRAPHICS_VER(engine->i915) == 6) {
mmio = RING_HWS_PGA_GEN6(engine->mmio_base);
} else {
/* XXX: gen8 returns to sanity */
@@ -1196,13 +1196,13 @@ static void engine_record_registers(struct intel_engine_coredump *ee)
ee->vm_info.gfx_mode = ENGINE_READ(engine, RING_MODE_GEN7);
- if (IS_GEN(i915, 6)) {
+ if (GRAPHICS_VER(i915) == 6) {
ee->vm_info.pp_dir_base =
ENGINE_READ(engine, RING_PP_DIR_BASE_READ);
- } else if (IS_GEN(i915, 7)) {
+ } else if (GRAPHICS_VER(i915) == 7) {
ee->vm_info.pp_dir_base =
ENGINE_READ(engine, RING_PP_DIR_BASE);
- } else if (INTEL_GEN(i915) >= 8) {
+ } else if (GRAPHICS_VER(i915) >= 8) {
u32 base = engine->mmio_base;
for (i = 0; i < 4; i++) {
@@ -1534,52 +1534,52 @@ static void gt_record_regs(struct intel_gt_coredump *gt)
gt->forcewake = intel_uncore_read_fw(uncore, FORCEWAKE_VLV);
}
- if (IS_GEN(i915, 7))
+ if (GRAPHICS_VER(i915) == 7)
gt->err_int = intel_uncore_read(uncore, GEN7_ERR_INT);
- if (INTEL_GEN(i915) >= 12) {
+ if (GRAPHICS_VER(i915) >= 12) {
gt->fault_data0 = intel_uncore_read(uncore,
GEN12_FAULT_TLB_DATA0);
gt->fault_data1 = intel_uncore_read(uncore,
GEN12_FAULT_TLB_DATA1);
- } else if (INTEL_GEN(i915) >= 8) {
+ } else if (GRAPHICS_VER(i915) >= 8) {
gt->fault_data0 = intel_uncore_read(uncore,
GEN8_FAULT_TLB_DATA0);
gt->fault_data1 = intel_uncore_read(uncore,
GEN8_FAULT_TLB_DATA1);
}
- if (IS_GEN(i915, 6)) {
+ if (GRAPHICS_VER(i915) == 6) {
gt->forcewake = intel_uncore_read_fw(uncore, FORCEWAKE);
gt->gab_ctl = intel_uncore_read(uncore, GAB_CTL);
gt->gfx_mode = intel_uncore_read(uncore, GFX_MODE);
}
/* 2: Registers which belong to multiple generations */
- if (INTEL_GEN(i915) >= 7)
+ if (GRAPHICS_VER(i915) >= 7)
gt->forcewake = intel_uncore_read_fw(uncore, FORCEWAKE_MT);
- if (INTEL_GEN(i915) >= 6) {
+ if (GRAPHICS_VER(i915) >= 6) {
gt->derrmr = intel_uncore_read(uncore, DERRMR);
- if (INTEL_GEN(i915) < 12) {
+ if (GRAPHICS_VER(i915) < 12) {
gt->error = intel_uncore_read(uncore, ERROR_GEN6);
gt->done_reg = intel_uncore_read(uncore, DONE_REG);
}
}
/* 3: Feature specific registers */
- if (IS_GEN_RANGE(i915, 6, 7)) {
+ if (IS_GRAPHICS_VER(i915, 6, 7)) {
gt->gam_ecochk = intel_uncore_read(uncore, GAM_ECOCHK);
gt->gac_eco = intel_uncore_read(uncore, GAC_ECO_BITS);
}
- if (IS_GEN_RANGE(i915, 8, 11))
+ if (IS_GRAPHICS_VER(i915, 8, 11))
gt->gtt_cache = intel_uncore_read(uncore, HSW_GTT_CACHE_EN);
- if (IS_GEN(i915, 12))
+ if (GRAPHICS_VER(i915) == 12)
gt->aux_err = intel_uncore_read(uncore, GEN12_AUX_ERR_DBG);
- if (INTEL_GEN(i915) >= 12) {
+ if (GRAPHICS_VER(i915) >= 12) {
for (i = 0; i < GEN12_SFC_DONE_MAX; i++) {
gt->sfc_done[i] =
intel_uncore_read(uncore, GEN12_SFC_DONE(i));
@@ -1589,7 +1589,7 @@ static void gt_record_regs(struct intel_gt_coredump *gt)
}
/* 4: Everything else */
- if (INTEL_GEN(i915) >= 11) {
+ if (GRAPHICS_VER(i915) >= 11) {
gt->ier = intel_uncore_read(uncore, GEN8_DE_MISC_IER);
gt->gtier[0] =
intel_uncore_read(uncore,
@@ -1608,7 +1608,7 @@ static void gt_record_regs(struct intel_gt_coredump *gt)
intel_uncore_read(uncore,
GEN11_GUNIT_CSME_INTR_ENABLE);
gt->ngtier = 6;
- } else if (INTEL_GEN(i915) >= 8) {
+ } else if (GRAPHICS_VER(i915) >= 8) {
gt->ier = intel_uncore_read(uncore, GEN8_DE_MISC_IER);
for (i = 0; i < 4; i++)
gt->gtier[i] =
@@ -1618,7 +1618,7 @@ static void gt_record_regs(struct intel_gt_coredump *gt)
gt->ier = intel_uncore_read(uncore, DEIER);
gt->gtier[0] = intel_uncore_read(uncore, GTIER);
gt->ngtier = 1;
- } else if (IS_GEN(i915, 2)) {
+ } else if (GRAPHICS_VER(i915) == 2) {
gt->ier = intel_uncore_read16(uncore, GEN2_IER);
} else if (!IS_VALLEYVIEW(i915)) {
gt->ier = intel_uncore_read(uncore, GEN2_IER);
@@ -1674,7 +1674,7 @@ static const char *error_msg(struct i915_gpu_coredump *error)
len = scnprintf(error->error_msg, sizeof(error->error_msg),
"GPU HANG: ecode %d:%x:%08x",
- INTEL_GEN(error->i915), hung_classes,
+ GRAPHICS_VER(error->i915), hung_classes,
generate_ecode(first));
if (first && first->context.pid) {
/* Just show the first executing process, more is confusing */
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 9ff511862848..a11bdb667241 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -2175,7 +2175,7 @@ static irqreturn_t ilk_irq_handler(int irq, void *arg)
gt_iir = raw_reg_read(regs, GTIIR);
if (gt_iir) {
raw_reg_write(regs, GTIIR, gt_iir);
- if (INTEL_GEN(i915) >= 6)
+ if (GRAPHICS_VER(i915) >= 6)
gen6_gt_irq_handler(&i915->gt, gt_iir);
else
gen5_gt_irq_handler(&i915->gt, gt_iir);
@@ -2192,7 +2192,7 @@ static irqreturn_t ilk_irq_handler(int irq, void *arg)
ret = IRQ_HANDLED;
}
- if (INTEL_GEN(i915) >= 6) {
+ if (GRAPHICS_VER(i915) >= 6) {
u32 pm_iir = raw_reg_read(regs, GEN6_PMIIR);
if (pm_iir) {
raw_reg_write(regs, GEN6_PMIIR, pm_iir);
@@ -2425,6 +2425,17 @@ static u32 gen8_de_pipe_flip_done_mask(struct drm_i915_private *i915)
return GEN8_PIPE_PRIMARY_FLIP_DONE;
}
+u32 gen8_de_pipe_underrun_mask(struct drm_i915_private *dev_priv)
+{
+ u32 mask = GEN8_PIPE_FIFO_UNDERRUN;
+
+ if (DISPLAY_VER(dev_priv) >= 13)
+ mask |= XELPD_PIPE_SOFT_UNDERRUN |
+ XELPD_PIPE_HARD_UNDERRUN;
+
+ return mask;
+}
+
static irqreturn_t
gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
{
@@ -2536,7 +2547,7 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
hsw_pipe_crc_irq_handler(dev_priv, pipe);
- if (iir & GEN8_PIPE_FIFO_UNDERRUN)
+ if (iir & gen8_de_pipe_underrun_mask(dev_priv))
intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
fault_errors = iir & gen8_de_pipe_fault_mask(dev_priv);
@@ -3028,7 +3039,7 @@ static void ilk_irq_reset(struct drm_i915_private *dev_priv)
GEN3_IRQ_RESET(uncore, DE);
dev_priv->irq_mask = ~0u;
- if (IS_GEN(dev_priv, 7))
+ if (GRAPHICS_VER(dev_priv) == 7)
intel_uncore_write(uncore, GEN7_ERR_INT, 0xffffffff);
if (IS_HASWELL(dev_priv)) {
@@ -3173,7 +3184,8 @@ void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
u8 pipe_mask)
{
struct intel_uncore *uncore = &dev_priv->uncore;
- u32 extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN |
+ u32 extra_ier = GEN8_PIPE_VBLANK |
+ gen8_de_pipe_underrun_mask(dev_priv) |
gen8_de_pipe_flip_done_mask(dev_priv);
enum pipe pipe;
@@ -3646,7 +3658,7 @@ static void ilk_irq_postinstall(struct drm_i915_private *dev_priv)
struct intel_uncore *uncore = &dev_priv->uncore;
u32 display_mask, extra_mask;
- if (INTEL_GEN(dev_priv) >= 7) {
+ if (GRAPHICS_VER(dev_priv) >= 7) {
display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
DE_PCH_EVENT_IVB | DE_AUX_CHANNEL_A_IVB);
extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
@@ -3757,7 +3769,8 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
}
de_pipe_enables = de_pipe_masked |
- GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN |
+ GEN8_PIPE_VBLANK |
+ gen8_de_pipe_underrun_mask(dev_priv) |
gen8_de_pipe_flip_done_mask(dev_priv);
de_port_enables = de_port_masked;
@@ -4317,7 +4330,7 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
dev_priv->l3_parity.remap_info[i] = NULL;
/* pre-gen11 the guc irqs bits are in the upper 16 bits of the pm reg */
- if (HAS_GT_UC(dev_priv) && INTEL_GEN(dev_priv) < 11)
+ if (HAS_GT_UC(dev_priv) && GRAPHICS_VER(dev_priv) < 11)
dev_priv->gt.pm_guc_events = GUC_INTR_GUC2HOST << 16;
if (!HAS_DISPLAY(dev_priv))
@@ -4388,18 +4401,18 @@ static irq_handler_t intel_irq_handler(struct drm_i915_private *dev_priv)
return cherryview_irq_handler;
else if (IS_VALLEYVIEW(dev_priv))
return valleyview_irq_handler;
- else if (IS_GEN(dev_priv, 4))
+ else if (GRAPHICS_VER(dev_priv) == 4)
return i965_irq_handler;
- else if (IS_GEN(dev_priv, 3))
+ else if (GRAPHICS_VER(dev_priv) == 3)
return i915_irq_handler;
else
return i8xx_irq_handler;
} else {
if (HAS_MASTER_UNIT_IRQ(dev_priv))
return dg1_irq_handler;
- if (INTEL_GEN(dev_priv) >= 11)
+ if (GRAPHICS_VER(dev_priv) >= 11)
return gen11_irq_handler;
- else if (INTEL_GEN(dev_priv) >= 8)
+ else if (GRAPHICS_VER(dev_priv) >= 8)
return gen8_irq_handler;
else
return ilk_irq_handler;
@@ -4413,16 +4426,16 @@ static void intel_irq_reset(struct drm_i915_private *dev_priv)
cherryview_irq_reset(dev_priv);
else if (IS_VALLEYVIEW(dev_priv))
valleyview_irq_reset(dev_priv);
- else if (IS_GEN(dev_priv, 4))
+ else if (GRAPHICS_VER(dev_priv) == 4)
i965_irq_reset(dev_priv);
- else if (IS_GEN(dev_priv, 3))
+ else if (GRAPHICS_VER(dev_priv) == 3)
i915_irq_reset(dev_priv);
else
i8xx_irq_reset(dev_priv);
} else {
- if (INTEL_GEN(dev_priv) >= 11)
+ if (GRAPHICS_VER(dev_priv) >= 11)
gen11_irq_reset(dev_priv);
- else if (INTEL_GEN(dev_priv) >= 8)
+ else if (GRAPHICS_VER(dev_priv) >= 8)
gen8_irq_reset(dev_priv);
else
ilk_irq_reset(dev_priv);
@@ -4436,16 +4449,16 @@ static void intel_irq_postinstall(struct drm_i915_private *dev_priv)
cherryview_irq_postinstall(dev_priv);
else if (IS_VALLEYVIEW(dev_priv))
valleyview_irq_postinstall(dev_priv);
- else if (IS_GEN(dev_priv, 4))
+ else if (GRAPHICS_VER(dev_priv) == 4)
i965_irq_postinstall(dev_priv);
- else if (IS_GEN(dev_priv, 3))
+ else if (GRAPHICS_VER(dev_priv) == 3)
i915_irq_postinstall(dev_priv);
else
i8xx_irq_postinstall(dev_priv);
} else {
- if (INTEL_GEN(dev_priv) >= 11)
+ if (GRAPHICS_VER(dev_priv) >= 11)
gen11_irq_postinstall(dev_priv);
- else if (INTEL_GEN(dev_priv) >= 8)
+ else if (GRAPHICS_VER(dev_priv) >= 8)
gen8_irq_postinstall(dev_priv);
else
ilk_irq_postinstall(dev_priv);
diff --git a/drivers/gpu/drm/i915/i915_irq.h b/drivers/gpu/drm/i915/i915_irq.h
index 25f25cd95818..db34d5dbe402 100644
--- a/drivers/gpu/drm/i915/i915_irq.h
+++ b/drivers/gpu/drm/i915/i915_irq.h
@@ -100,6 +100,7 @@ void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
u8 pipe_mask);
void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
u8 pipe_mask);
+u32 gen8_de_pipe_underrun_mask(struct drm_i915_private *dev_priv);
bool intel_crtc_get_vblank_timestamp(struct drm_crtc *crtc, int *max_error,
ktime_t *vblank_time, bool in_vblank_irq);
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index 574881c0e339..83b500bb170c 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -643,7 +643,7 @@ static const struct intel_device_info chv_info = {
GEN8_FEATURES, \
GEN(9), \
GEN9_DEFAULT_PAGE_SIZES, \
- .display.has_csr = 1, \
+ .display.has_dmc = 1, \
.has_gt_uc = 1, \
.display.has_hdcp = 1, \
.display.has_ipc = 1, \
@@ -698,7 +698,7 @@ static const struct intel_device_info skl_gt4_info = {
.display.has_psr = 1, \
.display.has_psr_hw_tracking = 1, \
.has_runtime_pm = 1, \
- .display.has_csr = 1, \
+ .display.has_dmc = 1, \
.has_rc6 = 1, \
.has_rps = true, \
.display.has_dp_mst = 1, \
@@ -953,6 +953,7 @@ static const struct intel_device_info adl_p_info = {
GEN12_FEATURES,
XE_LPD_FEATURES,
PLATFORM(INTEL_ALDERLAKE_P),
+ .has_cdclk_crawl = 1,
.require_force_probe = 1,
.display.has_modular_fia = 1,
.platform_engine_mask =
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index cb03e4152d2d..9f94914958c3 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -719,7 +719,7 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
* it to userspace...
*/
reason = ((report32[0] >> OAREPORT_REASON_SHIFT) &
- (IS_GEN(stream->perf->i915, 12) ?
+ (GRAPHICS_VER(stream->perf->i915) == 12 ?
OAREPORT_REASON_MASK_EXTENDED :
OAREPORT_REASON_MASK));
@@ -734,7 +734,7 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
* understand that the ID has been squashed by the kernel.
*/
if (!(report32[0] & stream->perf->gen8_valid_ctx_bit) &&
- INTEL_GEN(stream->perf->i915) <= 11)
+ GRAPHICS_VER(stream->perf->i915) <= 11)
ctx_id = report32[2] = INVALID_CTX_ID;
/*
@@ -801,7 +801,7 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
if (start_offset != *offset) {
i915_reg_t oaheadptr;
- oaheadptr = IS_GEN(stream->perf->i915, 12) ?
+ oaheadptr = GRAPHICS_VER(stream->perf->i915) == 12 ?
GEN12_OAG_OAHEADPTR : GEN8_OAHEADPTR;
spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
@@ -854,7 +854,7 @@ static int gen8_oa_read(struct i915_perf_stream *stream,
if (drm_WARN_ON(&uncore->i915->drm, !stream->oa_buffer.vaddr))
return -EIO;
- oastatus_reg = IS_GEN(stream->perf->i915, 12) ?
+ oastatus_reg = GRAPHICS_VER(stream->perf->i915) == 12 ?
GEN12_OAG_OASTATUS : GEN8_OASTATUS;
oastatus = intel_uncore_read(uncore, oastatus_reg);
@@ -901,7 +901,7 @@ static int gen8_oa_read(struct i915_perf_stream *stream,
intel_uncore_rmw(uncore, oastatus_reg,
GEN8_OASTATUS_COUNTER_OVERFLOW |
GEN8_OASTATUS_REPORT_LOST,
- IS_GEN_RANGE(uncore->i915, 8, 11) ?
+ IS_GRAPHICS_VER(uncore->i915, 8, 11) ?
(GEN8_OASTATUS_HEAD_POINTER_WRAP |
GEN8_OASTATUS_TAIL_POINTER_WRAP) : 0);
}
@@ -1243,7 +1243,7 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
if (IS_ERR(ce))
return PTR_ERR(ce);
- switch (INTEL_GEN(ce->engine->i915)) {
+ switch (GRAPHICS_VER(ce->engine->i915)) {
case 7: {
/*
* On Haswell we don't do any post processing of the reports
@@ -1297,7 +1297,7 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
}
default:
- MISSING_CASE(INTEL_GEN(ce->engine->i915));
+ MISSING_CASE(GRAPHICS_VER(ce->engine->i915));
}
ce->tag = stream->specific_ctx_id;
@@ -1602,7 +1602,7 @@ static u32 *save_restore_register(struct i915_perf_stream *stream, u32 *cs,
cmd = save ? MI_STORE_REGISTER_MEM : MI_LOAD_REGISTER_MEM;
cmd |= MI_SRM_LRM_GLOBAL_GTT;
- if (INTEL_GEN(stream->perf->i915) >= 8)
+ if (GRAPHICS_VER(stream->perf->i915) >= 8)
cmd++;
for (d = 0; d < dword_count; d++) {
@@ -1731,7 +1731,7 @@ retry:
*cs++ = i915_mmio_reg_offset(MI_PREDICATE_RESULT_1);
/* Restart from the beginning if we had timestamps roll over. */
- *cs++ = (INTEL_GEN(i915) < 8 ?
+ *cs++ = (GRAPHICS_VER(i915) < 8 ?
MI_BATCH_BUFFER_START :
MI_BATCH_BUFFER_START_GEN8) |
MI_BATCH_PREDICATE;
@@ -1768,7 +1768,7 @@ retry:
*cs++ = i915_mmio_reg_offset(MI_PREDICATE_RESULT_1);
/* Predicate the jump. */
- *cs++ = (INTEL_GEN(i915) < 8 ?
+ *cs++ = (GRAPHICS_VER(i915) < 8 ?
MI_BATCH_BUFFER_START :
MI_BATCH_BUFFER_START_GEN8) |
MI_BATCH_PREDICATE;
@@ -1892,7 +1892,7 @@ retry:
oa_config->flex_regs_len);
/* Jump into the active wait. */
- *cs++ = (INTEL_GEN(stream->perf->i915) < 8 ?
+ *cs++ = (GRAPHICS_VER(stream->perf->i915) < 8 ?
MI_BATCH_BUFFER_START :
MI_BATCH_BUFFER_START_GEN8);
*cs++ = i915_ggtt_offset(stream->noa_wait);
@@ -2492,7 +2492,7 @@ gen8_enable_metric_set(struct i915_perf_stream *stream,
* be read back from automatically triggered reports, as part of the
* RPT_ID field.
*/
- if (IS_GEN_RANGE(stream->perf->i915, 9, 11)) {
+ if (IS_GRAPHICS_VER(stream->perf->i915, 9, 11)) {
intel_uncore_write(uncore, GEN8_OA_DEBUG,
_MASKED_BIT_ENABLE(GEN9_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS |
GEN9_OA_DEBUG_INCLUDE_CLK_RATIO));
@@ -2797,7 +2797,7 @@ get_default_sseu_config(struct intel_sseu *out_sseu,
*out_sseu = intel_sseu_from_device_info(devinfo_sseu);
- if (IS_GEN(engine->i915, 11)) {
+ if (GRAPHICS_VER(engine->i915) == 11) {
/*
* We only need subslice count so it doesn't matter which ones
* we select - just turn off low bits in the amount of half of
@@ -2864,7 +2864,7 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
}
if (!(props->sample_flags & SAMPLE_OA_REPORT) &&
- (INTEL_GEN(perf->i915) < 12 || !stream->ctx)) {
+ (GRAPHICS_VER(perf->i915) < 12 || !stream->ctx)) {
DRM_DEBUG("Only OA report sampling supported\n");
return -EINVAL;
}
@@ -3006,7 +3006,7 @@ void i915_oa_init_reg_state(const struct intel_context *ce,
/* perf.exclusive_stream serialised by lrc_configure_all_contexts() */
stream = READ_ONCE(engine->i915->perf.exclusive_stream);
- if (stream && INTEL_GEN(stream->perf->i915) < 12)
+ if (stream && GRAPHICS_VER(stream->perf->i915) < 12)
gen8_update_reg_state_unlocked(ce, stream);
}
@@ -3443,7 +3443,7 @@ i915_perf_open_ioctl_locked(struct i915_perf *perf,
*/
if (IS_HASWELL(perf->i915) && specific_ctx)
privileged_op = false;
- else if (IS_GEN(perf->i915, 12) && specific_ctx &&
+ else if (GRAPHICS_VER(perf->i915) == 12 && specific_ctx &&
(props->sample_flags & SAMPLE_OA_REPORT) == 0)
privileged_op = false;
@@ -4119,7 +4119,7 @@ int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
}
oa_config->b_counter_regs = regs;
- if (INTEL_GEN(perf->i915) < 8) {
+ if (GRAPHICS_VER(perf->i915) < 8) {
if (args->n_flex_regs != 0) {
err = -EINVAL;
goto reg_err;
@@ -4365,7 +4365,7 @@ void i915_perf_init(struct drm_i915_private *i915)
*/
perf->ops.read = gen8_oa_read;
- if (IS_GEN_RANGE(i915, 8, 9)) {
+ if (IS_GRAPHICS_VER(i915, 8, 9)) {
perf->ops.is_valid_b_counter_reg =
gen7_is_valid_b_counter_addr;
perf->ops.is_valid_mux_reg =
@@ -4384,7 +4384,7 @@ void i915_perf_init(struct drm_i915_private *i915)
perf->ops.disable_metric_set = gen8_disable_metric_set;
perf->ops.oa_hw_tail_read = gen8_oa_hw_tail_read;
- if (IS_GEN(i915, 8)) {
+ if (GRAPHICS_VER(i915) == 8) {
perf->ctx_oactxctrl_offset = 0x120;
perf->ctx_flexeu0_offset = 0x2ce;
@@ -4395,7 +4395,7 @@ void i915_perf_init(struct drm_i915_private *i915)
perf->gen8_valid_ctx_bit = BIT(16);
}
- } else if (IS_GEN_RANGE(i915, 10, 11)) {
+ } else if (IS_GRAPHICS_VER(i915, 10, 11)) {
perf->ops.is_valid_b_counter_reg =
gen7_is_valid_b_counter_addr;
perf->ops.is_valid_mux_reg =
@@ -4409,7 +4409,7 @@ void i915_perf_init(struct drm_i915_private *i915)
perf->ops.disable_metric_set = gen10_disable_metric_set;
perf->ops.oa_hw_tail_read = gen8_oa_hw_tail_read;
- if (IS_GEN(i915, 10)) {
+ if (GRAPHICS_VER(i915) == 10) {
perf->ctx_oactxctrl_offset = 0x128;
perf->ctx_flexeu0_offset = 0x3de;
} else {
@@ -4417,7 +4417,7 @@ void i915_perf_init(struct drm_i915_private *i915)
perf->ctx_flexeu0_offset = 0x78e;
}
perf->gen8_valid_ctx_bit = BIT(16);
- } else if (IS_GEN(i915, 12)) {
+ } else if (GRAPHICS_VER(i915) == 12) {
perf->ops.is_valid_b_counter_reg =
gen12_is_valid_b_counter_addr;
perf->ops.is_valid_mux_reg =
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index a75cd1db320b..22c4d4178766 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -287,7 +287,7 @@ static bool exclusive_mmio_access(const struct drm_i915_private *i915)
* risk a machine hang. For a fun history lesson dig out the old
* userspace intel_gpu_top and run it on Ivybridge or Haswell!
*/
- return IS_GEN(i915, 7);
+ return GRAPHICS_VER(i915) == 7;
}
static void engine_sample(struct intel_engine_cs *engine, unsigned int period_ns)
@@ -463,7 +463,7 @@ engine_event_status(struct intel_engine_cs *engine,
case I915_SAMPLE_WAIT:
break;
case I915_SAMPLE_SEMA:
- if (INTEL_GEN(engine->i915) < 6)
+ if (GRAPHICS_VER(engine->i915) < 6)
return -ENODEV;
break;
default:
@@ -485,7 +485,7 @@ config_status(struct drm_i915_private *i915, u64 config)
return -ENODEV;
fallthrough;
case I915_PMU_REQUESTED_FREQUENCY:
- if (INTEL_GEN(i915) < 6)
+ if (GRAPHICS_VER(i915) < 6)
return -ENODEV;
break;
case I915_PMU_INTERRUPTS:
@@ -1147,7 +1147,7 @@ void i915_pmu_register(struct drm_i915_private *i915)
int ret = -ENOMEM;
- if (INTEL_GEN(i915) <= 2) {
+ if (GRAPHICS_VER(i915) <= 2) {
drm_info(&i915->drm, "PMU not supported for this GPU.");
return;
}
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index c294e3f93bed..e915ec034c98 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -2941,6 +2941,15 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define MBUS_BBOX_CTL_S1 _MMIO(0x45040)
#define MBUS_BBOX_CTL_S2 _MMIO(0x45044)
+#define MBUS_CTL _MMIO(0x4438C)
+#define MBUS_JOIN REG_BIT(31)
+#define MBUS_HASHING_MODE_MASK REG_BIT(30)
+#define MBUS_HASHING_MODE_2x2 REG_FIELD_PREP(MBUS_HASHING_MODE_MASK, 0)
+#define MBUS_HASHING_MODE_1x4 REG_FIELD_PREP(MBUS_HASHING_MODE_MASK, 1)
+#define MBUS_JOIN_PIPE_SELECT_MASK REG_GENMASK(28, 26)
+#define MBUS_JOIN_PIPE_SELECT(pipe) REG_FIELD_PREP(MBUS_JOIN_PIPE_SELECT_MASK, pipe)
+#define MBUS_JOIN_PIPE_SELECT_NONE MBUS_JOIN_PIPE_SELECT(7)
+
#define HDPORT_STATE _MMIO(0x45050)
#define HDPORT_DPLL_USED_MASK REG_GENMASK(15, 12)
#define HDPORT_DDI_USED(phy) REG_BIT(2 * (phy) + 1)
@@ -4382,6 +4391,8 @@ enum {
#define VRR_CTL_PIPELINE_FULL_MASK REG_GENMASK(10, 3)
#define VRR_CTL_PIPELINE_FULL(x) REG_FIELD_PREP(VRR_CTL_PIPELINE_FULL_MASK, (x))
#define VRR_CTL_PIPELINE_FULL_OVERRIDE REG_BIT(0)
+#define XELPD_VRR_CTL_VRR_GUARDBAND_MASK REG_GENMASK(15, 0)
+#define XELPD_VRR_CTL_VRR_GUARDBAND(x) REG_FIELD_PREP(XELPD_VRR_CTL_VRR_GUARDBAND_MASK, (x))
#define _TRANS_VRR_VMAX_A 0x60424
#define _TRANS_VRR_VMAX_B 0x61424
@@ -6141,6 +6152,10 @@ enum {
#define _PIPEBGCMAX 0x71010
#define PIPEGCMAX(pipe, i) _MMIO_PIPE2(pipe, _PIPEAGCMAX + (i) * 4)
+#define _PIPE_ARB_CTL_A 0x70028 /* icl+ */
+#define PIPE_ARB_CTL(pipe) _MMIO_PIPE2(pipe, _PIPE_ARB_CTL_A)
+#define PIPE_ARB_USE_PROG_SLOTS REG_BIT(13)
+
#define _PIPE_MISC_A 0x70030
#define _PIPE_MISC_B 0x71030
#define PIPEMISC_YUV420_ENABLE (1 << 27) /* glk+ */
@@ -6158,12 +6173,26 @@ enum {
#define PIPEMISC_DITHER_TYPE_SP (0 << 2)
#define PIPEMISC(pipe) _MMIO_PIPE2(pipe, _PIPE_MISC_A)
+#define _PIPE_MISC2_A 0x7002C
+#define _PIPE_MISC2_B 0x7102C
+#define PIPE_MISC2_BUBBLE_COUNTER_SCALER_EN (0x50 << 24)
+#define PIPE_MISC2_BUBBLE_COUNTER_SCALER_DIS (0x14 << 24)
+#define PIPE_MISC2_UNDERRUN_BUBBLE_COUNTER_MASK (0xff << 24)
+#define PIPE_MISC2(pipe) _MMIO_PIPE2(pipe, _PIPE_MISC2_A)
+
/* Skylake+ pipe bottom (background) color */
#define _SKL_BOTTOM_COLOR_A 0x70034
#define SKL_BOTTOM_COLOR_GAMMA_ENABLE (1 << 31)
#define SKL_BOTTOM_COLOR_CSC_ENABLE (1 << 30)
#define SKL_BOTTOM_COLOR(pipe) _MMIO_PIPE2(pipe, _SKL_BOTTOM_COLOR_A)
+#define _ICL_PIPE_A_STATUS 0x70058
+#define ICL_PIPESTATUS(pipe) _MMIO_PIPE2(pipe, _ICL_PIPE_A_STATUS)
+#define PIPE_STATUS_UNDERRUN REG_BIT(31)
+#define PIPE_STATUS_SOFT_UNDERRUN_XELPD REG_BIT(28)
+#define PIPE_STATUS_HARD_UNDERRUN_XELPD REG_BIT(27)
+#define PIPE_STATUS_PORT_UNDERRUN_XELPD REG_BIT(26)
+
#define VLV_DPFLIPSTAT _MMIO(VLV_DISPLAY_BASE + 0x70028)
#define PIPEB_LINE_COMPARE_INT_EN (1 << 29)
#define PIPEB_HLINE_INT_EN (1 << 28)
@@ -6432,16 +6461,28 @@ enum {
/* Watermark register definitions for SKL */
#define _CUR_WM_A_0 0x70140
#define _CUR_WM_B_0 0x71140
+#define _CUR_WM_SAGV_A 0x70158
+#define _CUR_WM_SAGV_B 0x71158
+#define _CUR_WM_SAGV_TRANS_A 0x7015C
+#define _CUR_WM_SAGV_TRANS_B 0x7115C
+#define _CUR_WM_TRANS_A 0x70168
+#define _CUR_WM_TRANS_B 0x71168
#define _PLANE_WM_1_A_0 0x70240
#define _PLANE_WM_1_B_0 0x71240
#define _PLANE_WM_2_A_0 0x70340
#define _PLANE_WM_2_B_0 0x71340
-#define _PLANE_WM_TRANS_1_A_0 0x70268
-#define _PLANE_WM_TRANS_1_B_0 0x71268
-#define _PLANE_WM_TRANS_2_A_0 0x70368
-#define _PLANE_WM_TRANS_2_B_0 0x71368
-#define _CUR_WM_TRANS_A_0 0x70168
-#define _CUR_WM_TRANS_B_0 0x71168
+#define _PLANE_WM_SAGV_1_A 0x70258
+#define _PLANE_WM_SAGV_1_B 0x71258
+#define _PLANE_WM_SAGV_2_A 0x70358
+#define _PLANE_WM_SAGV_2_B 0x71358
+#define _PLANE_WM_SAGV_TRANS_1_A 0x7025C
+#define _PLANE_WM_SAGV_TRANS_1_B 0x7125C
+#define _PLANE_WM_SAGV_TRANS_2_A 0x7035C
+#define _PLANE_WM_SAGV_TRANS_2_B 0x7135C
+#define _PLANE_WM_TRANS_1_A 0x70268
+#define _PLANE_WM_TRANS_1_B 0x71268
+#define _PLANE_WM_TRANS_2_A 0x70368
+#define _PLANE_WM_TRANS_2_B 0x71368
#define PLANE_WM_EN (1 << 31)
#define PLANE_WM_IGNORE_LINES (1 << 30)
#define PLANE_WM_LINES_MASK REG_GENMASK(26, 14)
@@ -6449,19 +6490,32 @@ enum {
#define _CUR_WM_0(pipe) _PIPE(pipe, _CUR_WM_A_0, _CUR_WM_B_0)
#define CUR_WM(pipe, level) _MMIO(_CUR_WM_0(pipe) + ((4) * (level)))
-#define CUR_WM_TRANS(pipe) _MMIO_PIPE(pipe, _CUR_WM_TRANS_A_0, _CUR_WM_TRANS_B_0)
-
+#define CUR_WM_SAGV(pipe) _MMIO_PIPE(pipe, _CUR_WM_SAGV_A, _CUR_WM_SAGV_B)
+#define CUR_WM_SAGV_TRANS(pipe) _MMIO_PIPE(pipe, _CUR_WM_SAGV_TRANS_A, _CUR_WM_SAGV_TRANS_B)
+#define CUR_WM_TRANS(pipe) _MMIO_PIPE(pipe, _CUR_WM_TRANS_A, _CUR_WM_TRANS_B)
#define _PLANE_WM_1(pipe) _PIPE(pipe, _PLANE_WM_1_A_0, _PLANE_WM_1_B_0)
#define _PLANE_WM_2(pipe) _PIPE(pipe, _PLANE_WM_2_A_0, _PLANE_WM_2_B_0)
-#define _PLANE_WM_BASE(pipe, plane) \
- _PLANE(plane, _PLANE_WM_1(pipe), _PLANE_WM_2(pipe))
-#define PLANE_WM(pipe, plane, level) \
- _MMIO(_PLANE_WM_BASE(pipe, plane) + ((4) * (level)))
-#define _PLANE_WM_TRANS_1(pipe) \
- _PIPE(pipe, _PLANE_WM_TRANS_1_A_0, _PLANE_WM_TRANS_1_B_0)
-#define _PLANE_WM_TRANS_2(pipe) \
- _PIPE(pipe, _PLANE_WM_TRANS_2_A_0, _PLANE_WM_TRANS_2_B_0)
-#define PLANE_WM_TRANS(pipe, plane) \
+#define _PLANE_WM_BASE(pipe, plane) \
+ _PLANE(plane, _PLANE_WM_1(pipe), _PLANE_WM_2(pipe))
+#define PLANE_WM(pipe, plane, level) \
+ _MMIO(_PLANE_WM_BASE(pipe, plane) + ((4) * (level)))
+#define _PLANE_WM_SAGV_1(pipe) \
+ _PIPE(pipe, _PLANE_WM_SAGV_1_A, _PLANE_WM_SAGV_1_B)
+#define _PLANE_WM_SAGV_2(pipe) \
+ _PIPE(pipe, _PLANE_WM_SAGV_2_A, _PLANE_WM_SAGV_2_B)
+#define PLANE_WM_SAGV(pipe, plane) \
+ _MMIO(_PLANE(plane, _PLANE_WM_SAGV_1(pipe), _PLANE_WM_SAGV_2(pipe)))
+#define _PLANE_WM_SAGV_TRANS_1(pipe) \
+ _PIPE(pipe, _PLANE_WM_SAGV_TRANS_1_A, _PLANE_WM_SAGV_TRANS_1_B)
+#define _PLANE_WM_SAGV_TRANS_2(pipe) \
+ _PIPE(pipe, _PLANE_WM_SAGV_TRANS_2_A, _PLANE_WM_SAGV_TRANS_2_B)
+#define PLANE_WM_SAGV_TRANS(pipe, plane) \
+ _MMIO(_PLANE(plane, _PLANE_WM_SAGV_TRANS_1(pipe), _PLANE_WM_SAGV_TRANS_2(pipe)))
+#define _PLANE_WM_TRANS_1(pipe) \
+ _PIPE(pipe, _PLANE_WM_TRANS_1_A, _PLANE_WM_TRANS_1_B)
+#define _PLANE_WM_TRANS_2(pipe) \
+ _PIPE(pipe, _PLANE_WM_TRANS_2_A, _PLANE_WM_TRANS_2_B)
+#define PLANE_WM_TRANS(pipe, plane) \
_MMIO(_PLANE(plane, _PLANE_WM_TRANS_1(pipe), _PLANE_WM_TRANS_2(pipe)))
/* define the Watermark register on Ironlake */
@@ -6566,6 +6620,8 @@ enum {
#define MCURSOR_MODE_128_ARGB_AX ((1 << 5) | MCURSOR_MODE_128_32B_AX)
#define MCURSOR_MODE_256_ARGB_AX ((1 << 5) | MCURSOR_MODE_256_32B_AX)
#define MCURSOR_MODE_64_ARGB_AX ((1 << 5) | MCURSOR_MODE_64_32B_AX)
+#define MCURSOR_ARB_SLOTS_MASK REG_GENMASK(30, 28) /* icl+ */
+#define MCURSOR_ARB_SLOTS(x) REG_FIELD_PREP(MCURSOR_ARB_SLOTS_MASK, (x)) /* icl+ */
#define MCURSOR_PIPE_SELECT_MASK (0x3 << 28)
#define MCURSOR_PIPE_SELECT_SHIFT 28
#define MCURSOR_PIPE_SELECT(pipe) ((pipe) << 28)
@@ -7017,6 +7073,8 @@ enum {
#define _PLANE_CTL_2_A 0x70280
#define _PLANE_CTL_3_A 0x70380
#define PLANE_CTL_ENABLE (1 << 31)
+#define PLANE_CTL_ARB_SLOTS_MASK REG_GENMASK(30, 28) /* icl+ */
+#define PLANE_CTL_ARB_SLOTS(x) REG_FIELD_PREP(PLANE_CTL_ARB_SLOTS_MASK, (x)) /* icl+ */
#define PLANE_CTL_PIPE_GAMMA_ENABLE (1 << 30) /* Pre-GLK */
#define PLANE_CTL_YUV_RANGE_CORRECTION_DISABLE (1 << 28)
/*
@@ -7280,7 +7338,7 @@ enum {
#define _PLANE_BUF_CFG_1_B 0x7127c
#define _PLANE_BUF_CFG_2_B 0x7137c
-#define DDB_ENTRY_MASK 0x7FF /* skl+: 10 bits, icl+ 11 bits */
+#define DDB_ENTRY_MASK 0xFFF /* skl+: 10 bits, icl+ 11 bits, adlp+ 12 bits */
#define DDB_ENTRY_END_SHIFT 16
#define _PLANE_BUF_CFG_1(pipe) \
_PIPE(pipe, _PLANE_BUF_CFG_1_A, _PLANE_BUF_CFG_1_B)
@@ -7692,20 +7750,20 @@ enum {
#define GAMMA_MODE_MODE_SPLIT (3 << 0) /* ivb-bdw */
#define GAMMA_MODE_MODE_12BIT_MULTI_SEGMENTED (3 << 0) /* icl + */
-/* DMC/CSR */
-#define CSR_PROGRAM(i) _MMIO(0x80000 + (i) * 4)
-#define CSR_SSP_BASE_ADDR_GEN9 0x00002FC0
-#define CSR_HTP_ADDR_SKL 0x00500034
-#define CSR_SSP_BASE _MMIO(0x8F074)
-#define CSR_HTP_SKL _MMIO(0x8F004)
-#define CSR_LAST_WRITE _MMIO(0x8F034)
-#define CSR_LAST_WRITE_VALUE 0xc003b400
-/* MMIO address range for CSR program (0x80000 - 0x82FFF) */
-#define CSR_MMIO_START_RANGE 0x80000
-#define CSR_MMIO_END_RANGE 0x8FFFF
-#define SKL_CSR_DC3_DC5_COUNT _MMIO(0x80030)
-#define SKL_CSR_DC5_DC6_COUNT _MMIO(0x8002C)
-#define BXT_CSR_DC3_DC5_COUNT _MMIO(0x80038)
+/* DMC */
+#define DMC_PROGRAM(i) _MMIO(0x80000 + (i) * 4)
+#define DMC_SSP_BASE_ADDR_GEN9 0x00002FC0
+#define DMC_HTP_ADDR_SKL 0x00500034
+#define DMC_SSP_BASE _MMIO(0x8F074)
+#define DMC_HTP_SKL _MMIO(0x8F004)
+#define DMC_LAST_WRITE _MMIO(0x8F034)
+#define DMC_LAST_WRITE_VALUE 0xc003b400
+/* MMIO address range for DMC program (0x80000 - 0x82FFF) */
+#define DMC_MMIO_START_RANGE 0x80000
+#define DMC_MMIO_END_RANGE 0x8FFFF
+#define SKL_DMC_DC3_DC5_COUNT _MMIO(0x80030)
+#define SKL_DMC_DC5_DC6_COUNT _MMIO(0x8002C)
+#define BXT_DMC_DC3_DC5_COUNT _MMIO(0x80038)
#define TGL_DMC_DEBUG_DC5_COUNT _MMIO(0x101084)
#define TGL_DMC_DEBUG_DC6_COUNT _MMIO(0x101088)
#define DG1_DMC_DEBUG_DC5_COUNT _MMIO(0x134154)
@@ -7820,6 +7878,8 @@ enum {
#define GEN8_PIPE_FIFO_UNDERRUN (1 << 31)
#define GEN8_PIPE_CDCLK_CRC_ERROR (1 << 29)
#define GEN8_PIPE_CDCLK_CRC_DONE (1 << 28)
+#define XELPD_PIPE_SOFT_UNDERRUN (1 << 22)
+#define XELPD_PIPE_HARD_UNDERRUN (1 << 21)
#define GEN8_PIPE_CURSOR_FAULT (1 << 10)
#define GEN8_PIPE_SPRITE_FAULT (1 << 9)
#define GEN8_PIPE_PRIMARY_FAULT (1 << 8)
@@ -8115,13 +8175,29 @@ enum {
#define DISP_DATA_PARTITION_5_6 (1 << 6)
#define DISP_IPC_ENABLE (1 << 3)
-#define _DBUF_CTL_S1 0x45008
-#define _DBUF_CTL_S2 0x44FE8
-#define DBUF_CTL_S(slice) _MMIO(_PICK_EVEN(slice, _DBUF_CTL_S1, _DBUF_CTL_S2))
+/*
+ * The below are numbered starting from "S1" on gen11/gen12, but starting
+ * with gen13 display, the bspec switches to a 0-based numbering scheme
+ * (although the addresses stay the same so new S0 = old S1, new S1 = old S2).
+ * We'll just use the 0-based numbering here for all platforms since it's the
+ * way things will be named by the hardware team going forward, plus it's more
+ * consistent with how most of the rest of our registers are named.
+ */
+#define _DBUF_CTL_S0 0x45008
+#define _DBUF_CTL_S1 0x44FE8
+#define _DBUF_CTL_S2 0x44300
+#define _DBUF_CTL_S3 0x44304
+#define DBUF_CTL_S(slice) _MMIO(_PICK(slice, \
+ _DBUF_CTL_S0, \
+ _DBUF_CTL_S1, \
+ _DBUF_CTL_S2, \
+ _DBUF_CTL_S3))
#define DBUF_POWER_REQUEST REG_BIT(31)
#define DBUF_POWER_STATE REG_BIT(30)
#define DBUF_TRACKER_STATE_SERVICE_MASK REG_GENMASK(23, 19)
#define DBUF_TRACKER_STATE_SERVICE(x) REG_FIELD_PREP(DBUF_TRACKER_STATE_SERVICE_MASK, x)
+#define DBUF_MIN_TRACKER_STATE_SERVICE_MASK REG_GENMASK(18, 16) /* ADL-P+ */
+#define DBUF_MIN_TRACKER_STATE_SERVICE(x) REG_FIELD_PREP(DBUF_MIN_TRACKER_STATE_SERVICE_MASK, x) /* ADL-P+ */
#define GEN7_MSG_CTL _MMIO(0x45010)
#define WAIT_FOR_PCH_RESET_ACK (1 << 1)
@@ -8306,6 +8382,7 @@ enum {
#define _PIPEC_CHICKEN 0x72038
#define PIPE_CHICKEN(pipe) _MMIO_PIPE(pipe, _PIPEA_CHICKEN,\
_PIPEB_CHICKEN)
+#define UNDERRUN_RECOVERY_DISABLE REG_BIT(30)
#define PIXEL_ROUNDING_TRUNC_FB_PASSTHRU (1 << 15)
#define PER_PIXEL_ALPHA_BYPASS_EN (1 << 7)
@@ -9831,7 +9908,7 @@ enum skl_power_gate {
#define TRANS_HDCP_CONF(trans) _MMIO_TRANS(trans, _TRANSA_HDCP_CONF, \
_TRANSB_HDCP_CONF)
#define HDCP_CONF(dev_priv, trans, port) \
- (INTEL_GEN(dev_priv) >= 12 ? \
+ (GRAPHICS_VER(dev_priv) >= 12 ? \
TRANS_HDCP_CONF(trans) : \
PORT_HDCP_CONF(port))
@@ -9844,7 +9921,7 @@ enum skl_power_gate {
_TRANSA_HDCP_ANINIT, \
_TRANSB_HDCP_ANINIT)
#define HDCP_ANINIT(dev_priv, trans, port) \
- (INTEL_GEN(dev_priv) >= 12 ? \
+ (GRAPHICS_VER(dev_priv) >= 12 ? \
TRANS_HDCP_ANINIT(trans) : \
PORT_HDCP_ANINIT(port))
@@ -9854,7 +9931,7 @@ enum skl_power_gate {
#define TRANS_HDCP_ANLO(trans) _MMIO_TRANS(trans, _TRANSA_HDCP_ANLO, \
_TRANSB_HDCP_ANLO)
#define HDCP_ANLO(dev_priv, trans, port) \
- (INTEL_GEN(dev_priv) >= 12 ? \
+ (GRAPHICS_VER(dev_priv) >= 12 ? \
TRANS_HDCP_ANLO(trans) : \
PORT_HDCP_ANLO(port))
@@ -9864,7 +9941,7 @@ enum skl_power_gate {
#define TRANS_HDCP_ANHI(trans) _MMIO_TRANS(trans, _TRANSA_HDCP_ANHI, \
_TRANSB_HDCP_ANHI)
#define HDCP_ANHI(dev_priv, trans, port) \
- (INTEL_GEN(dev_priv) >= 12 ? \
+ (GRAPHICS_VER(dev_priv) >= 12 ? \
TRANS_HDCP_ANHI(trans) : \
PORT_HDCP_ANHI(port))
@@ -9875,7 +9952,7 @@ enum skl_power_gate {
_TRANSA_HDCP_BKSVLO, \
_TRANSB_HDCP_BKSVLO)
#define HDCP_BKSVLO(dev_priv, trans, port) \
- (INTEL_GEN(dev_priv) >= 12 ? \
+ (GRAPHICS_VER(dev_priv) >= 12 ? \
TRANS_HDCP_BKSVLO(trans) : \
PORT_HDCP_BKSVLO(port))
@@ -9886,7 +9963,7 @@ enum skl_power_gate {
_TRANSA_HDCP_BKSVHI, \
_TRANSB_HDCP_BKSVHI)
#define HDCP_BKSVHI(dev_priv, trans, port) \
- (INTEL_GEN(dev_priv) >= 12 ? \
+ (GRAPHICS_VER(dev_priv) >= 12 ? \
TRANS_HDCP_BKSVHI(trans) : \
PORT_HDCP_BKSVHI(port))
@@ -9897,7 +9974,7 @@ enum skl_power_gate {
_TRANSA_HDCP_RPRIME, \
_TRANSB_HDCP_RPRIME)
#define HDCP_RPRIME(dev_priv, trans, port) \
- (INTEL_GEN(dev_priv) >= 12 ? \
+ (GRAPHICS_VER(dev_priv) >= 12 ? \
TRANS_HDCP_RPRIME(trans) : \
PORT_HDCP_RPRIME(port))
@@ -9908,7 +9985,7 @@ enum skl_power_gate {
_TRANSA_HDCP_STATUS, \
_TRANSB_HDCP_STATUS)
#define HDCP_STATUS(dev_priv, trans, port) \
- (INTEL_GEN(dev_priv) >= 12 ? \
+ (GRAPHICS_VER(dev_priv) >= 12 ? \
TRANS_HDCP_STATUS(trans) : \
PORT_HDCP_STATUS(port))
@@ -9949,7 +10026,7 @@ enum skl_power_gate {
#define AUTH_FORCE_CLR_INPUTCTR BIT(19)
#define AUTH_CLR_KEYS BIT(18)
#define HDCP2_AUTH(dev_priv, trans, port) \
- (INTEL_GEN(dev_priv) >= 12 ? \
+ (GRAPHICS_VER(dev_priv) >= 12 ? \
TRANS_HDCP2_AUTH(trans) : \
PORT_HDCP2_AUTH(port))
@@ -9960,7 +10037,7 @@ enum skl_power_gate {
_TRANSB_HDCP2_CTL)
#define CTL_LINK_ENCRYPTION_REQ BIT(31)
#define HDCP2_CTL(dev_priv, trans, port) \
- (INTEL_GEN(dev_priv) >= 12 ? \
+ (GRAPHICS_VER(dev_priv) >= 12 ? \
TRANS_HDCP2_CTL(trans) : \
PORT_HDCP2_CTL(port))
@@ -9974,7 +10051,7 @@ enum skl_power_gate {
#define LINK_AUTH_STATUS BIT(21)
#define LINK_ENCRYPTION_STATUS BIT(20)
#define HDCP2_STATUS(dev_priv, trans, port) \
- (INTEL_GEN(dev_priv) >= 12 ? \
+ (GRAPHICS_VER(dev_priv) >= 12 ? \
TRANS_HDCP2_STATUS(trans) : \
PORT_HDCP2_STATUS(port))
@@ -9996,7 +10073,7 @@ enum skl_power_gate {
#define STREAM_ENCRYPTION_STATUS BIT(31)
#define STREAM_TYPE_STATUS BIT(30)
#define HDCP2_STREAM_STATUS(dev_priv, trans, port) \
- (INTEL_GEN(dev_priv) >= 12 ? \
+ (GRAPHICS_VER(dev_priv) >= 12 ? \
TRANS_HDCP2_STREAM_STATUS(trans) : \
PIPE_HDCP2_STREAM_STATUS(pipe))
@@ -10012,7 +10089,7 @@ enum skl_power_gate {
_TRANSB_HDCP2_AUTH_STREAM)
#define AUTH_STREAM_TYPE BIT(31)
#define HDCP2_AUTH_STREAM(dev_priv, trans, port) \
- (INTEL_GEN(dev_priv) >= 12 ? \
+ (GRAPHICS_VER(dev_priv) >= 12 ? \
TRANS_HDCP2_AUTH_STREAM(trans) : \
PORT_HDCP2_AUTH_STREAM(port))
@@ -10128,8 +10205,10 @@ enum skl_power_gate {
#define DDI_BUF_CTL_ENABLE (1 << 31)
#define DDI_BUF_TRANS_SELECT(n) ((n) << 24)
#define DDI_BUF_EMP_MASK (0xf << 24)
+#define DDI_BUF_PHY_LINK_RATE(r) ((r) << 20)
#define DDI_BUF_PORT_REVERSAL (1 << 16)
#define DDI_BUF_IS_IDLE (1 << 7)
+#define DDI_BUF_CTL_TC_PHY_OWNERSHIP REG_BIT(6)
#define DDI_A_4_LANES (1 << 4)
#define DDI_PORT_WIDTH(width) (((width) - 1) << 1)
#define DDI_PORT_WIDTH_MASK (7 << 1)
@@ -10492,6 +10571,14 @@ enum skl_power_gate {
#define DG1_DPLL_ENABLE(pll) _MMIO_PLL3(pll, DPLL0_ENABLE, DPLL1_ENABLE, \
_MG_PLL1_ENABLE, _MG_PLL2_ENABLE)
+/* ADL-P Type C PLL */
+#define PORTTC1_PLL_ENABLE 0x46038
+#define PORTTC2_PLL_ENABLE 0x46040
+
+#define ADLP_PORTTC_PLL_ENABLE(tc_port) _MMIO_PORT((tc_port), \
+ PORTTC1_PLL_ENABLE, \
+ PORTTC2_PLL_ENABLE)
+
#define _MG_REFCLKIN_CTL_PORT1 0x16892C
#define _MG_REFCLKIN_CTL_PORT2 0x16992C
#define _MG_REFCLKIN_CTL_PORT3 0x16A92C
@@ -10906,6 +10993,8 @@ enum skl_power_gate {
#define BXT_DE_PLL_ENABLE _MMIO(0x46070)
#define BXT_DE_PLL_PLL_ENABLE (1 << 31)
#define BXT_DE_PLL_LOCK (1 << 30)
+#define BXT_DE_PLL_FREQ_REQ (1 << 23)
+#define BXT_DE_PLL_FREQ_REQ_ACK (1 << 22)
#define CNL_CDCLK_PLL_RATIO(x) (x)
#define CNL_CDCLK_PLL_RATIO_MASK 0xff
@@ -11280,6 +11369,12 @@ enum skl_power_gate {
#define ICL_ESC_CLK_DIV_SHIFT 0
#define DSI_MAX_ESC_CLK 20000 /* in KHz */
+#define _ADL_MIPIO_REG 0x180
+#define ADL_MIPIO_DW(port, dw) _MMIO(_ICL_COMBOPHY(port) + _ADL_MIPIO_REG + 4 * (dw))
+#define TX_ESC_CLK_DIV_PHY_SEL REG_BIT(16)
+#define TX_ESC_CLK_DIV_PHY_MASK REG_GENMASK(23, 16)
+#define TX_ESC_CLK_DIV_PHY REG_FIELD_PREP(TX_ESC_CLK_DIV_PHY_MASK, 0x7f)
+
#define _DSI_CMD_FRMCTL_0 0x6b034
#define _DSI_CMD_FRMCTL_1 0x6b834
#define DSI_CMD_FRMCTL(port) _MMIO_PORT(port, \
@@ -12566,6 +12661,15 @@ enum skl_power_gate {
#define DP_PIN_ASSIGNMENT_MASK(idx) (0xf << ((idx) * 4))
#define DP_PIN_ASSIGNMENT(idx, x) ((x) << ((idx) * 4))
+#define _TCSS_DDI_STATUS_1 0x161500
+#define _TCSS_DDI_STATUS_2 0x161504
+#define TCSS_DDI_STATUS(tc) _MMIO(_PICK_EVEN(tc, \
+ _TCSS_DDI_STATUS_1, \
+ _TCSS_DDI_STATUS_2))
+#define TCSS_DDI_STATUS_READY REG_BIT(2)
+#define TCSS_DDI_STATUS_HPD_LIVE_STATUS_TBT REG_BIT(1)
+#define TCSS_DDI_STATUS_HPD_LIVE_STATUS_ALT REG_BIT(0)
+
/* This register controls the Display State Buffer (DSB) engines. */
#define _DSBSL_INSTANCE_BASE 0x70B00
#define DSBSL_INSTANCE(pipe, id) (_DSBSL_INSTANCE_BASE + \
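
Most of the i915 hunks above and below are a mechanical conversion from the old generation helpers (INTEL_GEN(), IS_GEN(), IS_GEN_RANGE()) to explicit GRAPHICS_VER() comparisons and IS_GRAPHICS_VER() ranges. A minimal sketch of the before/after pattern, with hypothetical enable_*() helpers standing in for the per-platform code paths:

    /* Old helpers: implicit >=, ==, and range checks. */
    if (INTEL_GEN(i915) >= 12)
            enable_tgl_feature();        /* hypothetical */
    if (IS_GEN(i915, 9))
            enable_skl_feature();        /* hypothetical */
    if (IS_GEN_RANGE(i915, 6, 7))
            enable_snb_ivb_feature();    /* hypothetical */

    /* New style: the version is compared explicitly. */
    if (GRAPHICS_VER(i915) >= 12)
            enable_tgl_feature();
    if (GRAPHICS_VER(i915) == 9)
            enable_skl_feature();
    if (IS_GRAPHICS_VER(i915, 6, 7))     /* inclusive range, like IS_GEN_RANGE */
            enable_snb_ivb_feature();
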
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 970d8f4986bb..1014c71cf7f5 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -1176,12 +1176,12 @@ __emit_semaphore_wait(struct i915_request *to,
struct i915_request *from,
u32 seqno)
{
- const int has_token = INTEL_GEN(to->engine->i915) >= 12;
+ const int has_token = GRAPHICS_VER(to->engine->i915) >= 12;
u32 hwsp_offset;
int len, err;
u32 *cs;
- GEM_BUG_ON(INTEL_GEN(to->engine->i915) < 8);
+ GEM_BUG_ON(GRAPHICS_VER(to->engine->i915) < 8);
GEM_BUG_ON(i915_request_has_initial_breadcrumb(to));
/* We need to pin the signaler's HWSP until we are finished reading. */
@@ -1594,8 +1594,8 @@ i915_request_await_object(struct i915_request *to,
struct dma_fence **shared;
unsigned int count, i;
- ret = dma_resv_get_fences_rcu(obj->base.resv,
- &excl, &count, &shared);
+ ret = dma_resv_get_fences(obj->base.resv, &excl, &count,
+ &shared);
if (ret)
return ret;
@@ -1611,7 +1611,7 @@ i915_request_await_object(struct i915_request *to,
dma_fence_put(shared[i]);
kfree(shared);
} else {
- excl = dma_resv_get_excl_rcu(obj->base.resv);
+ excl = dma_resv_get_excl_unlocked(obj->base.resv);
}
if (excl) {
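
The request hunks above also pick up the dma-resv helper renames: dma_resv_get_fences_rcu() becomes dma_resv_get_fences() and dma_resv_get_excl_rcu() becomes dma_resv_get_excl_unlocked(), with unchanged behavior at these call sites. A rough consumer-side sketch of the new names, assuming a GEM object obj and minimal error handling:

    struct dma_fence *excl;
    struct dma_fence **shared;
    unsigned int count, i;
    int ret;

    /* Unlocked snapshot: exclusive fence plus an array of shared fences. */
    ret = dma_resv_get_fences(obj->base.resv, &excl, &count, &shared);
    if (ret)
            return ret;

    for (i = 0; i < count; i++)
            dma_fence_put(shared[i]);
    kfree(shared);
    dma_fence_put(excl);                 /* NULL-safe */

    /* Or grab only the exclusive fence without the reservation lock held. */
    excl = dma_resv_get_excl_unlocked(obj->base.resv);
    if (excl)
            dma_fence_put(excl);
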
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 5fcc32821e18..f7b55f34dba8 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -38,14 +38,14 @@ static void intel_save_swf(struct drm_i915_private *dev_priv)
int i;
/* Scratch space */
- if (IS_GEN(dev_priv, 2) && IS_MOBILE(dev_priv)) {
+ if (GRAPHICS_VER(dev_priv) == 2 && IS_MOBILE(dev_priv)) {
for (i = 0; i < 7; i++) {
dev_priv->regfile.saveSWF0[i] = intel_de_read(dev_priv, SWF0(i));
dev_priv->regfile.saveSWF1[i] = intel_de_read(dev_priv, SWF1(i));
}
for (i = 0; i < 3; i++)
dev_priv->regfile.saveSWF3[i] = intel_de_read(dev_priv, SWF3(i));
- } else if (IS_GEN(dev_priv, 2)) {
+ } else if (GRAPHICS_VER(dev_priv) == 2) {
for (i = 0; i < 7; i++)
dev_priv->regfile.saveSWF1[i] = intel_de_read(dev_priv, SWF1(i));
} else if (HAS_GMCH(dev_priv)) {
@@ -63,14 +63,14 @@ static void intel_restore_swf(struct drm_i915_private *dev_priv)
int i;
/* Scratch space */
- if (IS_GEN(dev_priv, 2) && IS_MOBILE(dev_priv)) {
+ if (GRAPHICS_VER(dev_priv) == 2 && IS_MOBILE(dev_priv)) {
for (i = 0; i < 7; i++) {
intel_de_write(dev_priv, SWF0(i), dev_priv->regfile.saveSWF0[i]);
intel_de_write(dev_priv, SWF1(i), dev_priv->regfile.saveSWF1[i]);
}
for (i = 0; i < 3; i++)
intel_de_write(dev_priv, SWF3(i), dev_priv->regfile.saveSWF3[i]);
- } else if (IS_GEN(dev_priv, 2)) {
+ } else if (GRAPHICS_VER(dev_priv) == 2) {
for (i = 0; i < 7; i++)
intel_de_write(dev_priv, SWF1(i), dev_priv->regfile.saveSWF1[i]);
} else if (HAS_GMCH(dev_priv)) {
@@ -91,10 +91,10 @@ void i915_save_display(struct drm_i915_private *dev_priv)
return;
/* Display arbitration control */
- if (INTEL_GEN(dev_priv) <= 4)
+ if (GRAPHICS_VER(dev_priv) <= 4)
dev_priv->regfile.saveDSPARB = intel_de_read(dev_priv, DSPARB);
- if (IS_GEN(dev_priv, 4))
+ if (GRAPHICS_VER(dev_priv) == 4)
pci_read_config_word(pdev, GCDGMBUS,
&dev_priv->regfile.saveGCDGMBUS);
@@ -110,12 +110,12 @@ void i915_restore_display(struct drm_i915_private *dev_priv)
intel_restore_swf(dev_priv);
- if (IS_GEN(dev_priv, 4))
+ if (GRAPHICS_VER(dev_priv) == 4)
pci_write_config_word(pdev, GCDGMBUS,
dev_priv->regfile.saveGCDGMBUS);
/* Display arbitration */
- if (INTEL_GEN(dev_priv) <= 4)
+ if (GRAPHICS_VER(dev_priv) <= 4)
intel_de_write(dev_priv, DSPARB, dev_priv->regfile.saveDSPARB);
/* only restore FBC info on the platform that supports FBC*/
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c
index 2744558f3050..c589a681da77 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.c
+++ b/drivers/gpu/drm/i915/i915_sw_fence.c
@@ -582,7 +582,7 @@ int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
struct dma_fence **shared;
unsigned int count, i;
- ret = dma_resv_get_fences_rcu(resv, &excl, &count, &shared);
+ ret = dma_resv_get_fences(resv, &excl, &count, &shared);
if (ret)
return ret;
@@ -606,7 +606,7 @@ int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
dma_fence_put(shared[i]);
kfree(shared);
} else {
- excl = dma_resv_get_excl_rcu(resv);
+ excl = dma_resv_get_excl_unlocked(resv);
}
if (ret >= 0 && excl && excl->ops != exclude) {
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 4c6b5d52b5ca..b099e09ccc32 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -595,7 +595,7 @@ void i915_setup_sysfs(struct drm_i915_private *dev_priv)
ret = 0;
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
ret = sysfs_create_files(&kdev->kobj, vlv_attrs);
- else if (INTEL_GEN(dev_priv) >= 6)
+ else if (GRAPHICS_VER(dev_priv) >= 6)
ret = sysfs_create_files(&kdev->kobj, gen6_attrs);
if (ret)
drm_err(&dev_priv->drm, "RPS sysfs setup failed\n");
diff --git a/drivers/gpu/drm/i915/i915_vgpu.c b/drivers/gpu/drm/i915/i915_vgpu.c
index 172799277dd5..31a105bc1792 100644
--- a/drivers/gpu/drm/i915/i915_vgpu.c
+++ b/drivers/gpu/drm/i915/i915_vgpu.c
@@ -74,7 +74,7 @@ void intel_vgpu_detect(struct drm_i915_private *dev_priv)
* we do not support VGT on older gens, return early so we don't have
* to consider differently numbered or sized MMIO bars
*/
- if (INTEL_GEN(dev_priv) < 6)
+ if (GRAPHICS_VER(dev_priv) < 6)
return;
shared_area = pci_iomap_range(pdev, 0, VGT_PVINFO_PAGE, VGT_PVINFO_SIZE);
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index a6cd0fa62847..b319fd3f91cc 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -274,7 +274,7 @@ i915_vma_instance(struct drm_i915_gem_object *obj,
{
struct i915_vma *vma;
- GEM_BUG_ON(view && !i915_is_ggtt(vm));
+ GEM_BUG_ON(view && !i915_is_ggtt_or_dpt(vm));
GEM_BUG_ON(!atomic_read(&vm->open));
spin_lock(&obj->vma.lock);
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
index 8df784a026d2..dc6926d89626 100644
--- a/drivers/gpu/drm/i915/i915_vma.h
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -68,6 +68,11 @@ static inline bool i915_vma_is_ggtt(const struct i915_vma *vma)
return test_bit(I915_VMA_GGTT_BIT, __i915_vma_flags(vma));
}
+static inline bool i915_vma_is_dpt(const struct i915_vma *vma)
+{
+ return i915_is_dpt(vma->vm);
+}
+
static inline bool i915_vma_has_ggtt_write(const struct i915_vma *vma)
{
return test_bit(I915_VMA_GGTT_WRITE_BIT, __i915_vma_flags(vma));
@@ -158,7 +163,7 @@ i915_vma_compare(struct i915_vma *vma,
{
ptrdiff_t cmp;
- GEM_BUG_ON(view && !i915_is_ggtt(vm));
+ GEM_BUG_ON(view && !i915_is_ggtt_or_dpt(vm));
cmp = ptrdiff(vma->vm, vm);
if (cmp)
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
index 8cb58a238c68..7eaa92fee421 100644
--- a/drivers/gpu/drm/i915/intel_device_info.c
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -257,10 +257,10 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
if (IS_ADLS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_A0))
for_each_pipe(dev_priv, pipe)
runtime->num_scalers[pipe] = 0;
- else if (INTEL_GEN(dev_priv) >= 10) {
+ else if (GRAPHICS_VER(dev_priv) >= 10) {
for_each_pipe(dev_priv, pipe)
runtime->num_scalers[pipe] = 2;
- } else if (IS_GEN(dev_priv, 9)) {
+ } else if (GRAPHICS_VER(dev_priv) == 9) {
runtime->num_scalers[PIPE_A] = 2;
runtime->num_scalers[PIPE_B] = 2;
runtime->num_scalers[PIPE_C] = 1;
@@ -271,10 +271,10 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
if (DISPLAY_VER(dev_priv) >= 13 || HAS_D12_PLANE_MINIMIZATION(dev_priv))
for_each_pipe(dev_priv, pipe)
runtime->num_sprites[pipe] = 4;
- else if (INTEL_GEN(dev_priv) >= 11)
+ else if (GRAPHICS_VER(dev_priv) >= 11)
for_each_pipe(dev_priv, pipe)
runtime->num_sprites[pipe] = 6;
- else if (IS_GEN(dev_priv, 10) || IS_GEMINILAKE(dev_priv))
+ else if (GRAPHICS_VER(dev_priv) == 10 || IS_GEMINILAKE(dev_priv))
for_each_pipe(dev_priv, pipe)
runtime->num_sprites[pipe] = 3;
else if (IS_BROXTON(dev_priv)) {
@@ -293,12 +293,12 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
for_each_pipe(dev_priv, pipe)
runtime->num_sprites[pipe] = 2;
- } else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) {
+ } else if (GRAPHICS_VER(dev_priv) >= 5 || IS_G4X(dev_priv)) {
for_each_pipe(dev_priv, pipe)
runtime->num_sprites[pipe] = 1;
}
- if (HAS_DISPLAY(dev_priv) && IS_GEN_RANGE(dev_priv, 7, 8) &&
+ if (HAS_DISPLAY(dev_priv) && IS_GRAPHICS_VER(dev_priv, 7, 8) &&
HAS_PCH_SPLIT(dev_priv)) {
u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP);
u32 sfuse_strap = intel_de_read(dev_priv, SFUSE_STRAP);
@@ -325,7 +325,7 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
info->pipe_mask &= ~BIT(PIPE_C);
info->cpu_transcoder_mask &= ~BIT(TRANSCODER_C);
}
- } else if (HAS_DISPLAY(dev_priv) && INTEL_GEN(dev_priv) >= 9) {
+ } else if (HAS_DISPLAY(dev_priv) && GRAPHICS_VER(dev_priv) >= 9) {
u32 dfsm = intel_de_read(dev_priv, SKL_DFSM);
if (dfsm & SKL_DFSM_PIPE_A_DISABLE) {
@@ -340,7 +340,7 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
info->pipe_mask &= ~BIT(PIPE_C);
info->cpu_transcoder_mask &= ~BIT(TRANSCODER_C);
}
- if (INTEL_GEN(dev_priv) >= 12 &&
+ if (GRAPHICS_VER(dev_priv) >= 12 &&
(dfsm & TGL_DFSM_PIPE_D_DISABLE)) {
info->pipe_mask &= ~BIT(PIPE_D);
info->cpu_transcoder_mask &= ~BIT(TRANSCODER_D);
@@ -352,15 +352,15 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
if (dfsm & SKL_DFSM_DISPLAY_PM_DISABLE)
info->display.has_fbc = 0;
- if (INTEL_GEN(dev_priv) >= 11 && (dfsm & ICL_DFSM_DMC_DISABLE))
- info->display.has_csr = 0;
+ if (GRAPHICS_VER(dev_priv) >= 11 && (dfsm & ICL_DFSM_DMC_DISABLE))
+ info->display.has_dmc = 0;
- if (INTEL_GEN(dev_priv) >= 10 &&
+ if (GRAPHICS_VER(dev_priv) >= 10 &&
(dfsm & CNL_DFSM_DISPLAY_DSC_DISABLE))
info->display.has_dsc = 0;
}
- if (IS_GEN(dev_priv, 6) && intel_vtd_active()) {
+ if (GRAPHICS_VER(dev_priv) == 6 && intel_vtd_active()) {
drm_info(&dev_priv->drm,
"Disabling ppGTT for VT-d support\n");
info->ppgtt_type = INTEL_PPGTT_NONE;
diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h
index e98b36959736..b326aff65cd6 100644
--- a/drivers/gpu/drm/i915/intel_device_info.h
+++ b/drivers/gpu/drm/i915/intel_device_info.h
@@ -141,7 +141,7 @@ enum intel_ppgtt_type {
#define DEV_INFO_DISPLAY_FOR_EACH_FLAG(func) \
/* Keep in alphabetical order */ \
func(cursor_needs_physical); \
- func(has_csr); \
+ func(has_dmc); \
func(has_ddi); \
func(has_dp_mst); \
func(has_dsb); \
@@ -185,6 +185,8 @@ struct intel_device_info {
u8 abox_mask;
+ u8 has_cdclk_crawl; /* supports CDCLK crawling */
+
#define DEFINE_FLAG(name) u8 name:1
DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG);
#undef DEFINE_FLAG
diff --git a/drivers/gpu/drm/i915/intel_dram.c b/drivers/gpu/drm/i915/intel_dram.c
index 1e53c017c30d..50fdea84ba70 100644
--- a/drivers/gpu/drm/i915/intel_dram.c
+++ b/drivers/gpu/drm/i915/intel_dram.c
@@ -121,7 +121,7 @@ skl_dram_get_dimm_info(struct drm_i915_private *i915,
struct dram_dimm_info *dimm,
int channel, char dimm_name, u16 val)
{
- if (INTEL_GEN(i915) >= 10) {
+ if (GRAPHICS_VER(i915) >= 10) {
dimm->size = cnl_get_dimm_size(val);
dimm->width = cnl_get_dimm_width(val);
dimm->ranks = cnl_get_dimm_ranks(val);
@@ -422,7 +422,7 @@ static int icl_pcode_read_mem_global_info(struct drm_i915_private *dev_priv)
if (ret)
return ret;
- if (IS_GEN(dev_priv, 12)) {
+ if (GRAPHICS_VER(dev_priv) == 12) {
switch (val & 0xf) {
case 0:
dram_info->type = INTEL_DRAM_DDR4;
@@ -501,12 +501,12 @@ void intel_dram_detect(struct drm_i915_private *i915)
*/
dram_info->wm_lv_0_adjust_needed = !IS_GEN9_LP(i915);
- if (INTEL_GEN(i915) < 9 || !HAS_DISPLAY(i915))
+ if (GRAPHICS_VER(i915) < 9 || !HAS_DISPLAY(i915))
return;
- if (INTEL_GEN(i915) >= 12)
+ if (GRAPHICS_VER(i915) >= 12)
ret = gen12_get_dram_info(i915);
- else if (INTEL_GEN(i915) >= 11)
+ else if (GRAPHICS_VER(i915) >= 11)
ret = gen11_get_dram_info(i915);
else if (IS_GEN9_LP(i915))
ret = bxt_get_dram_info(i915);
@@ -535,7 +535,7 @@ void intel_dram_edram_detect(struct drm_i915_private *i915)
{
u32 edram_cap = 0;
- if (!(IS_HASWELL(i915) || IS_BROADWELL(i915) || INTEL_GEN(i915) >= 9))
+ if (!(IS_HASWELL(i915) || IS_BROADWELL(i915) || GRAPHICS_VER(i915) >= 9))
return;
edram_cap = __raw_uncore_read32(&i915->uncore, HSW_EDRAM_CAP);
@@ -549,7 +549,7 @@ void intel_dram_edram_detect(struct drm_i915_private *i915)
* The needed capability bits for size calculation are not there with
* pre gen9 so return 128MB always.
*/
- if (INTEL_GEN(i915) < 9)
+ if (GRAPHICS_VER(i915) < 9)
i915->edram_size_mb = 128;
else
i915->edram_size_mb = gen9_edram_size_mb(i915, edram_cap);
diff --git a/drivers/gpu/drm/i915/intel_pch.c b/drivers/gpu/drm/i915/intel_pch.c
index 98a17dd1bda4..4e92ae19189e 100644
--- a/drivers/gpu/drm/i915/intel_pch.c
+++ b/drivers/gpu/drm/i915/intel_pch.c
@@ -13,17 +13,17 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
switch (id) {
case INTEL_PCH_IBX_DEVICE_ID_TYPE:
drm_dbg_kms(&dev_priv->drm, "Found Ibex Peak PCH\n");
- drm_WARN_ON(&dev_priv->drm, !IS_GEN(dev_priv, 5));
+ drm_WARN_ON(&dev_priv->drm, GRAPHICS_VER(dev_priv) != 5);
return PCH_IBX;
case INTEL_PCH_CPT_DEVICE_ID_TYPE:
drm_dbg_kms(&dev_priv->drm, "Found CougarPoint PCH\n");
drm_WARN_ON(&dev_priv->drm,
- !IS_GEN(dev_priv, 6) && !IS_IVYBRIDGE(dev_priv));
+ GRAPHICS_VER(dev_priv) != 6 && !IS_IVYBRIDGE(dev_priv));
return PCH_CPT;
case INTEL_PCH_PPT_DEVICE_ID_TYPE:
drm_dbg_kms(&dev_priv->drm, "Found PantherPoint PCH\n");
drm_WARN_ON(&dev_priv->drm,
- !IS_GEN(dev_priv, 6) && !IS_IVYBRIDGE(dev_priv));
+ GRAPHICS_VER(dev_priv) != 6 && !IS_IVYBRIDGE(dev_priv));
/* PantherPoint is CPT compatible */
return PCH_CPT;
case INTEL_PCH_LPT_DEVICE_ID_TYPE:
@@ -181,9 +181,9 @@ intel_virt_detect_pch(const struct drm_i915_private *dev_priv,
id = INTEL_PCH_LPT_LP_DEVICE_ID_TYPE;
else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
id = INTEL_PCH_LPT_DEVICE_ID_TYPE;
- else if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
+ else if (GRAPHICS_VER(dev_priv) == 6 || IS_IVYBRIDGE(dev_priv))
id = INTEL_PCH_CPT_DEVICE_ID_TYPE;
- else if (IS_GEN(dev_priv, 5))
+ else if (GRAPHICS_VER(dev_priv) == 5)
id = INTEL_PCH_IBX_DEVICE_ID_TYPE;
if (id)
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 15d9a64e7b4c..45fefa0ed160 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -2983,7 +2983,9 @@ static void intel_fixup_cur_wm_latency(struct drm_i915_private *dev_priv,
int ilk_wm_max_level(const struct drm_i915_private *dev_priv)
{
/* how many WM levels are we expecting */
- if (DISPLAY_VER(dev_priv) >= 9)
+ if (HAS_HW_SAGV_WM(dev_priv))
+ return 5;
+ else if (DISPLAY_VER(dev_priv) >= 9)
return 7;
else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
return 4;
@@ -4011,8 +4013,9 @@ static int intel_compute_sagv_mask(struct intel_atomic_state *state)
* latter from the plane commit hooks (especially in the legacy
* cursor case)
*/
- pipe_wm->use_sagv_wm = DISPLAY_VER(dev_priv) >= 12 &&
- intel_can_enable_sagv(dev_priv, new_bw_state);
+ pipe_wm->use_sagv_wm = !HAS_HW_SAGV_WM(dev_priv) &&
+ DISPLAY_VER(dev_priv) >= 12 &&
+ intel_can_enable_sagv(dev_priv, new_bw_state);
}
if (intel_can_enable_sagv(dev_priv, new_bw_state) !=
@@ -4054,6 +4057,20 @@ skl_ddb_entry_for_slices(struct drm_i915_private *dev_priv, u8 slice_mask,
WARN_ON(ddb->end > INTEL_INFO(dev_priv)->dbuf.size);
}
+static unsigned int mbus_ddb_offset(struct drm_i915_private *i915, u8 slice_mask)
+{
+ struct skl_ddb_entry ddb;
+
+ if (slice_mask & (BIT(DBUF_S1) | BIT(DBUF_S2)))
+ slice_mask = BIT(DBUF_S1);
+ else if (slice_mask & (BIT(DBUF_S3) | BIT(DBUF_S4)))
+ slice_mask = BIT(DBUF_S3);
+
+ skl_ddb_entry_for_slices(i915, slice_mask, &ddb);
+
+ return ddb.start;
+}
+
u32 skl_ddb_dbuf_slice_mask(struct drm_i915_private *dev_priv,
const struct skl_ddb_entry *entry)
{
@@ -4146,6 +4163,7 @@ skl_crtc_allocate_ddb(struct intel_atomic_state *state, struct intel_crtc *crtc)
struct intel_crtc_state *crtc_state;
struct skl_ddb_entry ddb_slices;
enum pipe pipe = crtc->pipe;
+ unsigned int mbus_offset = 0;
u32 ddb_range_size;
u32 dbuf_slice_mask;
u32 start, end;
@@ -4160,6 +4178,7 @@ skl_crtc_allocate_ddb(struct intel_atomic_state *state, struct intel_crtc *crtc)
dbuf_slice_mask = new_dbuf_state->slices[pipe];
skl_ddb_entry_for_slices(dev_priv, dbuf_slice_mask, &ddb_slices);
+ mbus_offset = mbus_ddb_offset(dev_priv, dbuf_slice_mask);
ddb_range_size = skl_ddb_entry_size(&ddb_slices);
intel_crtc_dbuf_weights(new_dbuf_state, pipe,
@@ -4168,11 +4187,11 @@ skl_crtc_allocate_ddb(struct intel_atomic_state *state, struct intel_crtc *crtc)
start = ddb_range_size * weight_start / weight_total;
end = ddb_range_size * weight_end / weight_total;
- new_dbuf_state->ddb[pipe].start = ddb_slices.start + start;
- new_dbuf_state->ddb[pipe].end = ddb_slices.start + end;
-
+ new_dbuf_state->ddb[pipe].start = ddb_slices.start - mbus_offset + start;
+ new_dbuf_state->ddb[pipe].end = ddb_slices.start - mbus_offset + end;
out:
- if (skl_ddb_entry_equal(&old_dbuf_state->ddb[pipe],
+ if (old_dbuf_state->slices[pipe] == new_dbuf_state->slices[pipe] &&
+ skl_ddb_entry_equal(&old_dbuf_state->ddb[pipe],
&new_dbuf_state->ddb[pipe]))
return 0;
@@ -4184,7 +4203,12 @@ out:
if (IS_ERR(crtc_state))
return PTR_ERR(crtc_state);
- crtc_state->wm.skl.ddb = new_dbuf_state->ddb[pipe];
+ /*
+ * Used for checking overlaps, so we need absolute
+ * offsets instead of MBUS relative offsets.
+ */
+ crtc_state->wm.skl.ddb.start = mbus_offset + new_dbuf_state->ddb[pipe].start;
+ crtc_state->wm.skl.ddb.end = mbus_offset + new_dbuf_state->ddb[pipe].end;
drm_dbg_kms(&dev_priv->drm,
"[CRTC:%d:%s] dbuf slices 0x%x -> 0x%x, ddb (%d - %d) -> (%d - %d), active pipes 0x%x -> 0x%x\n",
@@ -4242,7 +4266,6 @@ skl_cursor_allocation(const struct intel_crtc_state *crtc_state,
static void skl_ddb_entry_init_from_hw(struct drm_i915_private *dev_priv,
struct skl_ddb_entry *entry, u32 reg)
{
-
entry->start = reg & DDB_ENTRY_MASK;
entry->end = (reg >> DDB_ENTRY_END_SHIFT) & DDB_ENTRY_MASK;
@@ -4367,6 +4390,7 @@ skl_plane_downscale_amount(const struct intel_crtc_state *crtc_state,
struct dbuf_slice_conf_entry {
u8 active_pipes;
u8 dbuf_mask[I915_MAX_PIPES];
+ bool join_mbus;
};
/*
@@ -4555,6 +4579,137 @@ static const struct dbuf_slice_conf_entry tgl_allowed_dbufs[] =
{}
};
+static const struct dbuf_slice_conf_entry adlp_allowed_dbufs[] = {
+ {
+ .active_pipes = BIT(PIPE_A),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2) | BIT(DBUF_S3) | BIT(DBUF_S4),
+ },
+ .join_mbus = true,
+ },
+ {
+ .active_pipes = BIT(PIPE_B),
+ .dbuf_mask = {
+ [PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2) | BIT(DBUF_S3) | BIT(DBUF_S4),
+ },
+ .join_mbus = true,
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_B),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_C),
+ .dbuf_mask = {
+ [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_C),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_B) | BIT(PIPE_C),
+ .dbuf_mask = {
+ [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_B) | BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_C) | BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_C) | BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ },
+ },
+ {}
+
+};
+
+static bool check_mbus_joined(u8 active_pipes,
+ const struct dbuf_slice_conf_entry *dbuf_slices)
+{
+ int i;
+
+ for (i = 0; dbuf_slices[i].active_pipes != 0; i++) {
+ if (dbuf_slices[i].active_pipes == active_pipes)
+ return dbuf_slices[i].join_mbus;
+ }
+ return false;
+}
+
+static bool adlp_check_mbus_joined(u8 active_pipes)
+{
+ return check_mbus_joined(active_pipes, adlp_allowed_dbufs);
+}
+
static u8 compute_dbuf_slices(enum pipe pipe, u8 active_pipes,
const struct dbuf_slice_conf_entry *dbuf_slices)
{
@@ -4594,12 +4749,19 @@ static u8 tgl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes)
return compute_dbuf_slices(pipe, active_pipes, tgl_allowed_dbufs);
}
+static u32 adlp_compute_dbuf_slices(enum pipe pipe, u32 active_pipes)
+{
+ return compute_dbuf_slices(pipe, active_pipes, adlp_allowed_dbufs);
+}
+
static u8 skl_compute_dbuf_slices(struct intel_crtc *crtc, u8 active_pipes)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
- if (DISPLAY_VER(dev_priv) == 12)
+ if (IS_ALDERLAKE_P(dev_priv))
+ return adlp_compute_dbuf_slices(pipe, active_pipes);
+ else if (DISPLAY_VER(dev_priv) == 12)
return tgl_compute_dbuf_slices(pipe, active_pipes);
else if (DISPLAY_VER(dev_priv) == 11)
return icl_compute_dbuf_slices(pipe, active_pipes);
@@ -5619,6 +5781,13 @@ void skl_write_plane_wm(struct intel_plane *plane,
skl_write_wm_level(dev_priv, PLANE_WM_TRANS(pipe, plane_id),
skl_plane_trans_wm(pipe_wm, plane_id));
+ if (HAS_HW_SAGV_WM(dev_priv)) {
+ skl_write_wm_level(dev_priv, PLANE_WM_SAGV(pipe, plane_id),
+ &wm->sagv.wm0);
+ skl_write_wm_level(dev_priv, PLANE_WM_SAGV_TRANS(pipe, plane_id),
+ &wm->sagv.trans_wm);
+ }
+
if (DISPLAY_VER(dev_priv) >= 11) {
skl_ddb_entry_write(dev_priv,
PLANE_BUF_CFG(pipe, plane_id), ddb_y);
@@ -5652,6 +5821,15 @@ void skl_write_cursor_wm(struct intel_plane *plane,
skl_write_wm_level(dev_priv, CUR_WM_TRANS(pipe),
skl_plane_trans_wm(pipe_wm, plane_id));
+ if (HAS_HW_SAGV_WM(dev_priv)) {
+ const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id];
+
+ skl_write_wm_level(dev_priv, CUR_WM_SAGV(pipe),
+ &wm->sagv.wm0);
+ skl_write_wm_level(dev_priv, CUR_WM_SAGV_TRANS(pipe),
+ &wm->sagv.trans_wm);
+ }
+
skl_ddb_entry_write(dev_priv, CUR_BUF_CFG(pipe), ddb);
}
@@ -5813,16 +5991,29 @@ skl_compute_ddb(struct intel_atomic_state *state)
new_dbuf_state->enabled_slices = intel_dbuf_enabled_slices(new_dbuf_state);
- if (old_dbuf_state->enabled_slices != new_dbuf_state->enabled_slices) {
+ if (IS_ALDERLAKE_P(dev_priv))
+ new_dbuf_state->joined_mbus = adlp_check_mbus_joined(new_dbuf_state->active_pipes);
+
+ if (old_dbuf_state->enabled_slices != new_dbuf_state->enabled_slices ||
+ old_dbuf_state->joined_mbus != new_dbuf_state->joined_mbus) {
ret = intel_atomic_serialize_global_state(&new_dbuf_state->base);
if (ret)
return ret;
+ if (old_dbuf_state->joined_mbus != new_dbuf_state->joined_mbus) {
+ /* TODO: Implement vblank synchronized MBUS joining changes */
+ ret = intel_modeset_all_pipes(state);
+ if (ret)
+ return ret;
+ }
+
drm_dbg_kms(&dev_priv->drm,
- "Enabled dbuf slices 0x%x -> 0x%x (total dbuf slices 0x%x)\n",
+ "Enabled dbuf slices 0x%x -> 0x%x (total dbuf slices 0x%x), mbus joined? %s->%s\n",
old_dbuf_state->enabled_slices,
new_dbuf_state->enabled_slices,
- INTEL_INFO(dev_priv)->dbuf.slice_mask);
+ INTEL_INFO(dev_priv)->dbuf.slice_mask,
+ yesno(old_dbuf_state->joined_mbus),
+ yesno(new_dbuf_state->joined_mbus));
}
for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
@@ -6016,6 +6207,15 @@ static bool skl_plane_selected_wm_equals(struct intel_plane *plane,
return false;
}
+ if (HAS_HW_SAGV_WM(i915)) {
+ const struct skl_plane_wm *old_wm = &old_pipe_wm->planes[plane->id];
+ const struct skl_plane_wm *new_wm = &new_pipe_wm->planes[plane->id];
+
+ if (!skl_wm_level_equals(&old_wm->sagv.wm0, &new_wm->sagv.wm0) ||
+ !skl_wm_level_equals(&old_wm->sagv.trans_wm, &new_wm->sagv.trans_wm))
+ return false;
+ }
+
return skl_wm_level_equals(skl_plane_trans_wm(old_pipe_wm, plane->id),
skl_plane_trans_wm(new_pipe_wm, plane->id));
}
@@ -6234,7 +6434,25 @@ void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
skl_wm_level_from_reg_val(val, &wm->trans_wm);
- if (DISPLAY_VER(dev_priv) >= 12) {
+ if (HAS_HW_SAGV_WM(dev_priv)) {
+ if (plane_id != PLANE_CURSOR)
+ val = intel_uncore_read(&dev_priv->uncore,
+ PLANE_WM_SAGV(pipe, plane_id));
+ else
+ val = intel_uncore_read(&dev_priv->uncore,
+ CUR_WM_SAGV(pipe));
+
+ skl_wm_level_from_reg_val(val, &wm->sagv.wm0);
+
+ if (plane_id != PLANE_CURSOR)
+ val = intel_uncore_read(&dev_priv->uncore,
+ PLANE_WM_SAGV_TRANS(pipe, plane_id));
+ else
+ val = intel_uncore_read(&dev_priv->uncore,
+ CUR_WM_SAGV_TRANS(pipe));
+
+ skl_wm_level_from_reg_val(val, &wm->sagv.trans_wm);
+ } else if (DISPLAY_VER(dev_priv) >= 12) {
wm->sagv.wm0 = wm->wm[0];
wm->sagv.trans_wm = wm->trans_wm;
}
@@ -6247,10 +6465,14 @@ void skl_wm_get_hw_state(struct drm_i915_private *dev_priv)
to_intel_dbuf_state(dev_priv->dbuf.obj.state);
struct intel_crtc *crtc;
+ if (IS_ALDERLAKE_P(dev_priv))
+ dbuf_state->joined_mbus = intel_de_read(dev_priv, MBUS_CTL) & MBUS_JOIN;
+
for_each_intel_crtc(&dev_priv->drm, crtc) {
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
enum pipe pipe = crtc->pipe;
+ unsigned int mbus_offset;
enum plane_id plane_id;
skl_pipe_wm_get_hw_state(crtc, &crtc_state->wm.skl.optimal);
@@ -6276,13 +6498,20 @@ void skl_wm_get_hw_state(struct drm_i915_private *dev_priv)
dbuf_state->weight[pipe] = intel_crtc_ddb_weight(crtc_state);
- crtc_state->wm.skl.ddb = dbuf_state->ddb[pipe];
+ /*
+ * Used for checking overlaps, so we need absolute
+ * offsets instead of MBUS relative offsets.
+ */
+ mbus_offset = mbus_ddb_offset(dev_priv, dbuf_state->slices[pipe]);
+ crtc_state->wm.skl.ddb.start = mbus_offset + dbuf_state->ddb[pipe].start;
+ crtc_state->wm.skl.ddb.end = mbus_offset + dbuf_state->ddb[pipe].end;
drm_dbg_kms(&dev_priv->drm,
- "[CRTC:%d:%s] dbuf slices 0x%x, ddb (%d - %d), active pipes 0x%x\n",
+ "[CRTC:%d:%s] dbuf slices 0x%x, ddb (%d - %d), active pipes 0x%x, mbus joined: %s\n",
crtc->base.base.id, crtc->base.name,
dbuf_state->slices[pipe], dbuf_state->ddb[pipe].start,
- dbuf_state->ddb[pipe].end, dbuf_state->active_pipes);
+ dbuf_state->ddb[pipe].end, dbuf_state->active_pipes,
+ yesno(dbuf_state->joined_mbus));
}
dbuf_state->enabled_slices = dev_priv->dbuf.enabled_slices;
@@ -7630,9 +7859,9 @@ void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
dev_priv->display.init_clock_gating = adlp_init_clock_gating;
else if (IS_DG1(dev_priv))
dev_priv->display.init_clock_gating = dg1_init_clock_gating;
- else if (IS_GEN(dev_priv, 12))
+ else if (GRAPHICS_VER(dev_priv) == 12)
dev_priv->display.init_clock_gating = gen12lp_init_clock_gating;
- else if (IS_GEN(dev_priv, 11))
+ else if (GRAPHICS_VER(dev_priv) == 11)
dev_priv->display.init_clock_gating = icl_init_clock_gating;
else if (IS_CANNONLAKE(dev_priv))
dev_priv->display.init_clock_gating = cnl_init_clock_gating;
@@ -7656,9 +7885,9 @@ void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
dev_priv->display.init_clock_gating = ivb_init_clock_gating;
else if (IS_VALLEYVIEW(dev_priv))
dev_priv->display.init_clock_gating = vlv_init_clock_gating;
- else if (IS_GEN(dev_priv, 6))
+ else if (GRAPHICS_VER(dev_priv) == 6)
dev_priv->display.init_clock_gating = gen6_init_clock_gating;
- else if (IS_GEN(dev_priv, 5))
+ else if (GRAPHICS_VER(dev_priv) == 5)
dev_priv->display.init_clock_gating = ilk_init_clock_gating;
else if (IS_G4X(dev_priv))
dev_priv->display.init_clock_gating = g4x_init_clock_gating;
@@ -7666,11 +7895,11 @@ void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
dev_priv->display.init_clock_gating = i965gm_init_clock_gating;
else if (IS_I965G(dev_priv))
dev_priv->display.init_clock_gating = i965g_init_clock_gating;
- else if (IS_GEN(dev_priv, 3))
+ else if (GRAPHICS_VER(dev_priv) == 3)
dev_priv->display.init_clock_gating = gen3_init_clock_gating;
else if (IS_I85X(dev_priv) || IS_I865G(dev_priv))
dev_priv->display.init_clock_gating = i85x_init_clock_gating;
- else if (IS_GEN(dev_priv, 2))
+ else if (GRAPHICS_VER(dev_priv) == 2)
dev_priv->display.init_clock_gating = i830_init_clock_gating;
else {
MISSING_CASE(INTEL_DEVID(dev_priv));
@@ -7684,7 +7913,7 @@ void intel_init_pm(struct drm_i915_private *dev_priv)
/* For cxsr */
if (IS_PINEVIEW(dev_priv))
pnv_get_mem_freq(dev_priv);
- else if (IS_GEN(dev_priv, 5))
+ else if (GRAPHICS_VER(dev_priv) == 5)
ilk_get_mem_freq(dev_priv);
if (intel_has_sagv(dev_priv))
@@ -7816,6 +8045,45 @@ int intel_dbuf_init(struct drm_i915_private *dev_priv)
return 0;
}
+/*
+ * Configure MBUS_CTL and the DBUF_CTL_S of each slice to the join_mbus state
+ * before updating the requested state of all DBUF slices.
+ */
+static void update_mbus_pre_enable(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ u32 mbus_ctl, dbuf_min_tracker_val;
+ enum dbuf_slice slice;
+ const struct intel_dbuf_state *dbuf_state =
+ intel_atomic_get_new_dbuf_state(state);
+
+ if (!IS_ALDERLAKE_P(dev_priv))
+ return;
+
+ /*
+ * TODO: Implement vblank synchronized MBUS joining changes.
+ * Must be properly coordinated with dbuf reprogramming.
+ */
+ if (dbuf_state->joined_mbus) {
+ mbus_ctl = MBUS_HASHING_MODE_1x4 | MBUS_JOIN |
+ MBUS_JOIN_PIPE_SELECT_NONE;
+ dbuf_min_tracker_val = DBUF_MIN_TRACKER_STATE_SERVICE(3);
+ } else {
+ mbus_ctl = MBUS_HASHING_MODE_2x2 |
+ MBUS_JOIN_PIPE_SELECT_NONE;
+ dbuf_min_tracker_val = DBUF_MIN_TRACKER_STATE_SERVICE(1);
+ }
+
+ intel_de_rmw(dev_priv, MBUS_CTL,
+ MBUS_HASHING_MODE_MASK | MBUS_JOIN |
+ MBUS_JOIN_PIPE_SELECT_MASK, mbus_ctl);
+
+ for_each_dbuf_slice(dev_priv, slice)
+ intel_de_rmw(dev_priv, DBUF_CTL_S(slice),
+ DBUF_MIN_TRACKER_STATE_SERVICE_MASK,
+ dbuf_min_tracker_val);
+}
+
void intel_dbuf_pre_plane_update(struct intel_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
@@ -7825,11 +8093,13 @@ void intel_dbuf_pre_plane_update(struct intel_atomic_state *state)
intel_atomic_get_old_dbuf_state(state);
if (!new_dbuf_state ||
- new_dbuf_state->enabled_slices == old_dbuf_state->enabled_slices)
+ ((new_dbuf_state->enabled_slices == old_dbuf_state->enabled_slices)
+ && (new_dbuf_state->joined_mbus == old_dbuf_state->joined_mbus)))
return;
WARN_ON(!new_dbuf_state->base.changed);
+ update_mbus_pre_enable(state);
gen9_dbuf_slices_update(dev_priv,
old_dbuf_state->enabled_slices |
new_dbuf_state->enabled_slices);
@@ -7844,7 +8114,8 @@ void intel_dbuf_post_plane_update(struct intel_atomic_state *state)
intel_atomic_get_old_dbuf_state(state);
if (!new_dbuf_state ||
- new_dbuf_state->enabled_slices == old_dbuf_state->enabled_slices)
+ ((new_dbuf_state->enabled_slices == old_dbuf_state->enabled_slices)
+ && (new_dbuf_state->joined_mbus == old_dbuf_state->joined_mbus)))
return;
WARN_ON(!new_dbuf_state->base.changed);
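
The ADL-P dbuf changes above key MBUS joining off the sentinel-terminated adlp_allowed_dbufs table: pipe A or pipe B alone owns all four DBUF slices with join_mbus set, and skl_compute_ddb() forces a full modeset whenever the joined state changes. A rough sketch of how such a {}-terminated table lookup works (an illustration, not the literal helper in the hunk above):

    /* Walk a {}-terminated config table; report whether MBUS should be joined. */
    static bool mbus_should_join(u8 active_pipes,
                                 const struct dbuf_slice_conf_entry *table)
    {
            int i;

            for (i = 0; table[i].active_pipes != 0; i++) {
                    if (table[i].active_pipes == active_pipes)
                            return table[i].join_mbus;
            }

            return false;        /* unknown pipe combination: stay un-joined */
    }
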
diff --git a/drivers/gpu/drm/i915/intel_pm.h b/drivers/gpu/drm/i915/intel_pm.h
index 669c8d505677..91f23b7f0af2 100644
--- a/drivers/gpu/drm/i915/intel_pm.h
+++ b/drivers/gpu/drm/i915/intel_pm.h
@@ -78,13 +78,11 @@ struct intel_dbuf_state {
struct skl_ddb_entry ddb[I915_MAX_PIPES];
unsigned int weight[I915_MAX_PIPES];
u8 slices[I915_MAX_PIPES];
-
u8 enabled_slices;
u8 active_pipes;
+ bool joined_mbus;
};
-int intel_dbuf_init(struct drm_i915_private *dev_priv);
-
struct intel_dbuf_state *
intel_atomic_get_dbuf_state(struct intel_atomic_state *state);
diff --git a/drivers/gpu/drm/i915/intel_sideband.c b/drivers/gpu/drm/i915/intel_sideband.c
index 0ec0cf191955..f0a82b37bd1a 100644
--- a/drivers/gpu/drm/i915/intel_sideband.c
+++ b/drivers/gpu/drm/i915/intel_sideband.c
@@ -430,7 +430,7 @@ static int __sandybridge_pcode_rw(struct drm_i915_private *i915,
if (is_read && val1)
*val1 = intel_uncore_read_fw(uncore, GEN6_PCODE_DATA1);
- if (INTEL_GEN(i915) > 6)
+ if (GRAPHICS_VER(i915) > 6)
return gen7_check_mailbox_status(mbox);
else
return gen6_check_mailbox_status(mbox);
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index b4aaf8b7109f..1bed8f666048 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -1635,7 +1635,7 @@ static int intel_uncore_fw_domains_init(struct intel_uncore *uncore)
#define fw_domain_init(uncore__, id__, set__, ack__) \
(ret ?: (ret = __fw_domain_init((uncore__), (id__), (set__), (ack__))))
- if (INTEL_GEN(i915) >= 11) {
+ if (GRAPHICS_VER(i915) >= 11) {
/* we'll prune the domains of missing engines later */
intel_engine_mask_t emask = INTEL_INFO(i915)->platform_engine_mask;
int i;
@@ -1665,7 +1665,7 @@ static int intel_uncore_fw_domains_init(struct intel_uncore *uncore)
FORCEWAKE_MEDIA_VEBOX_GEN11(i),
FORCEWAKE_ACK_MEDIA_VEBOX_GEN11(i));
}
- } else if (IS_GEN_RANGE(i915, 9, 10)) {
+ } else if (IS_GRAPHICS_VER(i915, 9, 10)) {
uncore->funcs.force_wake_get = fw_domains_get_with_fallback;
uncore->funcs.force_wake_put = fw_domains_put;
fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
@@ -1733,7 +1733,7 @@ static int intel_uncore_fw_domains_init(struct intel_uncore *uncore)
fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
FORCEWAKE, FORCEWAKE_ACK);
}
- } else if (IS_GEN(i915, 6)) {
+ } else if (GRAPHICS_VER(i915) == 6) {
uncore->funcs.force_wake_get =
fw_domains_get_with_thread_status;
uncore->funcs.force_wake_put = fw_domains_put;
@@ -1800,7 +1800,7 @@ static int uncore_mmio_setup(struct intel_uncore *uncore)
int mmio_bar;
int mmio_size;
- mmio_bar = IS_GEN(i915, 2) ? 1 : 0;
+ mmio_bar = GRAPHICS_VER(i915) == 2 ? 1 : 0;
/*
* Before gen4, the registers and the GTT are behind different BARs.
* However, from gen4 onwards, the registers and the GTT are shared
@@ -1810,7 +1810,7 @@ static int uncore_mmio_setup(struct intel_uncore *uncore)
* generations up to Ironlake.
* For dgfx chips register range is expanded to 4MB.
*/
- if (INTEL_GEN(i915) < 5)
+ if (GRAPHICS_VER(i915) < 5)
mmio_size = 512 * 1024;
else if (IS_DGFX(i915))
mmio_size = 4 * 1024 * 1024;
@@ -1849,7 +1849,7 @@ static void uncore_raw_init(struct intel_uncore *uncore)
if (intel_vgpu_active(uncore->i915)) {
ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, vgpu);
ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, vgpu);
- } else if (IS_GEN(uncore->i915, 5)) {
+ } else if (GRAPHICS_VER(uncore->i915) == 5) {
ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, gen5);
ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, gen5);
} else {
@@ -1870,7 +1870,7 @@ static int uncore_forcewake_init(struct intel_uncore *uncore)
return ret;
forcewake_early_sanitize(uncore, 0);
- if (IS_GEN_RANGE(i915, 6, 7)) {
+ if (IS_GRAPHICS_VER(i915, 6, 7)) {
ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen6);
if (IS_VALLEYVIEW(i915)) {
@@ -1879,7 +1879,7 @@ static int uncore_forcewake_init(struct intel_uncore *uncore)
} else {
ASSIGN_READ_MMIO_VFUNCS(uncore, gen6);
}
- } else if (IS_GEN(i915, 8)) {
+ } else if (GRAPHICS_VER(i915) == 8) {
if (IS_CHERRYVIEW(i915)) {
ASSIGN_FW_DOMAINS_TABLE(uncore, __chv_fw_ranges);
ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
@@ -1888,11 +1888,11 @@ static int uncore_forcewake_init(struct intel_uncore *uncore)
ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen8);
ASSIGN_READ_MMIO_VFUNCS(uncore, gen6);
}
- } else if (IS_GEN_RANGE(i915, 9, 10)) {
+ } else if (IS_GRAPHICS_VER(i915, 9, 10)) {
ASSIGN_FW_DOMAINS_TABLE(uncore, __gen9_fw_ranges);
ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable);
- } else if (IS_GEN(i915, 11)) {
+ } else if (GRAPHICS_VER(i915) == 11) {
ASSIGN_FW_DOMAINS_TABLE(uncore, __gen11_fw_ranges);
ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen11_fwtable);
ASSIGN_READ_MMIO_VFUNCS(uncore, gen11_fwtable);
@@ -1952,7 +1952,7 @@ int intel_uncore_init_mmio(struct intel_uncore *uncore)
if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
uncore->flags |= UNCORE_HAS_DBG_UNCLAIMED;
- if (IS_GEN_RANGE(i915, 6, 7))
+ if (IS_GRAPHICS_VER(i915, 6, 7))
uncore->flags |= UNCORE_HAS_FIFO;
/* clear out unclaimed reg detection bit */
@@ -1979,7 +1979,7 @@ void intel_uncore_prune_engine_fw_domains(struct intel_uncore *uncore,
enum forcewake_domain_id domain_id;
int i;
- if (!intel_uncore_has_forcewake(uncore) || INTEL_GEN(uncore->i915) < 11)
+ if (!intel_uncore_has_forcewake(uncore) || GRAPHICS_VER(uncore->i915) < 11)
return;
for (i = 0; i < I915_MAX_VCS; i++) {
diff --git a/drivers/gpu/drm/i915/intel_wopcm.c b/drivers/gpu/drm/i915/intel_wopcm.c
index ec776591e1cf..8309455f13ea 100644
--- a/drivers/gpu/drm/i915/intel_wopcm.c
+++ b/drivers/gpu/drm/i915/intel_wopcm.c
@@ -81,7 +81,7 @@ void intel_wopcm_init_early(struct intel_wopcm *wopcm)
if (!HAS_GT_UC(i915))
return;
- if (INTEL_GEN(i915) >= 11)
+ if (GRAPHICS_VER(i915) >= 11)
wopcm->size = GEN11_WOPCM_SIZE;
else
wopcm->size = GEN9_WOPCM_SIZE;
@@ -93,7 +93,7 @@ static u32 context_reserved_size(struct drm_i915_private *i915)
{
if (IS_GEN9_LP(i915))
return BXT_WOPCM_RC6_CTX_RESERVED;
- else if (INTEL_GEN(i915) >= 10)
+ else if (GRAPHICS_VER(i915) >= 10)
return CNL_WOPCM_HW_CTX_RESERVED;
else
return 0;
@@ -145,11 +145,11 @@ static bool check_hw_restrictions(struct drm_i915_private *i915,
u32 guc_wopcm_base, u32 guc_wopcm_size,
u32 huc_fw_size)
{
- if (IS_GEN(i915, 9) && !gen9_check_dword_gap(i915, guc_wopcm_base,
- guc_wopcm_size))
+ if (GRAPHICS_VER(i915) == 9 && !gen9_check_dword_gap(i915, guc_wopcm_base,
+ guc_wopcm_size))
return false;
- if (IS_GEN(i915, 9) &&
+ if (GRAPHICS_VER(i915) == 9 &&
!gen9_check_huc_fw_fits(i915, guc_wopcm_size, huc_fw_size))
return false;
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index 0a1472bb12bc..f843a5040706 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -1884,9 +1884,9 @@ static int igt_cs_tlb(void *arg)
u32 *cs = batch + i * 64 / sizeof(*cs);
u64 addr = (vm->total - PAGE_SIZE) + i * sizeof(u32);
- GEM_BUG_ON(INTEL_GEN(i915) < 6);
+ GEM_BUG_ON(GRAPHICS_VER(i915) < 6);
cs[0] = MI_STORE_DWORD_IMM_GEN4;
- if (INTEL_GEN(i915) >= 8) {
+ if (GRAPHICS_VER(i915) >= 8) {
cs[1] = lower_32_bits(addr);
cs[2] = upper_32_bits(addr);
cs[3] = i;
diff --git a/drivers/gpu/drm/i915/selftests/i915_perf.c b/drivers/gpu/drm/i915/selftests/i915_perf.c
index bfb0290967a1..9e9a6cb1d9e5 100644
--- a/drivers/gpu/drm/i915/selftests/i915_perf.c
+++ b/drivers/gpu/drm/i915/selftests/i915_perf.c
@@ -98,7 +98,7 @@ test_stream(struct i915_perf *perf)
I915_ENGINE_CLASS_RENDER,
0),
.sample_flags = SAMPLE_OA_REPORT,
- .oa_format = IS_GEN(perf->i915, 12) ?
+ .oa_format = GRAPHICS_VER(perf->i915) == 12 ?
I915_OA_FORMAT_A32u40_A4u32_B8_C8 : I915_OA_FORMAT_C4_B8,
};
struct i915_perf_stream *stream;
@@ -162,7 +162,7 @@ static int write_timestamp(struct i915_request *rq, int slot)
return PTR_ERR(cs);
len = 5;
- if (INTEL_GEN(rq->engine->i915) >= 8)
+ if (GRAPHICS_VER(rq->engine->i915) >= 8)
len++;
*cs++ = GFX_OP_PIPE_CONTROL(len);
@@ -363,7 +363,7 @@ static int live_noa_gpr(void *arg)
}
cmd = MI_STORE_REGISTER_MEM;
- if (INTEL_GEN(i915) >= 8)
+ if (GRAPHICS_VER(i915) >= 8)
cmd++;
cmd |= MI_USE_GGTT;
diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c
index ee8e753d98ce..db367a6721c5 100644
--- a/drivers/gpu/drm/i915/selftests/i915_request.c
+++ b/drivers/gpu/drm/i915/selftests/i915_request.c
@@ -963,7 +963,7 @@ out_batch:
static struct i915_vma *recursive_batch(struct drm_i915_private *i915)
{
struct drm_i915_gem_object *obj;
- const int gen = INTEL_GEN(i915);
+ const int ver = GRAPHICS_VER(i915);
struct i915_vma *vma;
u32 *cmd;
int err;
@@ -988,11 +988,11 @@ static struct i915_vma *recursive_batch(struct drm_i915_private *i915)
goto err;
}
- if (gen >= 8) {
+ if (ver >= 8) {
*cmd++ = MI_BATCH_BUFFER_START | 1 << 8 | 1;
*cmd++ = lower_32_bits(vma->node.start);
*cmd++ = upper_32_bits(vma->node.start);
- } else if (gen >= 6) {
+ } else if (ver >= 6) {
*cmd++ = MI_BATCH_BUFFER_START | 1 << 8;
*cmd++ = lower_32_bits(vma->node.start);
} else {
@@ -2482,7 +2482,7 @@ static int perf_request_latency(void *arg)
struct pm_qos_request qos;
int err = 0;
- if (INTEL_GEN(i915) < 8) /* per-engine CS timestamp, semaphores */
+ if (GRAPHICS_VER(i915) < 8) /* per-engine CS timestamp, semaphores */
return 0;
cpu_latency_qos_add_request(&qos, 0); /* disable cstates */
diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.c b/drivers/gpu/drm/i915/selftests/igt_spinner.c
index 5fe397b7d1d9..24d87d0fc747 100644
--- a/drivers/gpu/drm/i915/selftests/igt_spinner.c
+++ b/drivers/gpu/drm/i915/selftests/igt_spinner.c
@@ -174,15 +174,15 @@ igt_spinner_create_request(struct igt_spinner *spin,
batch = spin->batch;
- if (INTEL_GEN(rq->engine->i915) >= 8) {
+ if (GRAPHICS_VER(rq->engine->i915) >= 8) {
*batch++ = MI_STORE_DWORD_IMM_GEN4;
*batch++ = lower_32_bits(hws_address(hws, rq));
*batch++ = upper_32_bits(hws_address(hws, rq));
- } else if (INTEL_GEN(rq->engine->i915) >= 6) {
+ } else if (GRAPHICS_VER(rq->engine->i915) >= 6) {
*batch++ = MI_STORE_DWORD_IMM_GEN4;
*batch++ = 0;
*batch++ = hws_address(hws, rq);
- } else if (INTEL_GEN(rq->engine->i915) >= 4) {
+ } else if (GRAPHICS_VER(rq->engine->i915) >= 4) {
*batch++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
*batch++ = 0;
*batch++ = hws_address(hws, rq);
@@ -194,11 +194,11 @@ igt_spinner_create_request(struct igt_spinner *spin,
*batch++ = arbitration_command;
- if (INTEL_GEN(rq->engine->i915) >= 8)
+ if (GRAPHICS_VER(rq->engine->i915) >= 8)
*batch++ = MI_BATCH_BUFFER_START | BIT(8) | 1;
else if (IS_HASWELL(rq->engine->i915))
*batch++ = MI_BATCH_BUFFER_START | MI_BATCH_PPGTT_HSW;
- else if (INTEL_GEN(rq->engine->i915) >= 6)
+ else if (GRAPHICS_VER(rq->engine->i915) >= 6)
*batch++ = MI_BATCH_BUFFER_START;
else
*batch++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
@@ -216,7 +216,7 @@ igt_spinner_create_request(struct igt_spinner *spin,
}
flags = 0;
- if (INTEL_GEN(rq->engine->i915) <= 5)
+ if (GRAPHICS_VER(rq->engine->i915) <= 5)
flags |= I915_DISPATCH_SECURE;
err = engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, flags);
diff --git a/drivers/gpu/drm/i915/selftests/intel_uncore.c b/drivers/gpu/drm/i915/selftests/intel_uncore.c
index f76c9bcec735..8ef9e6a4ad05 100644
--- a/drivers/gpu/drm/i915/selftests/intel_uncore.c
+++ b/drivers/gpu/drm/i915/selftests/intel_uncore.c
@@ -321,7 +321,7 @@ static int live_fw_table(void *arg)
/* Confirm the table we load is still valid */
return intel_fw_table_check(gt->uncore->fw_domains_table,
gt->uncore->fw_domains_table_entries,
- INTEL_GEN(gt->i915) >= 9);
+ GRAPHICS_VER(gt->i915) >= 9);
}
int intel_uncore_live_selftests(struct drm_i915_private *i915)
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_color.c b/drivers/gpu/drm/mediatek/mtk_disp_color.c
index 63f411ab393b..6f4c80bbc0eb 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_color.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_color.c
@@ -30,9 +30,8 @@ struct mtk_disp_color_data {
unsigned int color_offset;
};
-/**
+/*
* struct mtk_disp_color - DISP_COLOR driver structure
- * @ddp_comp: structure containing type enum and hardware resources
* @crtc: associated crtc to report irq events to
* @data: platform colour driver data
*/
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_gamma.c b/drivers/gpu/drm/mediatek/mtk_disp_gamma.c
index 3ebf91e0ab41..3a5815ab4079 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_gamma.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_gamma.c
@@ -29,10 +29,8 @@ struct mtk_disp_gamma_data {
bool has_dither;
};
-/**
+/*
* struct mtk_disp_gamma - DISP_GAMMA driver structure
- * @ddp_comp - structure containing type enum and hardware resources
- * @crtc - associated crtc to report irq events to
*/
struct mtk_disp_gamma {
struct clk *clk;
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
index 961f87f8d4d1..fa9d79963cd3 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
@@ -66,9 +66,8 @@ struct mtk_disp_ovl_data {
bool smi_id_en;
};
-/**
+/*
* struct mtk_disp_ovl - DISP_OVL driver structure
- * @ddp_comp: structure containing type enum and hardware resources
* @crtc: associated crtc to report vblank events to
* @data: platform data
*/
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
index 728aaadfea8c..705f28ceb4dd 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
@@ -55,10 +55,8 @@ struct mtk_disp_rdma_data {
unsigned int fifo_size;
};
-/**
+/*
* struct mtk_disp_rdma - DISP_RDMA driver structure
- * @ddp_comp: structure containing type enum and hardware resources
- * @crtc: associated crtc to report irq events to
* @data: local driver data
*/
struct mtk_disp_rdma {
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index 10f693ea89d3..52536e7adb95 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -7,6 +7,8 @@ config DRM_MSM
depends on IOMMU_SUPPORT
depends on OF && COMMON_CLK
depends on QCOM_OCMEM || QCOM_OCMEM=n
+ depends on QCOM_LLCC || QCOM_LLCC=n
+ depends on QCOM_COMMAND_DB || QCOM_COMMAND_DB=n
select IOMMU_IO_PGTABLE
select QCOM_MDT_LOADER if ARCH_QCOM
select REGULATOR
@@ -15,7 +17,6 @@ config DRM_MSM
select SHMEM
select TMPFS
select QCOM_SCM if ARCH_QCOM
- select QCOM_COMMAND_DB if ARCH_QCOM
select WANT_DEV_COREDUMP
select SND_SOC_HDMI_CODEC if SND_SOC
select SYNC_FILE
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 56df86e5f740..a94a43de95ef 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -817,9 +817,9 @@ int msm_gem_sync_object(struct drm_gem_object *obj,
struct dma_fence *fence;
int i, ret;
- fobj = dma_resv_get_list(obj->resv);
+ fobj = dma_resv_shared_list(obj->resv);
if (!fobj || (fobj->shared_count == 0)) {
- fence = dma_resv_get_excl(obj->resv);
+ fence = dma_resv_excl_fence(obj->resv);
/* don't need to wait on our own fences, since ring is fifo */
if (fence && (fence->context != fctx->context)) {
ret = dma_fence_wait(fence, true);
@@ -915,8 +915,7 @@ int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
long ret;
- ret = dma_resv_wait_timeout_rcu(obj->resv, write,
- true, remain);
+ ret = dma_resv_wait_timeout(obj->resv, write, true, remain);
if (ret == 0)
return remain == 0 ? -EBUSY : -ETIMEDOUT;
else if (ret < 0)
@@ -1025,7 +1024,7 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m,
}
rcu_read_lock();
- fobj = rcu_dereference(robj->fence);
+ fobj = dma_resv_shared_list(robj);
if (fobj) {
unsigned int i, shared_count = fobj->shared_count;
@@ -1035,7 +1034,7 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m,
}
}
- fence = rcu_dereference(robj->fence_excl);
+ fence = dma_resv_excl_fence(robj);
if (fence)
describe_fence(fence, "Exclusive", m);
rcu_read_unlock();
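
msm moves to the renamed dma-resv accessors as well: dma_resv_get_list() -> dma_resv_shared_list(), dma_resv_get_excl() -> dma_resv_excl_fence(), and dma_resv_wait_timeout_rcu() -> dma_resv_wait_timeout(). A hedged sketch of the new accessor pattern, assuming resv is a valid struct dma_resv * and the usual RCU rules apply:

    struct dma_resv_list *fobj;
    struct dma_fence *fence;
    long ret;

    rcu_read_lock();
    fobj = dma_resv_shared_list(resv);   /* shared fence array, may be NULL */
    fence = dma_resv_excl_fence(resv);   /* exclusive fence, may be NULL */
    /* ... inspect the fences ... */
    rcu_read_unlock();

    /* Wait on all fences (wait_all = true), interruptibly, with a timeout.
     * 0 means timeout, <0 is an error, >0 is the remaining jiffies. */
    ret = dma_resv_wait_timeout(resv, true, true, msecs_to_jiffies(100));
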
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
index 0cb1f9d848d3..8d048bacd6f0 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
@@ -561,7 +561,7 @@ nv50_wndw_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state)
asyw->image.handle[0] = ctxdma->object.handle;
}
- asyw->state.fence = dma_resv_get_excl_rcu(nvbo->bo.base.resv);
+ asyw->state.fence = dma_resv_get_excl_unlocked(nvbo->bo.base.resv);
asyw->image.offset[0] = nvbo->offset;
if (wndw->func->prepare) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
index 0a9334deffe2..b45ec3086285 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -312,7 +312,7 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM |
NOUVEAU_GEM_DOMAIN_GART;
else
- if (chan->chan->push.buffer->bo.mem.mem_type == TTM_PL_VRAM)
+ if (chan->chan->push.buffer->bo.resource->mem_type == TTM_PL_VRAM)
init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM;
else
init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_GART;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index c390f24f25f3..520b1ea9d16c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -433,7 +433,7 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t domain, bool contig)
if (nvbo->bo.pin_count) {
bool error = evict;
- switch (bo->mem.mem_type) {
+ switch (bo->resource->mem_type) {
case TTM_PL_VRAM:
error |= !(domain & NOUVEAU_GEM_DOMAIN_VRAM);
break;
@@ -446,7 +446,7 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t domain, bool contig)
if (error) {
NV_ERROR(drm, "bo %p pinned elsewhere: "
"0x%08x vs 0x%08x\n", bo,
- bo->mem.mem_type, domain);
+ bo->resource->mem_type, domain);
ret = -EBUSY;
}
ttm_bo_pin(&nvbo->bo);
@@ -467,7 +467,7 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t domain, bool contig)
ttm_bo_pin(&nvbo->bo);
- switch (bo->mem.mem_type) {
+ switch (bo->resource->mem_type) {
case TTM_PL_VRAM:
drm->gem.vram_available -= bo->base.size;
break;
@@ -498,7 +498,7 @@ nouveau_bo_unpin(struct nouveau_bo *nvbo)
ttm_bo_unpin(&nvbo->bo);
if (!nvbo->bo.pin_count) {
- switch (bo->mem.mem_type) {
+ switch (bo->resource->mem_type) {
case TTM_PL_VRAM:
drm->gem.vram_available += bo->base.size;
break;
@@ -523,7 +523,7 @@ nouveau_bo_map(struct nouveau_bo *nvbo)
if (ret)
return ret;
- ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap);
+ ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.resource->num_pages, &nvbo->kmap);
ttm_bo_unreserve(&nvbo->bo);
return ret;
@@ -737,7 +737,7 @@ nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
{
struct nouveau_bo *nvbo = nouveau_bo(bo);
- switch (bo->mem.mem_type) {
+ switch (bo->resource->mem_type) {
case TTM_PL_VRAM:
nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART,
NOUVEAU_GEM_DOMAIN_CPU);
@@ -754,7 +754,7 @@ static int
nouveau_bo_move_prep(struct nouveau_drm *drm, struct ttm_buffer_object *bo,
struct ttm_resource *reg)
{
- struct nouveau_mem *old_mem = nouveau_mem(&bo->mem);
+ struct nouveau_mem *old_mem = nouveau_mem(bo->resource);
struct nouveau_mem *new_mem = nouveau_mem(reg);
struct nvif_vmm *vmm = &drm->client.vmm.vmm;
int ret;
@@ -809,7 +809,7 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict,
mutex_lock_nested(&cli->mutex, SINGLE_DEPTH_NESTING);
ret = nouveau_fence_sync(nouveau_bo(bo), chan, true, ctx->interruptible);
if (ret == 0) {
- ret = drm->ttm.move(chan, bo, &bo->mem, new_reg);
+ ret = drm->ttm.move(chan, bo, bo->resource, new_reg);
if (ret == 0) {
ret = nouveau_fence_new(chan, false, &fence);
if (ret == 0) {
@@ -918,12 +918,8 @@ static void nouveau_bo_move_ntfy(struct ttm_buffer_object *bo,
}
}
- if (new_reg) {
- if (new_reg->mm_node)
- nvbo->offset = (new_reg->start << PAGE_SHIFT);
- else
- nvbo->offset = 0;
- }
+ if (new_reg)
+ nvbo->offset = (new_reg->start << PAGE_SHIFT);
}
@@ -955,7 +951,7 @@ nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
{
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
struct drm_device *dev = drm->dev;
- struct dma_fence *fence = dma_resv_get_excl(bo->base.resv);
+ struct dma_fence *fence = dma_resv_excl_fence(bo->base.resv);
nv10_bo_put_tile_region(dev, *old_tile, fence);
*old_tile = new_tile;
@@ -969,7 +965,7 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict,
{
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
struct nouveau_bo *nvbo = nouveau_bo(bo);
- struct ttm_resource *old_reg = &bo->mem;
+ struct ttm_resource *old_reg = bo->resource;
struct nouveau_drm_tile *new_tile = NULL;
int ret = 0;
@@ -1009,7 +1005,7 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict,
if (old_reg->mem_type == TTM_PL_TT &&
new_reg->mem_type == TTM_PL_SYSTEM) {
nouveau_ttm_tt_unbind(bo->bdev, bo->ttm);
- ttm_resource_free(bo, &bo->mem);
+ ttm_resource_free(bo, &bo->resource);
ttm_bo_assign_mem(bo, new_reg);
goto out;
}
@@ -1045,7 +1041,7 @@ out:
}
out_ntfy:
if (ret) {
- nouveau_bo_move_ntfy(bo, &bo->mem);
+ nouveau_bo_move_ntfy(bo, bo->resource);
}
return ret;
}
@@ -1170,7 +1166,7 @@ out:
list_del_init(&nvbo->io_reserve_lru);
drm_vma_node_unmap(&nvbo->bo.base.vma_node,
bdev->dev_mapping);
- nouveau_ttm_io_mem_free_locked(drm, &nvbo->bo.mem);
+ nouveau_ttm_io_mem_free_locked(drm, nvbo->bo.resource);
goto retry;
}
@@ -1200,12 +1196,12 @@ vm_fault_t nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
/* as long as the bo isn't in vram, and isn't tiled, we've got
* nothing to do here.
*/
- if (bo->mem.mem_type != TTM_PL_VRAM) {
+ if (bo->resource->mem_type != TTM_PL_VRAM) {
if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA ||
!nvbo->kind)
return 0;
- if (bo->mem.mem_type != TTM_PL_SYSTEM)
+ if (bo->resource->mem_type != TTM_PL_SYSTEM)
return 0;
nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART, 0);
@@ -1213,7 +1209,7 @@ vm_fault_t nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
} else {
/* make sure bo is in mappable vram */
if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA ||
- bo->mem.start + bo->mem.num_pages < mappable)
+ bo->resource->start + bo->resource->num_pages < mappable)
return 0;
for (i = 0; i < nvbo->placement.num_placement; ++i) {
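
The nouveau conversion above is the TTM-wide switch from the embedded bo->mem to the bo->resource pointer, so the placement is now reached through a separately allocated struct ttm_resource. A minimal before/after sketch, assuming a BO with a valid resource:

    /* Old layout: struct ttm_resource embedded in the BO as bo->mem. */
    static bool bo_in_vram_old(struct ttm_buffer_object *bo)
    {
            return bo->mem.mem_type == TTM_PL_VRAM;
    }

    /* New layout: the resource lives behind a pointer and may be replaced. */
    static bool bo_in_vram_new(struct ttm_buffer_object *bo)
    {
            return bo->resource->mem_type == TTM_PL_VRAM;
    }
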
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c
index 7cfac265fd45..40362600eed2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.c
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.c
@@ -212,7 +212,7 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
args.start = 0;
args.limit = chan->vmm->vmm.limit - 1;
} else
- if (chan->push.buffer->bo.mem.mem_type == TTM_PL_VRAM) {
+ if (chan->push.buffer->bo.resource->mem_type == TTM_PL_VRAM) {
if (device->info.family == NV_DEVICE_INFO_V0_TNT) {
/* nv04 vram pushbuf hack, retarget to its location in
* the framebuffer bar rather than direct vram access..
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 93ac78bda750..4f9b3aa5deda 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -378,7 +378,7 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
FBINFO_HWACCEL_FILLRECT |
FBINFO_HWACCEL_IMAGEBLIT;
info->fbops = &nouveau_fbcon_sw_ops;
- info->fix.smem_start = nvbo->bo.mem.bus.offset;
+ info->fix.smem_start = nvbo->bo.resource->bus.offset;
info->fix.smem_len = nvbo->bo.base.size;
info->screen_base = nvbo_kmap_obj_iovirtual(nvbo);
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index e5dcbf67de7e..6b43918035df 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -355,8 +355,8 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool e
return ret;
}
- fobj = dma_resv_get_list(resv);
- fence = dma_resv_get_excl(resv);
+ fobj = dma_resv_shared_list(resv);
+ fence = dma_resv_excl_fence(resv);
if (fence && (!exclusive || !fobj || !fobj->shared_count)) {
struct nouveau_channel *prev = NULL;
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 722e1decc202..5b27845075a1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -276,7 +276,7 @@ nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
if (is_power_of_2(nvbo->valid_domains))
rep->domain = nvbo->valid_domains;
- else if (nvbo->bo.mem.mem_type == TTM_PL_TT)
+ else if (nvbo->bo.resource->mem_type == TTM_PL_TT)
rep->domain = NOUVEAU_GEM_DOMAIN_GART;
else
rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;
@@ -347,11 +347,11 @@ nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
valid_domains &= ~(NOUVEAU_GEM_DOMAIN_VRAM | NOUVEAU_GEM_DOMAIN_GART);
if ((domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
- bo->mem.mem_type == TTM_PL_VRAM)
+ bo->resource->mem_type == TTM_PL_VRAM)
pref_domains |= NOUVEAU_GEM_DOMAIN_VRAM;
else if ((domains & NOUVEAU_GEM_DOMAIN_GART) &&
- bo->mem.mem_type == TTM_PL_TT)
+ bo->resource->mem_type == TTM_PL_TT)
pref_domains |= NOUVEAU_GEM_DOMAIN_GART;
else if (domains & NOUVEAU_GEM_DOMAIN_VRAM)
@@ -561,13 +561,13 @@ validate_list(struct nouveau_channel *chan, struct nouveau_cli *cli,
if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
if (nvbo->offset == b->presumed.offset &&
- ((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
+ ((nvbo->bo.resource->mem_type == TTM_PL_VRAM &&
b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
- (nvbo->bo.mem.mem_type == TTM_PL_TT &&
+ (nvbo->bo.resource->mem_type == TTM_PL_TT &&
b->presumed.domain & NOUVEAU_GEM_DOMAIN_GART)))
continue;
- if (nvbo->bo.mem.mem_type == TTM_PL_TT)
+ if (nvbo->bo.resource->mem_type == TTM_PL_TT)
b->presumed.domain = NOUVEAU_GEM_DOMAIN_GART;
else
b->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM;
@@ -681,7 +681,7 @@ nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
}
if (!nvbo->kmap.virtual) {
- ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages,
+ ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.resource->num_pages,
&nvbo->kmap);
if (ret) {
NV_PRINTK(err, cli, "failed kmap for reloc\n");
@@ -870,7 +870,7 @@ revalidate:
if (unlikely(cmd != req->suffix0)) {
if (!nvbo->kmap.virtual) {
ret = ttm_bo_kmap(&nvbo->bo, 0,
- nvbo->bo.mem.
+ nvbo->bo.resource->
num_pages,
&nvbo->kmap);
if (ret) {
@@ -964,8 +964,8 @@ nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
return -ENOENT;
nvbo = nouveau_gem_object(gem);
- lret = dma_resv_wait_timeout_rcu(nvbo->bo.base.resv, write, true,
- no_wait ? 0 : 30 * HZ);
+ lret = dma_resv_wait_timeout(nvbo->bo.base.resv, write, true,
+ no_wait ? 0 : 30 * HZ);
if (!lret)
ret = -EBUSY;
else if (lret > 0)
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index a1049e9feee1..0de6549fb875 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -178,25 +178,24 @@ void
nouveau_mem_del(struct ttm_resource *reg)
{
struct nouveau_mem *mem = nouveau_mem(reg);
- if (!mem)
- return;
+
nouveau_mem_fini(mem);
- kfree(reg->mm_node);
- reg->mm_node = NULL;
+ kfree(mem);
}
int
nouveau_mem_new(struct nouveau_cli *cli, u8 kind, u8 comp,
- struct ttm_resource *reg)
+ struct ttm_resource **res)
{
struct nouveau_mem *mem;
if (!(mem = kzalloc(sizeof(*mem), GFP_KERNEL)))
return -ENOMEM;
+
mem->cli = cli;
mem->kind = kind;
mem->comp = comp;
- reg->mm_node = mem;
+ *res = &mem->base;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.h b/drivers/gpu/drm/nouveau/nouveau_mem.h
index 7df3848e85aa..2c01166a90f2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.h
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.h
@@ -6,13 +6,8 @@ struct ttm_tt;
#include <nvif/mem.h>
#include <nvif/vmm.h>
-static inline struct nouveau_mem *
-nouveau_mem(struct ttm_resource *reg)
-{
- return reg->mm_node;
-}
-
struct nouveau_mem {
+ struct ttm_resource base;
struct nouveau_cli *cli;
u8 kind;
u8 comp;
@@ -20,8 +15,14 @@ struct nouveau_mem {
struct nvif_vma vma[2];
};
+static inline struct nouveau_mem *
+nouveau_mem(struct ttm_resource *reg)
+{
+ return container_of(reg, struct nouveau_mem, base);
+}
+
int nouveau_mem_new(struct nouveau_cli *, u8 kind, u8 comp,
- struct ttm_resource *);
+ struct ttm_resource **);
void nouveau_mem_del(struct ttm_resource *);
int nouveau_mem_vram(struct ttm_resource *, bool contig, u8 page);
int nouveau_mem_host(struct ttm_resource *, struct ttm_tt *);
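Aside: the nouveau_mem.h hunk above replaces the mm_node back-pointer lookup with subclassing, i.e. the driver type embeds the generic struct ttm_resource as its first member and recovers the wrapper with container_of(). A minimal standalone sketch of that pattern follows; every *_example name is invented here purely for illustration and is not part of the patch.

        #include <stddef.h>
        #include <stdio.h>

        /* Stand-ins for the kernel types, reduced to what the sketch needs. */
        struct res_example {
                unsigned long start;
        };

        struct drv_mem_example {
                struct res_example base;        /* embedded generic resource */
                int kind;                       /* driver-private state */
        };

        /* The kernel's container_of(), simplified. */
        #define container_of_example(ptr, type, member) \
                ((type *)((char *)(ptr) - offsetof(type, member)))

        static struct drv_mem_example *drv_mem_example(struct res_example *res)
        {
                return container_of_example(res, struct drv_mem_example, base);
        }

        int main(void)
        {
                struct drv_mem_example mem = { .base.start = 4, .kind = 7 };
                struct res_example *res = &mem.base;    /* what the core hands back */

                printf("kind=%d\n", drv_mem_example(res)->kind);
                return 0;
        }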
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index 65430912ff72..f4c2e46b6fe1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -26,6 +26,8 @@
#include <linux/limits.h>
#include <linux/swiotlb.h>
+#include <drm/ttm/ttm_range_manager.h>
+
#include "nouveau_drv.h"
#include "nouveau_gem.h"
#include "nouveau_mem.h"
@@ -43,7 +45,7 @@ static int
nouveau_vram_manager_new(struct ttm_resource_manager *man,
struct ttm_buffer_object *bo,
const struct ttm_place *place,
- struct ttm_resource *reg)
+ struct ttm_resource **res)
{
struct nouveau_bo *nvbo = nouveau_bo(bo);
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
@@ -52,13 +54,15 @@ nouveau_vram_manager_new(struct ttm_resource_manager *man,
if (drm->client.device.info.ram_size == 0)
return -ENOMEM;
- ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, reg);
+ ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, res);
if (ret)
return ret;
- ret = nouveau_mem_vram(reg, nvbo->contig, nvbo->page);
+ ttm_resource_init(bo, place, *res);
+
+ ret = nouveau_mem_vram(*res, nvbo->contig, nvbo->page);
if (ret) {
- nouveau_mem_del(reg);
+ nouveau_mem_del(*res);
return ret;
}
@@ -74,17 +78,18 @@ static int
nouveau_gart_manager_new(struct ttm_resource_manager *man,
struct ttm_buffer_object *bo,
const struct ttm_place *place,
- struct ttm_resource *reg)
+ struct ttm_resource **res)
{
struct nouveau_bo *nvbo = nouveau_bo(bo);
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
int ret;
- ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, reg);
+ ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, res);
if (ret)
return ret;
- reg->start = 0;
+ ttm_resource_init(bo, place, *res);
+ (*res)->start = 0;
return 0;
}
@@ -97,26 +102,27 @@ static int
nv04_gart_manager_new(struct ttm_resource_manager *man,
struct ttm_buffer_object *bo,
const struct ttm_place *place,
- struct ttm_resource *reg)
+ struct ttm_resource **res)
{
struct nouveau_bo *nvbo = nouveau_bo(bo);
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
struct nouveau_mem *mem;
int ret;
- ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, reg);
- mem = nouveau_mem(reg);
+ ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, res);
if (ret)
return ret;
+ mem = nouveau_mem(*res);
+ ttm_resource_init(bo, place, *res);
ret = nvif_vmm_get(&mem->cli->vmm.vmm, PTES, false, 12, 0,
- (long)reg->num_pages << PAGE_SHIFT, &mem->vma[0]);
+ (long)(*res)->num_pages << PAGE_SHIFT, &mem->vma[0]);
if (ret) {
- nouveau_mem_del(reg);
+ nouveau_mem_del(*res);
return ret;
}
- reg->start = mem->vma[0].addr >> PAGE_SHIFT;
+ (*res)->start = mem->vma[0].addr >> PAGE_SHIFT;
return 0;
}
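The three nouveau manager hunks above all follow the same reworked contract: the new() hook now allocates the subclassed resource itself, initialises the embedded base with ttm_resource_init(), and returns it through **res, while the teardown path simply frees the subclass. A hedged sketch of that shape, with example_* names invented here and the surrounding TTM declarations assumed from the hunks:

        /* Assumes <drm/ttm/ttm_resource.h>, <drm/ttm/ttm_placement.h>, <linux/slab.h>. */
        struct example_mem {
                struct ttm_resource base;       /* embedded generic resource */
                u8 kind;                        /* driver-private bits */
        };

        static int example_manager_new(struct ttm_resource_manager *man,
                                       struct ttm_buffer_object *bo,
                                       const struct ttm_place *place,
                                       struct ttm_resource **res)
        {
                struct example_mem *mem = kzalloc(sizeof(*mem), GFP_KERNEL);

                if (!mem)
                        return -ENOMEM;

                /* Fill in mem_type, placement, num_pages, ... from bo/place. */
                ttm_resource_init(bo, place, &mem->base);
                *res = &mem->base;
                return 0;
        }

        static void example_manager_del(struct ttm_resource_manager *man,
                                        struct ttm_resource *res)
        {
                kfree(container_of(res, struct example_mem, base));
        }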
diff --git a/drivers/gpu/drm/nouveau/nouveau_vmm.c b/drivers/gpu/drm/nouveau/nouveau_vmm.c
index a49e88129c92..67d6619fcd5e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vmm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_vmm.c
@@ -77,7 +77,7 @@ int
nouveau_vma_new(struct nouveau_bo *nvbo, struct nouveau_vmm *vmm,
struct nouveau_vma **pvma)
{
- struct nouveau_mem *mem = nouveau_mem(&nvbo->bo.mem);
+ struct nouveau_mem *mem = nouveau_mem(nvbo->bo.resource);
struct nouveau_vma *vma;
struct nvif_vma tmp;
int ret;
@@ -96,7 +96,7 @@ nouveau_vma_new(struct nouveau_bo *nvbo, struct nouveau_vmm *vmm,
vma->fence = NULL;
list_add_tail(&vma->head, &nvbo->vma_list);
- if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM &&
+ if (nvbo->bo.resource->mem_type != TTM_PL_SYSTEM &&
mem->mem.page == nvbo->page) {
ret = nvif_vmm_get(&vmm->vmm, LAZY, false, mem->mem.page, 0,
mem->mem.size, &tmp);
diff --git a/drivers/gpu/drm/nouveau/nv17_fence.c b/drivers/gpu/drm/nouveau/nv17_fence.c
index b1cd8d7dd87d..07c2e0878c24 100644
--- a/drivers/gpu/drm/nouveau/nv17_fence.c
+++ b/drivers/gpu/drm/nouveau/nv17_fence.c
@@ -77,8 +77,8 @@ static int
nv17_fence_context_new(struct nouveau_channel *chan)
{
struct nv10_fence_priv *priv = chan->drm->fence;
+ struct ttm_resource *reg = priv->bo->bo.resource;
struct nv10_fence_chan *fctx;
- struct ttm_resource *reg = &priv->bo->bo.mem;
u32 start = reg->start * PAGE_SIZE;
u32 limit = start + priv->bo->bo.base.size - 1;
int ret = 0;
diff --git a/drivers/gpu/drm/nouveau/nv50_fence.c b/drivers/gpu/drm/nouveau/nv50_fence.c
index 1625826505f6..ea1e1f480bfe 100644
--- a/drivers/gpu/drm/nouveau/nv50_fence.c
+++ b/drivers/gpu/drm/nouveau/nv50_fence.c
@@ -37,7 +37,7 @@ nv50_fence_context_new(struct nouveau_channel *chan)
{
struct nv10_fence_priv *priv = chan->drm->fence;
struct nv10_fence_chan *fctx;
- struct ttm_resource *reg = &priv->bo->bo.mem;
+ struct ttm_resource *reg = priv->bo->bo.resource;
u32 start = reg->start * PAGE_SIZE;
u32 limit = start + priv->bo->bo.base.size - 1;
int ret;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/tu102.c
index 58db83ebadc5..a96084b34a78 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/tu102.c
@@ -46,7 +46,7 @@ tu102_mc_intr_update(struct tu102_mc *mc)
nvkm_wr32(device, 0xb81610, 0x6);
}
-void
+static void
tu102_mc_intr_unarm(struct nvkm_mc *base)
{
struct tu102_mc *mc = tu102_mc(base);
@@ -58,7 +58,7 @@ tu102_mc_intr_unarm(struct nvkm_mc *base)
spin_unlock_irqrestore(&mc->lock, flags);
}
-void
+static void
tu102_mc_intr_rearm(struct nvkm_mc *base)
{
struct tu102_mc *mc = tu102_mc(base);
@@ -70,7 +70,7 @@ tu102_mc_intr_rearm(struct nvkm_mc *base)
spin_unlock_irqrestore(&mc->lock, flags);
}
-void
+static void
tu102_mc_intr_mask(struct nvkm_mc *base, u32 mask, u32 intr)
{
struct tu102_mc *mc = tu102_mc(base);
diff --git a/drivers/gpu/drm/panel/panel-elida-kd35t133.c b/drivers/gpu/drm/panel/panel-elida-kd35t133.c
index fe5ac3ef9018..4787f0833264 100644
--- a/drivers/gpu/drm/panel/panel-elida-kd35t133.c
+++ b/drivers/gpu/drm/panel/panel-elida-kd35t133.c
@@ -42,6 +42,7 @@ struct kd35t133 {
struct gpio_desc *reset_gpio;
struct regulator *vdd;
struct regulator *iovcc;
+ enum drm_panel_orientation orientation;
bool prepared;
};
@@ -216,6 +217,7 @@ static int kd35t133_get_modes(struct drm_panel *panel,
connector->display_info.width_mm = mode->width_mm;
connector->display_info.height_mm = mode->height_mm;
drm_mode_probed_add(connector, mode);
+ drm_connector_set_panel_orientation(connector, ctx->orientation);
return 1;
}
@@ -258,6 +260,12 @@ static int kd35t133_probe(struct mipi_dsi_device *dsi)
return ret;
}
+ ret = of_drm_get_panel_orientation(dev->of_node, &ctx->orientation);
+ if (ret < 0) {
+ dev_err(dev, "%pOF: failed to get orientation %d\n", dev->of_node, ret);
+ return ret;
+ }
+
mipi_dsi_set_drvdata(dsi, ctx);
ctx->dev = dev;
diff --git a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
index 5e9ccefb88f6..2229f1af2ca8 100644
--- a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
+++ b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
@@ -29,7 +29,7 @@
* DEALINGS IN THE SOFTWARE.
*/
-/**
+/*
* Raspberry Pi 7" touchscreen panel driver.
*
* The 7" touchscreen consists of a DPI LCD panel, a Toshiba
diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7701.c b/drivers/gpu/drm/panel/panel-sitronix-st7701.c
index 4d2a149b202c..320a2a8fd459 100644
--- a/drivers/gpu/drm/panel/panel-sitronix-st7701.c
+++ b/drivers/gpu/drm/panel/panel-sitronix-st7701.c
@@ -38,7 +38,7 @@
#define DSI_CMD2_BK1_SPD2 0xC2 /* Source EQ2 Setting */
#define DSI_CMD2_BK1_MIPISET1 0xD0 /* MIPI Setting 1 */
-/**
+/*
* Command2 with BK function selection.
*
* BIT[4, 0]: [CN2, BKXSEL]
diff --git a/drivers/gpu/drm/panfrost/panfrost_device.h b/drivers/gpu/drm/panfrost/panfrost_device.h
index 597cf1459b0a..f614e98771e4 100644
--- a/drivers/gpu/drm/panfrost/panfrost_device.h
+++ b/drivers/gpu/drm/panfrost/panfrost_device.h
@@ -45,6 +45,7 @@ struct panfrost_features {
u32 thread_max_workgroup_sz;
u32 thread_max_barrier_sz;
u32 coherency_features;
+ u32 afbc_features;
u32 texture_features[4];
u32 js_features[16];
diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c
index ca07098a6141..075ec0ef746c 100644
--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
+++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
@@ -63,6 +63,7 @@ static int panfrost_ioctl_get_param(struct drm_device *ddev, void *data, struct
PANFROST_FEATURE(THREAD_MAX_BARRIER_SZ,
thread_max_barrier_sz);
PANFROST_FEATURE(COHERENCY_FEATURES, coherency_features);
+ PANFROST_FEATURE(AFBC_FEATURES, afbc_features);
PANFROST_FEATURE_ARRAY(TEXTURE_FEATURES, texture_features, 3);
PANFROST_FEATURE_ARRAY(JS_FEATURES, js_features, 15);
PANFROST_FEATURE(NR_CORE_GROUPS, nr_core_groups);
@@ -311,8 +312,7 @@ panfrost_ioctl_wait_bo(struct drm_device *dev, void *data,
if (!gem_obj)
return -ENOENT;
- ret = dma_resv_wait_timeout_rcu(gem_obj->resv, true,
- true, timeout);
+ ret = dma_resv_wait_timeout(gem_obj->resv, true, true, timeout);
if (!ret)
ret = timeout ? -ETIMEDOUT : -EBUSY;
@@ -547,6 +547,7 @@ DEFINE_DRM_GEM_FOPS(panfrost_drm_driver_fops);
* Panfrost driver version:
* - 1.0 - initial interface
* - 1.1 - adds HEAP and NOEXEC flags for CREATE_BO
+ * - 1.2 - adds AFBC_FEATURES query
*/
static const struct drm_driver panfrost_drm_driver = {
.driver_features = DRIVER_RENDER | DRIVER_GEM | DRIVER_SYNCOBJ,
@@ -559,7 +560,7 @@ static const struct drm_driver panfrost_drm_driver = {
.desc = "panfrost DRM",
.date = "20180908",
.major = 1,
- .minor = 1,
+ .minor = 2,
.gem_create_object = panfrost_gem_create_object,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
diff --git a/drivers/gpu/drm/panfrost/panfrost_gpu.c b/drivers/gpu/drm/panfrost/panfrost_gpu.c
index 2aae636f1cf5..0e70e27fd8c3 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gpu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_gpu.c
@@ -228,6 +228,7 @@ static void panfrost_gpu_init_features(struct panfrost_device *pfdev)
pfdev->features.thread_max_workgroup_sz = gpu_read(pfdev, GPU_THREAD_MAX_WORKGROUP_SIZE);
pfdev->features.thread_max_barrier_sz = gpu_read(pfdev, GPU_THREAD_MAX_BARRIER_SIZE);
pfdev->features.coherency_features = gpu_read(pfdev, GPU_COHERENCY_FEATURES);
+ pfdev->features.afbc_features = gpu_read(pfdev, GPU_AFBC_FEATURES);
for (i = 0; i < 4; i++)
pfdev->features.texture_features[i] = gpu_read(pfdev, GPU_TEXTURE_FEATURES(i));
diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c
index 6003cfeb1322..2df3e999a38d 100644
--- a/drivers/gpu/drm/panfrost/panfrost_job.c
+++ b/drivers/gpu/drm/panfrost/panfrost_job.c
@@ -203,7 +203,7 @@ static void panfrost_acquire_object_fences(struct drm_gem_object **bos,
int i;
for (i = 0; i < bo_count; i++)
- implicit_fences[i] = dma_resv_get_excl_rcu(bos[i]->resv);
+ implicit_fences[i] = dma_resv_get_excl_unlocked(bos[i]->resv);
}
static void panfrost_attach_object_fences(struct drm_gem_object **bos,
diff --git a/drivers/gpu/drm/panfrost/panfrost_regs.h b/drivers/gpu/drm/panfrost/panfrost_regs.h
index eddaa62ad8b0..dc9df5457f1c 100644
--- a/drivers/gpu/drm/panfrost/panfrost_regs.h
+++ b/drivers/gpu/drm/panfrost/panfrost_regs.h
@@ -82,6 +82,7 @@
#define GPU_TEXTURE_FEATURES(n) (0x0B0 + ((n) * 4))
#define GPU_JS_FEATURES(n) (0x0C0 + ((n) * 4))
+#define GPU_AFBC_FEATURES (0x4C) /* (RO) AFBC support on Bifrost */
#define GPU_SHADER_PRESENT_LO 0x100 /* (RO) Shader core present bitmap, low word */
#define GPU_SHADER_PRESENT_HI 0x104 /* (RO) Shader core present bitmap, high word */
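The panfrost hunks above wire up a new GET_PARAM query (the PANFROST_FEATURE(AFBC_FEATURES, ...) entry implies a matching DRM_PANFROST_PARAM_AFBC_FEATURES uapi constant added outside this excerpt) and bump the driver minor to 1.2 so userspace can detect it. A hedged userspace sketch of probing the value, assuming libdrm and the updated panfrost_drm.h uapi header are available:

        #include <stdint.h>
        #include <xf86drm.h>
        #include "panfrost_drm.h"       /* uapi header; exact include path depends on the tree */

        /* Returns 0 and fills *value on success; older kernels reject the param. */
        static int example_query_afbc(int fd, uint64_t *value)
        {
                struct drm_panfrost_get_param get = {
                        .param = DRM_PANFROST_PARAM_AFBC_FEATURES,
                };
                int ret = drmIoctl(fd, DRM_IOCTL_PANFROST_GET_PARAM, &get);

                if (ret)
                        return ret;
                *value = get.value;
                return 0;
        }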
diff --git a/drivers/gpu/drm/pl111/Kconfig b/drivers/gpu/drm/pl111/Kconfig
index 80f6748055e3..3aae387a96af 100644
--- a/drivers/gpu/drm/pl111/Kconfig
+++ b/drivers/gpu/drm/pl111/Kconfig
@@ -3,6 +3,7 @@ config DRM_PL111
tristate "DRM Support for PL111 CLCD Controller"
depends on DRM
depends on ARM || ARM64 || COMPILE_TEST
+ depends on VEXPRESS_CONFIG || VEXPRESS_CONFIG=n
depends on COMMON_CLK
select DRM_KMS_HELPER
select DRM_KMS_CMA_HELPER
diff --git a/drivers/gpu/drm/qxl/qxl_debugfs.c b/drivers/gpu/drm/qxl/qxl_debugfs.c
index 183d15e2cf58..1f9a59601bb1 100644
--- a/drivers/gpu/drm/qxl/qxl_debugfs.c
+++ b/drivers/gpu/drm/qxl/qxl_debugfs.c
@@ -61,7 +61,7 @@ qxl_debugfs_buffers_info(struct seq_file *m, void *data)
int rel;
rcu_read_lock();
- fobj = rcu_dereference(bo->tbo.base.resv->fence);
+ fobj = dma_resv_shared_list(bo->tbo.base.resv);
rel = fobj ? fobj->shared_count : 0;
rcu_read_unlock();
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index 20a0f3ab84ad..dd6abee55f56 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -292,12 +292,12 @@ qxl_bo_physical_address(struct qxl_device *qdev, struct qxl_bo *bo,
unsigned long offset)
{
struct qxl_memslot *slot =
- (bo->tbo.mem.mem_type == TTM_PL_VRAM)
+ (bo->tbo.resource->mem_type == TTM_PL_VRAM)
? &qdev->main_slot : &qdev->surfaces_slot;
- /* TODO - need to hold one of the locks to read bo->tbo.mem.start */
+ /* TODO - need to hold one of the locks to read bo->tbo.resource->start */
- return slot->high_bits | ((bo->tbo.mem.start << PAGE_SHIFT) + offset);
+ return slot->high_bits | ((bo->tbo.resource->start << PAGE_SHIFT) + offset);
}
/* qxl_display.c */
diff --git a/drivers/gpu/drm/qxl/qxl_dumb.c b/drivers/gpu/drm/qxl/qxl_dumb.c
index a635d9fdf8ac..d636ba685451 100644
--- a/drivers/gpu/drm/qxl/qxl_dumb.c
+++ b/drivers/gpu/drm/qxl/qxl_dumb.c
@@ -58,6 +58,8 @@ int qxl_mode_dumb_create(struct drm_file *file_priv,
surf.height = args->height;
surf.stride = pitch;
surf.format = format;
+ surf.data = 0;
+
r = qxl_gem_object_create_with_handle(qdev, file_priv,
QXL_GEM_DOMAIN_CPU,
args->size, &surf, &qobj,
diff --git a/drivers/gpu/drm/qxl/qxl_object.c b/drivers/gpu/drm/qxl/qxl_object.c
index 6e26d70f2f07..fbb36e3e8564 100644
--- a/drivers/gpu/drm/qxl/qxl_object.c
+++ b/drivers/gpu/drm/qxl/qxl_object.c
@@ -212,14 +212,14 @@ void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev,
struct io_mapping *map;
struct dma_buf_map bo_map;
- if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
+ if (bo->tbo.resource->mem_type == TTM_PL_VRAM)
map = qdev->vram_mapping;
- else if (bo->tbo.mem.mem_type == TTM_PL_PRIV)
+ else if (bo->tbo.resource->mem_type == TTM_PL_PRIV)
map = qdev->surface_mapping;
else
goto fallback;
- offset = bo->tbo.mem.start << PAGE_SHIFT;
+ offset = bo->tbo.resource->start << PAGE_SHIFT;
return io_mapping_map_atomic_wc(map, offset + page_offset);
fallback:
if (bo->kptr) {
@@ -266,8 +266,8 @@ int qxl_bo_vunmap(struct qxl_bo *bo)
void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev,
struct qxl_bo *bo, void *pmap)
{
- if ((bo->tbo.mem.mem_type != TTM_PL_VRAM) &&
- (bo->tbo.mem.mem_type != TTM_PL_PRIV))
+ if ((bo->tbo.resource->mem_type != TTM_PL_VRAM) &&
+ (bo->tbo.resource->mem_type != TTM_PL_PRIV))
goto fallback;
io_mapping_unmap_atomic(pmap);
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
index 47afe95d04a1..19fd39d9a00c 100644
--- a/drivers/gpu/drm/qxl/qxl_ttm.c
+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -32,6 +32,7 @@
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
+#include <drm/ttm/ttm_range_manager.h>
#include "qxl_drv.h"
#include "qxl_object.h"
@@ -131,7 +132,7 @@ static void qxl_bo_move_notify(struct ttm_buffer_object *bo,
qbo = to_qxl_bo(bo);
qdev = to_qxl(qbo->tbo.base.dev);
- if (bo->mem.mem_type == TTM_PL_PRIV && qbo->surface_id)
+ if (bo->resource->mem_type == TTM_PL_PRIV && qbo->surface_id)
qxl_surface_evict(qdev, qbo, new_mem ? true : false);
}
@@ -140,7 +141,7 @@ static int qxl_bo_move(struct ttm_buffer_object *bo, bool evict,
struct ttm_resource *new_mem,
struct ttm_place *hop)
{
- struct ttm_resource *old_mem = &bo->mem;
+ struct ttm_resource *old_mem = bo->resource;
int ret;
qxl_bo_move_notify(bo, new_mem);
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 80a3bee933d6..9ed2b2700e0a 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -400,8 +400,8 @@ static int cmp_size_smaller_first(void *priv, const struct list_head *a,
struct radeon_bo_list *lb = list_entry(b, struct radeon_bo_list, tv.head);
/* Sort A before B if A is smaller. */
- return (int)la->robj->tbo.mem.num_pages -
- (int)lb->robj->tbo.mem.num_pages;
+ return (int)la->robj->tbo.resource->num_pages -
+ (int)lb->robj->tbo.resource->num_pages;
}
/**
@@ -516,7 +516,7 @@ static int radeon_bo_vm_update_pte(struct radeon_cs_parser *p,
}
r = radeon_vm_bo_update(rdev, vm->ib_bo_va,
- &rdev->ring_tmp_bo.bo->tbo.mem);
+ rdev->ring_tmp_bo.bo->tbo.resource);
if (r)
return r;
@@ -530,7 +530,7 @@ static int radeon_bo_vm_update_pte(struct radeon_cs_parser *p,
return -EINVAL;
}
- r = radeon_vm_bo_update(rdev, bo_va, &bo->tbo.mem);
+ r = radeon_vm_bo_update(rdev, bo_va, bo->tbo.resource);
if (r)
return r;
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 652af7a134bd..406681317419 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -533,7 +533,7 @@ static int radeon_crtc_page_flip_target(struct drm_crtc *crtc,
DRM_ERROR("failed to pin new rbo buffer before flip\n");
goto cleanup;
}
- work->fence = dma_fence_get(dma_resv_get_excl(new_rbo->tbo.base.resv));
+ work->fence = dma_fence_get(dma_resv_excl_fence(new_rbo->tbo.base.resv));
radeon_bo_get_tiling_flags(new_rbo, &tiling_flags, NULL);
radeon_bo_unreserve(new_rbo);
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index ff8849827d61..458f92a70887 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -161,7 +161,7 @@ static int radeon_gem_set_domain(struct drm_gem_object *gobj,
}
if (domain == RADEON_GEM_DOMAIN_CPU) {
/* Asking for cpu access wait for object idle */
- r = dma_resv_wait_timeout_rcu(robj->tbo.base.resv, true, true, 30 * HZ);
+ r = dma_resv_wait_timeout(robj->tbo.base.resv, true, true, 30 * HZ);
if (!r)
r = -EBUSY;
@@ -523,13 +523,13 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
}
robj = gem_to_radeon_bo(gobj);
- r = dma_resv_test_signaled_rcu(robj->tbo.base.resv, true);
+ r = dma_resv_test_signaled(robj->tbo.base.resv, true);
if (r == 0)
r = -EBUSY;
else
r = 0;
- cur_placement = READ_ONCE(robj->tbo.mem.mem_type);
+ cur_placement = READ_ONCE(robj->tbo.resource->mem_type);
args->domain = radeon_mem_type_to_domain(cur_placement);
drm_gem_object_put(gobj);
return r;
@@ -552,14 +552,14 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
}
robj = gem_to_radeon_bo(gobj);
- ret = dma_resv_wait_timeout_rcu(robj->tbo.base.resv, true, true, 30 * HZ);
+ ret = dma_resv_wait_timeout(robj->tbo.base.resv, true, true, 30 * HZ);
if (ret == 0)
r = -EBUSY;
else if (ret < 0)
r = ret;
/* Flush HDP cache via MMIO if necessary */
- cur_placement = READ_ONCE(robj->tbo.mem.mem_type);
+ cur_placement = READ_ONCE(robj->tbo.resource->mem_type);
if (rdev->asic->mmio_hdp_flush &&
radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM)
robj->rdev->asic->mmio_hdp_flush(rdev);
@@ -643,7 +643,7 @@ static void radeon_gem_va_update_vm(struct radeon_device *rdev,
goto error_free;
list_for_each_entry(entry, &list, head) {
- domain = radeon_mem_type_to_domain(entry->bo->mem.mem_type);
+ domain = radeon_mem_type_to_domain(entry->bo->resource->mem_type);
/* if anything is swapped out don't swap it in here,
just abort and wait for the next CS */
if (domain == RADEON_GEM_DOMAIN_CPU)
@@ -656,7 +656,7 @@ static void radeon_gem_va_update_vm(struct radeon_device *rdev,
goto error_unlock;
if (bo_va->it.start)
- r = radeon_vm_bo_update(rdev, bo_va, &bo_va->bo->tbo.mem);
+ r = radeon_vm_bo_update(rdev, bo_va, bo_va->bo->tbo.resource);
error_unlock:
mutex_unlock(&bo_va->vm->mutex);
@@ -860,7 +860,7 @@ static int radeon_debugfs_gem_info_show(struct seq_file *m, void *unused)
unsigned domain;
const char *placement;
- domain = radeon_mem_type_to_domain(rbo->tbo.mem.mem_type);
+ domain = radeon_mem_type_to_domain(rbo->tbo.resource->mem_type);
switch (domain) {
case RADEON_GEM_DOMAIN_VRAM:
placement = "VRAM";
diff --git a/drivers/gpu/drm/radeon/radeon_mn.c b/drivers/gpu/drm/radeon/radeon_mn.c
index e37c9a57a7c3..9fa88549c89e 100644
--- a/drivers/gpu/drm/radeon/radeon_mn.c
+++ b/drivers/gpu/drm/radeon/radeon_mn.c
@@ -66,8 +66,8 @@ static bool radeon_mn_invalidate(struct mmu_interval_notifier *mn,
return true;
}
- r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, true, false,
- MAX_SCHEDULE_TIMEOUT);
+ r = dma_resv_wait_timeout(bo->tbo.base.resv, true, false,
+ MAX_SCHEDULE_TIMEOUT);
if (r <= 0)
DRM_ERROR("(%ld) failed to wait for user bo\n", r);
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index cee11c55fd15..bfaaa3c969a3 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -76,7 +76,7 @@ static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
bo = container_of(tbo, struct radeon_bo, tbo);
- radeon_update_memory_usage(bo, bo->tbo.mem.mem_type, -1);
+ radeon_update_memory_usage(bo, bo->tbo.resource->mem_type, -1);
mutex_lock(&bo->rdev->gem.mutex);
list_del_init(&bo->list);
@@ -250,7 +250,7 @@ int radeon_bo_kmap(struct radeon_bo *bo, void **ptr)
}
return 0;
}
- r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.mem.num_pages, &bo->kmap);
+ r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.resource->num_pages, &bo->kmap);
if (r) {
return r;
}
@@ -359,7 +359,7 @@ void radeon_bo_unpin(struct radeon_bo *bo)
{
ttm_bo_unpin(&bo->tbo);
if (!bo->tbo.pin_count) {
- if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
+ if (bo->tbo.resource->mem_type == TTM_PL_VRAM)
bo->rdev->vram_pin_size -= radeon_bo_size(bo);
else
bo->rdev->gart_pin_size -= radeon_bo_size(bo);
@@ -506,7 +506,7 @@ int radeon_bo_list_validate(struct radeon_device *rdev,
u32 domain = lobj->preferred_domains;
u32 allowed = lobj->allowed_domains;
u32 current_domain =
- radeon_mem_type_to_domain(bo->tbo.mem.mem_type);
+ radeon_mem_type_to_domain(bo->tbo.resource->mem_type);
/* Check if this buffer will be moved and don't move it
* if we have moved too many buffers for this IB already.
@@ -605,7 +605,7 @@ int radeon_bo_get_surface_reg(struct radeon_bo *bo)
out:
radeon_set_surface_reg(rdev, i, bo->tiling_flags, bo->pitch,
- bo->tbo.mem.start << PAGE_SHIFT,
+ bo->tbo.resource->start << PAGE_SHIFT,
bo->tbo.base.size);
return 0;
}
@@ -711,7 +711,7 @@ int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
return 0;
}
- if (bo->tbo.mem.mem_type != TTM_PL_VRAM) {
+ if (bo->tbo.resource->mem_type != TTM_PL_VRAM) {
if (!has_moved)
return 0;
@@ -743,7 +743,7 @@ void radeon_bo_move_notify(struct ttm_buffer_object *bo,
if (!new_mem)
return;
- radeon_update_memory_usage(rbo, bo->mem.mem_type, -1);
+ radeon_update_memory_usage(rbo, bo->resource->mem_type, -1);
radeon_update_memory_usage(rbo, new_mem->mem_type, 1);
}
@@ -760,11 +760,11 @@ vm_fault_t radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
rbo = container_of(bo, struct radeon_bo, tbo);
radeon_bo_check_tiling(rbo, 0, 0);
rdev = rbo->rdev;
- if (bo->mem.mem_type != TTM_PL_VRAM)
+ if (bo->resource->mem_type != TTM_PL_VRAM)
return 0;
- size = bo->mem.num_pages << PAGE_SHIFT;
- offset = bo->mem.start << PAGE_SHIFT;
+ size = bo->resource->num_pages << PAGE_SHIFT;
+ offset = bo->resource->start << PAGE_SHIFT;
if ((offset + size) <= rdev->mc.visible_vram_size)
return 0;
@@ -786,7 +786,7 @@ vm_fault_t radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
r = ttm_bo_validate(bo, &rbo->placement, &ctx);
} else if (likely(!r)) {
- offset = bo->mem.start << PAGE_SHIFT;
+ offset = bo->resource->start << PAGE_SHIFT;
/* this should never happen */
if ((offset + size) > rdev->mc.visible_vram_size)
return VM_FAULT_SIGBUS;
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index fd4116bdde0f..1739c6a142cd 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -95,7 +95,7 @@ static inline u64 radeon_bo_gpu_offset(struct radeon_bo *bo)
rdev = radeon_get_rdev(bo->tbo.bdev);
- switch (bo->tbo.mem.mem_type) {
+ switch (bo->tbo.resource->mem_type) {
case TTM_PL_TT:
start = rdev->mc.gtt_start;
break;
@@ -104,7 +104,7 @@ static inline u64 radeon_bo_gpu_offset(struct radeon_bo *bo)
break;
}
- return (bo->tbo.mem.start << PAGE_SHIFT) + start;
+ return (bo->tbo.resource->start << PAGE_SHIFT) + start;
}
static inline unsigned long radeon_bo_size(struct radeon_bo *bo)
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 3861c0b98fcf..c67b6ddb29a4 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -154,7 +154,7 @@ static void radeon_unmap_vram_bos(struct radeon_device *rdev)
return;
list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
- if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
+ if (bo->tbo.resource->mem_type == TTM_PL_VRAM)
ttm_bo_unmap_virtual(&bo->tbo);
}
}
diff --git a/drivers/gpu/drm/radeon/radeon_sync.c b/drivers/gpu/drm/radeon/radeon_sync.c
index 5d3302945076..9257b60144c4 100644
--- a/drivers/gpu/drm/radeon/radeon_sync.c
+++ b/drivers/gpu/drm/radeon/radeon_sync.c
@@ -98,14 +98,14 @@ int radeon_sync_resv(struct radeon_device *rdev,
int r = 0;
/* always sync to the exclusive fence */
- f = dma_resv_get_excl(resv);
+ f = dma_resv_excl_fence(resv);
fence = f ? to_radeon_fence(f) : NULL;
if (fence && fence->rdev == rdev)
radeon_sync_fence(sync, fence);
else if (f)
r = dma_fence_wait(f, true);
- flist = dma_resv_get_list(resv);
+ flist = dma_resv_shared_list(resv);
if (shared || !flist || r)
return r;
diff --git a/drivers/gpu/drm/radeon/radeon_trace.h b/drivers/gpu/drm/radeon/radeon_trace.h
index 1729cb9a95c5..c9fed5f2b870 100644
--- a/drivers/gpu/drm/radeon/radeon_trace.h
+++ b/drivers/gpu/drm/radeon/radeon_trace.h
@@ -22,7 +22,7 @@ TRACE_EVENT(radeon_bo_create,
TP_fast_assign(
__entry->bo = bo;
- __entry->pages = bo->tbo.mem.num_pages;
+ __entry->pages = bo->tbo.resource->num_pages;
),
TP_printk("bo=%p, pages=%u", __entry->bo, __entry->pages)
);
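The radeon hunks in this series (and the ttm_bo.c ones further below) switch from the _rcu-suffixed dma_resv helpers to the renamed dma_resv_wait_timeout(), dma_resv_test_signaled(), dma_resv_excl_fence() and dma_resv_shared_list(); the semantics are unchanged. A minimal sketch of the wait-or-poll pattern those call sites use, with the example_ name invented here:

        /* Assumes <linux/dma-resv.h>; mirrors the radeon_gem wait/busy ioctls above. */
        static long example_wait_idle(struct dma_resv *resv, bool no_wait)
        {
                long lret;

                if (no_wait)
                        return dma_resv_test_signaled(resv, true) ? 0 : -EBUSY;

                /* wait_all = true also waits for shared fences; interruptible, 30s timeout */
                lret = dma_resv_wait_timeout(resv, true, true, 30 * HZ);
                if (lret < 0)
                        return lret;            /* e.g. -ERESTARTSYS */
                return lret ? 0 : -EBUSY;       /* 0 means the timeout expired */
        }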
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index a71d94f7067b..ad2a5a791bba 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -45,6 +45,7 @@
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
+#include <drm/ttm/ttm_range_manager.h>
#include "radeon_reg.h"
#include "radeon.h"
@@ -98,12 +99,12 @@ static void radeon_evict_flags(struct ttm_buffer_object *bo,
return;
}
rbo = container_of(bo, struct radeon_bo, tbo);
- switch (bo->mem.mem_type) {
+ switch (bo->resource->mem_type) {
case TTM_PL_VRAM:
if (rbo->rdev->ring[radeon_copy_ring_index(rbo->rdev)].ready == false)
radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU);
else if (rbo->rdev->mc.visible_vram_size < rbo->rdev->mc.real_vram_size &&
- bo->mem.start < (rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT)) {
+ bo->resource->start < (rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT)) {
unsigned fpfn = rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
int i;
@@ -195,9 +196,9 @@ static int radeon_bo_move(struct ttm_buffer_object *bo, bool evict,
struct ttm_resource *new_mem,
struct ttm_place *hop)
{
+ struct ttm_resource *old_mem = bo->resource;
struct radeon_device *rdev;
struct radeon_bo *rbo;
- struct ttm_resource *old_mem = &bo->mem;
int r;
if (new_mem->mem_type == TTM_PL_TT) {
@@ -229,7 +230,7 @@ static int radeon_bo_move(struct ttm_buffer_object *bo, bool evict,
if (old_mem->mem_type == TTM_PL_TT &&
new_mem->mem_type == TTM_PL_SYSTEM) {
radeon_ttm_tt_unbind(bo->bdev, bo->ttm);
- ttm_resource_free(bo, &bo->mem);
+ ttm_resource_free(bo, &bo->resource);
ttm_bo_assign_mem(bo, new_mem);
goto out;
}
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index 753da95e6abb..2ea86919d953 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -479,7 +479,7 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
return -EINVAL;
}
- f = dma_resv_get_excl(bo->tbo.base.resv);
+ f = dma_resv_excl_fence(bo->tbo.base.resv);
if (f) {
r = radeon_fence_wait((struct radeon_fence *)f, false);
if (r) {
diff --git a/drivers/gpu/drm/sti/sti_hda.c b/drivers/gpu/drm/sti/sti_hda.c
index 5c2b650b561d..03f3377f918c 100644
--- a/drivers/gpu/drm/sti/sti_hda.c
+++ b/drivers/gpu/drm/sti/sti_hda.c
@@ -272,7 +272,7 @@ static void hda_write(struct sti_hda *hda, u32 val, int offset)
}
/**
- * Search for a video mode in the supported modes table
+ * hda_get_mode_idx - Search for a video mode in the supported modes table
*
* @mode: mode being searched
* @idx: index of the found mode
@@ -292,7 +292,7 @@ static bool hda_get_mode_idx(struct drm_display_mode mode, int *idx)
}
/**
- * Enable the HD DACS
+ * hda_enable_hd_dacs - Enable the HD DACS
*
* @hda: pointer to HD analog structure
* @enable: true if HD DACS need to be enabled, else false
@@ -380,7 +380,7 @@ static void hda_debugfs_init(struct sti_hda *hda, struct drm_minor *minor)
}
/**
- * Configure AWG, writing instructions
+ * sti_hda_configure_awg - Configure AWG, writing instructions
*
* @hda: pointer to HD analog structure
* @awg_instr: pointer to AWG instructions table
diff --git a/drivers/gpu/drm/sti/sti_hdmi_tx3g4c28phy.c b/drivers/gpu/drm/sti/sti_hdmi_tx3g4c28phy.c
index d5f94dca0d32..d25ecd4f4b67 100644
--- a/drivers/gpu/drm/sti/sti_hdmi_tx3g4c28phy.c
+++ b/drivers/gpu/drm/sti/sti_hdmi_tx3g4c28phy.c
@@ -67,7 +67,7 @@ static struct hdmi_phy_config hdmiphy_config[NB_HDMI_PHY_CONFIG] = {
};
/**
- * Start hdmi phy macro cell tx3g4c28
+ * sti_hdmi_tx3g4c28phy_start - Start hdmi phy macro cell tx3g4c28
*
* @hdmi: pointer on the hdmi internal structure
*
@@ -179,7 +179,7 @@ err:
}
/**
- * Stop hdmi phy macro cell tx3g4c28
+ * sti_hdmi_tx3g4c28phy_stop - Stop hdmi phy macro cell tx3g4c28
*
* @hdmi: pointer on the hdmi internal structure
*/
diff --git a/drivers/gpu/drm/sti/sti_hqvdp.c b/drivers/gpu/drm/sti/sti_hqvdp.c
index edbb99f53de1..d09b08995b12 100644
--- a/drivers/gpu/drm/sti/sti_hqvdp.c
+++ b/drivers/gpu/drm/sti/sti_hqvdp.c
@@ -782,7 +782,7 @@ static void sti_hqvdp_disable(struct sti_hqvdp *hqvdp)
}
/**
- * sti_vdp_vtg_cb
+ * sti_hqvdp_vtg_cb
* @nb: notifier block
* @evt: event message
* @data: private data
diff --git a/drivers/gpu/drm/sti/sti_tvout.c b/drivers/gpu/drm/sti/sti_tvout.c
index df3817f0fd30..2499715a69b7 100644
--- a/drivers/gpu/drm/sti/sti_tvout.c
+++ b/drivers/gpu/drm/sti/sti_tvout.c
@@ -153,7 +153,7 @@ static void tvout_write(struct sti_tvout *tvout, u32 val, int offset)
}
/**
- * Set the clipping mode of a VIP
+ * tvout_vip_set_color_order - Set the clipping mode of a VIP
*
* @tvout: tvout structure
* @reg: register to set
@@ -177,7 +177,7 @@ static void tvout_vip_set_color_order(struct sti_tvout *tvout, int reg,
}
/**
- * Set the clipping mode of a VIP
+ * tvout_vip_set_clip_mode - Set the clipping mode of a VIP
*
* @tvout: tvout structure
* @reg: register to set
@@ -193,7 +193,7 @@ static void tvout_vip_set_clip_mode(struct sti_tvout *tvout, int reg, u32 range)
}
/**
- * Set the rounded value of a VIP
+ * tvout_vip_set_rnd - Set the rounded value of a VIP
*
* @tvout: tvout structure
* @reg: register to set
@@ -209,7 +209,7 @@ static void tvout_vip_set_rnd(struct sti_tvout *tvout, int reg, u32 rnd)
}
/**
- * Select the VIP input
+ * tvout_vip_set_sel_input - Select the VIP input
*
* @tvout: tvout structure
* @reg: register to set
@@ -247,7 +247,7 @@ static void tvout_vip_set_sel_input(struct sti_tvout *tvout,
}
/**
- * Select the input video signed or unsigned
+ * tvout_vip_set_in_vid_fmt - Select the input video signed or unsigned
*
* @tvout: tvout structure
* @reg: register to set
@@ -264,7 +264,7 @@ static void tvout_vip_set_in_vid_fmt(struct sti_tvout *tvout,
}
/**
- * Set preformatter matrix
+ * tvout_preformatter_set_matrix - Set preformatter matrix
*
* @tvout: tvout structure
* @mode: display mode structure
@@ -289,7 +289,7 @@ static void tvout_preformatter_set_matrix(struct sti_tvout *tvout,
}
/**
- * Start VIP block for DVO output
+ * tvout_dvo_start - Start VIP block for DVO output
*
* @tvout: pointer on tvout structure
* @main_path: true if main path has to be used in the vip configuration
@@ -343,7 +343,7 @@ static void tvout_dvo_start(struct sti_tvout *tvout, bool main_path)
}
/**
- * Start VIP block for HDMI output
+ * tvout_hdmi_start - Start VIP block for HDMI output
*
* @tvout: pointer on tvout structure
* @main_path: true if main path has to be used in the vip configuration
@@ -392,7 +392,7 @@ static void tvout_hdmi_start(struct sti_tvout *tvout, bool main_path)
}
/**
- * Start HDF VIP and HD DAC
+ * tvout_hda_start - Start HDF VIP and HD DAC
*
* @tvout: pointer on tvout structure
* @main_path: true if main path has to be used in the vip configuration
diff --git a/drivers/gpu/drm/stm/ltdc.c b/drivers/gpu/drm/stm/ltdc.c
index e99771b947b6..08b71248044d 100644
--- a/drivers/gpu/drm/stm/ltdc.c
+++ b/drivers/gpu/drm/stm/ltdc.c
@@ -531,7 +531,6 @@ static void ltdc_crtc_mode_set_nofb(struct drm_crtc *crtc)
struct drm_encoder *encoder = NULL;
struct drm_bridge *bridge = NULL;
struct drm_display_mode *mode = &crtc->state->adjusted_mode;
- struct videomode vm;
u32 hsync, vsync, accum_hbp, accum_vbp, accum_act_w, accum_act_h;
u32 total_width, total_height;
u32 bus_flags = 0;
@@ -570,31 +569,33 @@ static void ltdc_crtc_mode_set_nofb(struct drm_crtc *crtc)
}
}
- drm_display_mode_to_videomode(mode, &vm);
-
DRM_DEBUG_DRIVER("CRTC:%d mode:%s\n", crtc->base.id, mode->name);
- DRM_DEBUG_DRIVER("Video mode: %dx%d", vm.hactive, vm.vactive);
+ DRM_DEBUG_DRIVER("Video mode: %dx%d", mode->hdisplay, mode->vdisplay);
DRM_DEBUG_DRIVER(" hfp %d hbp %d hsl %d vfp %d vbp %d vsl %d\n",
- vm.hfront_porch, vm.hback_porch, vm.hsync_len,
- vm.vfront_porch, vm.vback_porch, vm.vsync_len);
+ mode->hsync_start - mode->hdisplay,
+ mode->htotal - mode->hsync_end,
+ mode->hsync_end - mode->hsync_start,
+ mode->vsync_start - mode->vdisplay,
+ mode->vtotal - mode->vsync_end,
+ mode->vsync_end - mode->vsync_start);
/* Convert video timings to ltdc timings */
- hsync = vm.hsync_len - 1;
- vsync = vm.vsync_len - 1;
- accum_hbp = hsync + vm.hback_porch;
- accum_vbp = vsync + vm.vback_porch;
- accum_act_w = accum_hbp + vm.hactive;
- accum_act_h = accum_vbp + vm.vactive;
- total_width = accum_act_w + vm.hfront_porch;
- total_height = accum_act_h + vm.vfront_porch;
+ hsync = mode->hsync_end - mode->hsync_start - 1;
+ vsync = mode->vsync_end - mode->vsync_start - 1;
+ accum_hbp = mode->htotal - mode->hsync_start - 1;
+ accum_vbp = mode->vtotal - mode->vsync_start - 1;
+ accum_act_w = accum_hbp + mode->hdisplay;
+ accum_act_h = accum_vbp + mode->vdisplay;
+ total_width = mode->htotal - 1;
+ total_height = mode->vtotal - 1;
/* Configures the HS, VS, DE and PC polarities. Default Active Low */
val = 0;
- if (vm.flags & DISPLAY_FLAGS_HSYNC_HIGH)
+ if (mode->flags & DRM_MODE_FLAG_PHSYNC)
val |= GCR_HSPOL;
- if (vm.flags & DISPLAY_FLAGS_VSYNC_HIGH)
+ if (mode->flags & DRM_MODE_FLAG_PVSYNC)
val |= GCR_VSPOL;
if (bus_flags & DRM_BUS_FLAG_DE_LOW)
diff --git a/drivers/gpu/drm/sun4i/sun8i_ui_layer.c b/drivers/gpu/drm/sun4i/sun8i_ui_layer.c
index 0db164a774a1..e779855bcd6e 100644
--- a/drivers/gpu/drm/sun4i/sun8i_ui_layer.c
+++ b/drivers/gpu/drm/sun4i/sun8i_ui_layer.c
@@ -370,6 +370,11 @@ static const u32 sun8i_ui_layer_formats[] = {
DRM_FORMAT_XRGB8888,
};
+static const uint64_t sun8i_layer_modifiers[] = {
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID
+};
+
struct sun8i_ui_layer *sun8i_ui_layer_init_one(struct drm_device *drm,
struct sun8i_mixer *mixer,
int index)
@@ -392,7 +397,7 @@ struct sun8i_ui_layer *sun8i_ui_layer_init_one(struct drm_device *drm,
&sun8i_ui_layer_funcs,
sun8i_ui_layer_formats,
ARRAY_SIZE(sun8i_ui_layer_formats),
- NULL, type, NULL);
+ sun8i_layer_modifiers, type, NULL);
if (ret) {
dev_err(drm->dev, "Couldn't initialize layer\n");
return ERR_PTR(ret);
diff --git a/drivers/gpu/drm/sun4i/sun8i_vi_layer.c b/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
index 46420780db59..1c86c2dd0bbf 100644
--- a/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
+++ b/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
@@ -534,6 +534,11 @@ static const u32 sun8i_vi_layer_de3_formats[] = {
DRM_FORMAT_YVU422,
};
+static const uint64_t sun8i_layer_modifiers[] = {
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID
+};
+
struct sun8i_vi_layer *sun8i_vi_layer_init_one(struct drm_device *drm,
struct sun8i_mixer *mixer,
int index)
@@ -560,7 +565,8 @@ struct sun8i_vi_layer *sun8i_vi_layer_init_one(struct drm_device *drm,
ret = drm_universal_plane_init(drm, &layer->plane, 0,
&sun8i_vi_layer_funcs,
formats, format_count,
- NULL, DRM_PLANE_TYPE_OVERLAY, NULL);
+ sun8i_layer_modifiers,
+ DRM_PLANE_TYPE_OVERLAY, NULL);
if (ret) {
dev_err(drm->dev, "Couldn't initialize layer\n");
return ERR_PTR(ret);
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 51a94fd63bd7..db53fecca696 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -58,7 +58,7 @@ static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
int i, mem_type;
drm_printf(&p, "No space for %p (%lu pages, %zuK, %zuM)\n",
- bo, bo->mem.num_pages, bo->base.size >> 10,
+ bo, bo->resource->num_pages, bo->base.size >> 10,
bo->base.size >> 20);
for (i = 0; i < placement->num_placement; i++) {
mem_type = placement->placement[i].mem_type;
@@ -109,7 +109,7 @@ void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo,
bdev->funcs->del_from_lru_notify(bo);
if (bulk && !bo->pin_count) {
- switch (bo->mem.mem_type) {
+ switch (bo->resource->mem_type) {
case TTM_PL_TT:
ttm_bo_bulk_move_set_pos(&bulk->tt[bo->priority], bo);
break;
@@ -163,11 +163,13 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
struct ttm_operation_ctx *ctx,
struct ttm_place *hop)
{
+ struct ttm_resource_manager *old_man, *new_man;
struct ttm_device *bdev = bo->bdev;
- struct ttm_resource_manager *old_man = ttm_manager_type(bdev, bo->mem.mem_type);
- struct ttm_resource_manager *new_man = ttm_manager_type(bdev, mem->mem_type);
int ret;
+ old_man = ttm_manager_type(bdev, bo->resource->mem_type);
+ new_man = ttm_manager_type(bdev, mem->mem_type);
+
ttm_bo_unmap_virtual(bo);
/*
@@ -200,7 +202,7 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
return 0;
out_err:
- new_man = ttm_manager_type(bdev, bo->mem.mem_type);
+ new_man = ttm_manager_type(bdev, bo->resource->mem_type);
if (!new_man->use_tt)
ttm_bo_tt_destroy(bo);
@@ -221,7 +223,7 @@ static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
bo->bdev->funcs->delete_mem_notify(bo);
ttm_bo_tt_destroy(bo);
- ttm_resource_free(bo, &bo->mem);
+ ttm_resource_free(bo, &bo->resource);
}
static int ttm_bo_individualize_resv(struct ttm_buffer_object *bo)
@@ -259,8 +261,8 @@ static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
int i;
rcu_read_lock();
- fobj = rcu_dereference(resv->fence);
- fence = rcu_dereference(resv->fence_excl);
+ fobj = dma_resv_shared_list(resv);
+ fence = dma_resv_excl_fence(resv);
if (fence && !fence->ops->signaled)
dma_fence_enable_sw_signaling(fence);
@@ -294,7 +296,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
struct dma_resv *resv = &bo->base._resv;
int ret;
- if (dma_resv_test_signaled_rcu(resv, true))
+ if (dma_resv_test_signaled(resv, true))
ret = 0;
else
ret = -EBUSY;
@@ -306,8 +308,8 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
dma_resv_unlock(bo->base.resv);
spin_unlock(&bo->bdev->lru_lock);
- lret = dma_resv_wait_timeout_rcu(resv, true, interruptible,
- 30 * HZ);
+ lret = dma_resv_wait_timeout(resv, true, interruptible,
+ 30 * HZ);
if (lret < 0)
return lret;
@@ -409,18 +411,18 @@ static void ttm_bo_release(struct kref *kref)
/* Last resort, if we fail to allocate memory for the
* fences, block for the BO to become idle

*/
- dma_resv_wait_timeout_rcu(bo->base.resv, true, false,
- 30 * HZ);
+ dma_resv_wait_timeout(bo->base.resv, true, false,
+ 30 * HZ);
}
if (bo->bdev->funcs->release_notify)
bo->bdev->funcs->release_notify(bo);
drm_vma_offset_remove(bdev->vma_manager, &bo->base.vma_node);
- ttm_mem_io_free(bdev, &bo->mem);
+ ttm_mem_io_free(bdev, bo->resource);
}
- if (!dma_resv_test_signaled_rcu(bo->base.resv, true) ||
+ if (!dma_resv_test_signaled(bo->base.resv, true) ||
!dma_resv_trylock(bo->base.resv)) {
/* The BO is not idle, resurrect it for delayed destroy */
ttm_bo_flush_all_fences(bo);
@@ -438,7 +440,7 @@ static void ttm_bo_release(struct kref *kref)
*/
if (bo->pin_count) {
bo->pin_count = 0;
- ttm_bo_move_to_lru_tail(bo, &bo->mem, NULL);
+ ttm_bo_move_to_lru_tail(bo, bo->resource, NULL);
}
kref_init(&bo->kref);
@@ -487,7 +489,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo,
struct ttm_operation_ctx *ctx)
{
struct ttm_device *bdev = bo->bdev;
- struct ttm_resource evict_mem;
+ struct ttm_resource *evict_mem;
struct ttm_placement placement;
struct ttm_place hop;
int ret = 0;
@@ -501,10 +503,15 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo,
bdev->funcs->evict_flags(bo, &placement);
if (!placement.num_placement && !placement.num_busy_placement) {
- ttm_bo_wait(bo, false, false);
+ ret = ttm_bo_wait(bo, true, false);
+ if (ret)
+ return ret;
- ttm_bo_cleanup_memtype_use(bo);
- return ttm_tt_create(bo, false);
+ /*
+ * Since we've already synced, this frees backing store
+ * immediately.
+ */
+ return ttm_bo_pipeline_gutting(bo);
}
ret = ttm_bo_mem_space(bo, &placement, &evict_mem, ctx);
@@ -517,7 +524,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo,
goto out;
}
- ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, ctx, &hop);
+ ret = ttm_bo_handle_move_mem(bo, evict_mem, true, ctx, &hop);
if (unlikely(ret)) {
WARN(ret == -EMULTIHOP, "Unexpected multihop in eviction - likely driver bug\n");
if (ret != -ERESTARTSYS)
@@ -531,11 +538,15 @@ out:
bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
const struct ttm_place *place)
{
+ dma_resv_assert_held(bo->base.resv);
+ if (bo->resource->mem_type == TTM_PL_SYSTEM)
+ return true;
+
/* Don't evict this BO if it's outside of the
* requested placement range
*/
- if (place->fpfn >= (bo->mem.start + bo->mem.num_pages) ||
- (place->lpfn && place->lpfn <= bo->mem.start))
+ if (place->fpfn >= (bo->resource->start + bo->resource->num_pages) ||
+ (place->lpfn && place->lpfn <= bo->resource->start))
return false;
return true;
@@ -553,7 +564,9 @@ EXPORT_SYMBOL(ttm_bo_eviction_valuable);
* b. Otherwise, trylock it.
*/
static bool ttm_bo_evict_swapout_allowable(struct ttm_buffer_object *bo,
- struct ttm_operation_ctx *ctx, bool *locked, bool *busy)
+ struct ttm_operation_ctx *ctx,
+ const struct ttm_place *place,
+ bool *locked, bool *busy)
{
bool ret = false;
@@ -571,6 +584,14 @@ static bool ttm_bo_evict_swapout_allowable(struct ttm_buffer_object *bo,
*busy = !ret;
}
+ if (ret && place && !bo->bdev->funcs->eviction_valuable(bo, place)) {
+ ret = false;
+ if (*locked) {
+ dma_resv_unlock(bo->base.resv);
+ *locked = false;
+ }
+ }
+
return ret;
}
@@ -625,20 +646,14 @@ int ttm_mem_evict_first(struct ttm_device *bdev,
list_for_each_entry(bo, &man->lru[i], lru) {
bool busy;
- if (!ttm_bo_evict_swapout_allowable(bo, ctx, &locked,
- &busy)) {
+ if (!ttm_bo_evict_swapout_allowable(bo, ctx, place,
+ &locked, &busy)) {
if (busy && !busy_bo && ticket !=
dma_resv_locking_ctx(bo->base.resv))
busy_bo = bo;
continue;
}
- if (place && !bdev->funcs->eviction_valuable(bo,
- place)) {
- if (locked)
- dma_resv_unlock(bo->base.resv);
- continue;
- }
if (!ttm_bo_get_unless_zero(bo)) {
if (locked)
dma_resv_unlock(bo->base.resv);
@@ -726,14 +741,15 @@ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
*/
static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
const struct ttm_place *place,
- struct ttm_resource *mem,
+ struct ttm_resource **mem,
struct ttm_operation_ctx *ctx)
{
struct ttm_device *bdev = bo->bdev;
- struct ttm_resource_manager *man = ttm_manager_type(bdev, mem->mem_type);
+ struct ttm_resource_manager *man;
struct ww_acquire_ctx *ticket;
int ret;
+ man = ttm_manager_type(bdev, place->mem_type);
ticket = dma_resv_locking_ctx(bo->base.resv);
do {
ret = ttm_resource_alloc(bo, place, mem);
@@ -747,37 +763,7 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
return ret;
} while (1);
- return ttm_bo_add_move_fence(bo, man, mem, ctx->no_wait_gpu);
-}
-
-/**
- * ttm_bo_mem_placement - check if placement is compatible
- * @bo: BO to find memory for
- * @place: where to search
- * @mem: the memory object to fill in
- *
- * Check if placement is compatible and fill in mem structure.
- * Returns -EBUSY if placement won't work or negative error code.
- * 0 when placement can be used.
- */
-static int ttm_bo_mem_placement(struct ttm_buffer_object *bo,
- const struct ttm_place *place,
- struct ttm_resource *mem)
-{
- struct ttm_device *bdev = bo->bdev;
- struct ttm_resource_manager *man;
-
- man = ttm_manager_type(bdev, place->mem_type);
- if (!man || !ttm_resource_manager_used(man))
- return -EBUSY;
-
- mem->mem_type = place->mem_type;
- mem->placement = place->flags;
-
- spin_lock(&bo->bdev->lru_lock);
- ttm_bo_move_to_lru_tail(bo, mem, NULL);
- spin_unlock(&bo->bdev->lru_lock);
- return 0;
+ return ttm_bo_add_move_fence(bo, man, *mem, ctx->no_wait_gpu);
}
/*
@@ -790,7 +776,7 @@ static int ttm_bo_mem_placement(struct ttm_buffer_object *bo,
*/
int ttm_bo_mem_space(struct ttm_buffer_object *bo,
struct ttm_placement *placement,
- struct ttm_resource *mem,
+ struct ttm_resource **mem,
struct ttm_operation_ctx *ctx)
{
struct ttm_device *bdev = bo->bdev;
@@ -805,8 +791,8 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
const struct ttm_place *place = &placement->placement[i];
struct ttm_resource_manager *man;
- ret = ttm_bo_mem_placement(bo, place, mem);
- if (ret)
+ man = ttm_manager_type(bdev, place->mem_type);
+ if (!man || !ttm_resource_manager_used(man))
continue;
type_found = true;
@@ -816,8 +802,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
if (unlikely(ret))
goto error;
- man = ttm_manager_type(bdev, mem->mem_type);
- ret = ttm_bo_add_move_fence(bo, man, mem, ctx->no_wait_gpu);
+ ret = ttm_bo_add_move_fence(bo, man, *mem, ctx->no_wait_gpu);
if (unlikely(ret)) {
ttm_resource_free(bo, mem);
if (ret == -EBUSY)
@@ -830,9 +815,10 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
for (i = 0; i < placement->num_busy_placement; ++i) {
const struct ttm_place *place = &placement->busy_placement[i];
+ struct ttm_resource_manager *man;
- ret = ttm_bo_mem_placement(bo, place, mem);
- if (ret)
+ man = ttm_manager_type(bdev, place->mem_type);
+ if (!man || !ttm_resource_manager_used(man))
continue;
type_found = true;
@@ -851,7 +837,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
}
error:
- if (bo->mem.mem_type == TTM_PL_SYSTEM && !bo->pin_count)
+ if (bo->resource->mem_type == TTM_PL_SYSTEM && !bo->pin_count)
ttm_bo_move_to_lru_tail_unlocked(bo);
return ret;
@@ -859,12 +845,12 @@ error:
EXPORT_SYMBOL(ttm_bo_mem_space);
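For orientation, the calling convention that ttm_bo_move_buffer() and ttm_bo_bounce_temp_buffer() adopt below: ttm_bo_mem_space() now hands back a manager-allocated struct ttm_resource pointer, which the caller passes to ttm_bo_handle_move_mem() and, on a multihop request, routes through the bounce domain before retrying. A condensed, illustrative rendering of that flow (taken from the hunks that follow, not a drop-in):

        struct ttm_resource *mem;
        struct ttm_place hop;
        int ret;

        ret = ttm_bo_mem_space(bo, placement, &mem, ctx);
        if (ret)
                return ret;
        bounce:
        ret = ttm_bo_handle_move_mem(bo, mem, false, ctx, &hop);
        if (ret == -EMULTIHOP) {
                ret = ttm_bo_bounce_temp_buffer(bo, &mem, ctx, &hop);
                if (ret)
                        return ret;
                goto bounce;    /* now try the final hop from the bounce domain */
        }
        return ret;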
static int ttm_bo_bounce_temp_buffer(struct ttm_buffer_object *bo,
- struct ttm_resource *mem,
+ struct ttm_resource **mem,
struct ttm_operation_ctx *ctx,
struct ttm_place *hop)
{
struct ttm_placement hop_placement;
- struct ttm_resource hop_mem;
+ struct ttm_resource *hop_mem;
int ret;
hop_placement.num_placement = hop_placement.num_busy_placement = 1;
@@ -875,7 +861,7 @@ static int ttm_bo_bounce_temp_buffer(struct ttm_buffer_object *bo,
if (ret)
return ret;
/* move to the bounce domain */
- ret = ttm_bo_handle_move_mem(bo, &hop_mem, false, ctx, NULL);
+ ret = ttm_bo_handle_move_mem(bo, hop_mem, false, ctx, NULL);
if (ret) {
ttm_resource_free(bo, &hop_mem);
return ret;
@@ -887,14 +873,12 @@ static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
struct ttm_placement *placement,
struct ttm_operation_ctx *ctx)
{
+ struct ttm_resource *mem;
struct ttm_place hop;
- struct ttm_resource mem;
int ret;
dma_resv_assert_held(bo->base.resv);
- memset(&hop, 0, sizeof(hop));
-
/*
* Determine where to move the buffer.
*
@@ -908,7 +892,7 @@ static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
if (ret)
return ret;
bounce:
- ret = ttm_bo_handle_move_mem(bo, &mem, false, ctx, &hop);
+ ret = ttm_bo_handle_move_mem(bo, mem, false, ctx, &hop);
if (ret == -EMULTIHOP) {
ret = ttm_bo_bounce_temp_buffer(bo, &mem, ctx, &hop);
if (ret)
@@ -976,18 +960,13 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
/*
* Remove the backing store if no placement is given.
*/
- if (!placement->num_placement && !placement->num_busy_placement) {
- ret = ttm_bo_pipeline_gutting(bo);
- if (ret)
- return ret;
-
- return ttm_tt_create(bo, false);
- }
+ if (!placement->num_placement && !placement->num_busy_placement)
+ return ttm_bo_pipeline_gutting(bo);
/*
* Check whether we need to move buffer.
*/
- if (!ttm_bo_mem_compat(placement, &bo->mem, &new_flags)) {
+ if (!ttm_bo_mem_compat(placement, bo->resource, &new_flags)) {
ret = ttm_bo_move_buffer(bo, placement, ctx);
if (ret)
return ret;
@@ -995,7 +974,7 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
/*
* We might need to add a TTM.
*/
- if (bo->mem.mem_type == TTM_PL_SYSTEM) {
+ if (bo->resource->mem_type == TTM_PL_SYSTEM) {
ret = ttm_tt_create(bo, true);
if (ret)
return ret;
@@ -1017,7 +996,7 @@ int ttm_bo_init_reserved(struct ttm_device *bdev,
{
static const struct ttm_place sys_mem = { .mem_type = TTM_PL_SYSTEM };
bool locked;
- int ret = 0;
+ int ret;
bo->destroy = destroy ? destroy : ttm_bo_default_destroy;
@@ -1027,7 +1006,6 @@ int ttm_bo_init_reserved(struct ttm_device *bdev,
bo->bdev = bdev;
bo->type = type;
bo->page_alignment = page_alignment;
- ttm_resource_alloc(bo, &sys_mem, &bo->mem);
bo->moving = NULL;
bo->pin_count = 0;
bo->sg = sg;
@@ -1039,6 +1017,12 @@ int ttm_bo_init_reserved(struct ttm_device *bdev,
}
atomic_inc(&ttm_glob.bo_count);
+ ret = ttm_resource_alloc(bo, &sys_mem, &bo->resource);
+ if (unlikely(ret)) {
+ ttm_bo_put(bo);
+ return ret;
+ }
+
/*
* For ttm_bo_type_device buffers, allocate
* address space from the device.
@@ -1046,7 +1030,7 @@ int ttm_bo_init_reserved(struct ttm_device *bdev,
if (bo->type == ttm_bo_type_device ||
bo->type == ttm_bo_type_sg)
ret = drm_vma_offset_add(bdev->vma_manager, &bo->base.vma_node,
- bo->mem.num_pages);
+ bo->resource->num_pages);
/* passed reservation objects should already be locked,
* since otherwise lockdep will be angered in radeon.
@@ -1108,7 +1092,7 @@ void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
struct ttm_device *bdev = bo->bdev;
drm_vma_node_unmap(&bo->base.vma_node, bdev->dev_mapping);
- ttm_mem_io_free(bdev, &bo->mem);
+ ttm_mem_io_free(bdev, bo->resource);
}
EXPORT_SYMBOL(ttm_bo_unmap_virtual);
@@ -1118,14 +1102,14 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
long timeout = 15 * HZ;
if (no_wait) {
- if (dma_resv_test_signaled_rcu(bo->base.resv, true))
+ if (dma_resv_test_signaled(bo->base.resv, true))
return 0;
else
return -EBUSY;
}
- timeout = dma_resv_wait_timeout_rcu(bo->base.resv, true,
- interruptible, timeout);
+ timeout = dma_resv_wait_timeout(bo->base.resv, true, interruptible,
+ timeout);
if (timeout < 0)
return timeout;
@@ -1140,10 +1124,19 @@ EXPORT_SYMBOL(ttm_bo_wait);
int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
gfp_t gfp_flags)
{
+ struct ttm_place place;
bool locked;
int ret;
- if (!ttm_bo_evict_swapout_allowable(bo, ctx, &locked, NULL))
+ /*
+ * While the bo may already reside in SYSTEM placement, set
+ * SYSTEM as the new placement to also cover the move further below.
+ * The driver may use the fact that we're moving from SYSTEM
+ * as an indication that we're about to swap out.
+ */
+ memset(&place, 0, sizeof(place));
+ place.mem_type = TTM_PL_SYSTEM;
+ if (!ttm_bo_evict_swapout_allowable(bo, ctx, &place, &locked, NULL))
return -EBUSY;
if (!ttm_bo_get_unless_zero(bo)) {
@@ -1165,21 +1158,17 @@ int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
/*
* Move to system cached
*/
- if (bo->mem.mem_type != TTM_PL_SYSTEM) {
+ if (bo->resource->mem_type != TTM_PL_SYSTEM) {
struct ttm_operation_ctx ctx = { false, false };
- struct ttm_resource evict_mem;
- struct ttm_place place, hop;
+ struct ttm_resource *evict_mem;
+ struct ttm_place hop;
- memset(&place, 0, sizeof(place));
memset(&hop, 0, sizeof(hop));
-
- place.mem_type = TTM_PL_SYSTEM;
-
ret = ttm_resource_alloc(bo, &place, &evict_mem);
if (unlikely(ret))
goto out;
- ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, &ctx, &hop);
+ ret = ttm_bo_handle_move_mem(bo, evict_mem, true, &ctx, &hop);
if (unlikely(ret != 0)) {
WARN(ret == -EMULTIHOP, "Unexpected multihop in swapout - likely driver bug.\n");
goto out;
@@ -1202,7 +1191,8 @@ int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
if (bo->bdev->funcs->swap_notify)
bo->bdev->funcs->swap_notify(bo);
- ret = ttm_tt_swapout(bo->bdev, bo->ttm, gfp_flags);
+ if (ttm_tt_is_populated(bo->ttm))
+ ret = ttm_tt_swapout(bo->bdev, bo->ttm, gfp_flags);
out:
/*
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index ae8b61460724..2f57f824e6db 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -31,6 +31,7 @@
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
+#include <drm/drm_cache.h>
#include <drm/drm_vma_manager.h>
#include <linux/dma-buf-map.h>
#include <linux/io.h>
@@ -72,190 +73,112 @@ void ttm_mem_io_free(struct ttm_device *bdev,
mem->bus.addr = NULL;
}
-static int ttm_resource_ioremap(struct ttm_device *bdev,
- struct ttm_resource *mem,
- void **virtual)
+/**
+ * ttm_move_memcpy - Helper to perform a memcpy ttm move operation.
+ * @bo: The struct ttm_buffer_object.
+ * @num_pages: Number of pages to copy.
+ * @dst_iter: A struct ttm_kmap_iter representing the destination resource.
+ * @src_iter: A struct ttm_kmap_iter representing the source resource.
+ *
+ * This function is intended to be able to move out asynchronously under a
+ * dma-fence if desired.
+ */
+void ttm_move_memcpy(struct ttm_buffer_object *bo,
+ u32 num_pages,
+ struct ttm_kmap_iter *dst_iter,
+ struct ttm_kmap_iter *src_iter)
{
- int ret;
- void *addr;
-
- *virtual = NULL;
- ret = ttm_mem_io_reserve(bdev, mem);
- if (ret || !mem->bus.is_iomem)
- return ret;
+ const struct ttm_kmap_iter_ops *dst_ops = dst_iter->ops;
+ const struct ttm_kmap_iter_ops *src_ops = src_iter->ops;
+ struct ttm_tt *ttm = bo->ttm;
+ struct dma_buf_map src_map, dst_map;
+ pgoff_t i;
- if (mem->bus.addr) {
- addr = mem->bus.addr;
- } else {
- size_t bus_size = (size_t)mem->num_pages << PAGE_SHIFT;
+ /* Single TTM move. NOP */
+ if (dst_ops->maps_tt && src_ops->maps_tt)
+ return;
- if (mem->bus.caching == ttm_write_combined)
- addr = ioremap_wc(mem->bus.offset, bus_size);
-#ifdef CONFIG_X86
- else if (mem->bus.caching == ttm_cached)
- addr = ioremap_cache(mem->bus.offset, bus_size);
-#endif
- else
- addr = ioremap(mem->bus.offset, bus_size);
- if (!addr) {
- ttm_mem_io_free(bdev, mem);
- return -ENOMEM;
+ /* Don't move nonexistent data. Clear destination instead. */
+ if (src_ops->maps_tt && (!ttm || !ttm_tt_is_populated(ttm))) {
+ if (ttm && !(ttm->page_flags & TTM_PAGE_FLAG_ZERO_ALLOC))
+ return;
+
+ for (i = 0; i < num_pages; ++i) {
+ dst_ops->map_local(dst_iter, &dst_map, i);
+ if (dst_map.is_iomem)
+ memset_io(dst_map.vaddr_iomem, 0, PAGE_SIZE);
+ else
+ memset(dst_map.vaddr, 0, PAGE_SIZE);
+ if (dst_ops->unmap_local)
+ dst_ops->unmap_local(dst_iter, &dst_map);
}
+ return;
}
- *virtual = addr;
- return 0;
-}
-
-static void ttm_resource_iounmap(struct ttm_device *bdev,
- struct ttm_resource *mem,
- void *virtual)
-{
- if (virtual && mem->bus.addr == NULL)
- iounmap(virtual);
- ttm_mem_io_free(bdev, mem);
-}
-
-static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
-{
- uint32_t *dstP =
- (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
- uint32_t *srcP =
- (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));
-
- int i;
- for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
- iowrite32(ioread32(srcP++), dstP++);
- return 0;
-}
-
-static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
- unsigned long page,
- pgprot_t prot)
-{
- struct page *d = ttm->pages[page];
- void *dst;
-
- if (!d)
- return -ENOMEM;
-
- src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
- dst = kmap_atomic_prot(d, prot);
- if (!dst)
- return -ENOMEM;
-
- memcpy_fromio(dst, src, PAGE_SIZE);
-
- kunmap_atomic(dst);
-
- return 0;
-}
-
-static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
- unsigned long page,
- pgprot_t prot)
-{
- struct page *s = ttm->pages[page];
- void *src;
-
- if (!s)
- return -ENOMEM;
-
- dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
- src = kmap_atomic_prot(s, prot);
- if (!src)
- return -ENOMEM;
- memcpy_toio(dst, src, PAGE_SIZE);
+ for (i = 0; i < num_pages; ++i) {
+ dst_ops->map_local(dst_iter, &dst_map, i);
+ src_ops->map_local(src_iter, &src_map, i);
- kunmap_atomic(src);
+ drm_memcpy_from_wc(&dst_map, &src_map, PAGE_SIZE);
- return 0;
+ if (src_ops->unmap_local)
+ src_ops->unmap_local(src_iter, &src_map);
+ if (dst_ops->unmap_local)
+ dst_ops->unmap_local(dst_iter, &dst_map);
+ }
}
+EXPORT_SYMBOL(ttm_move_memcpy);
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
struct ttm_operation_ctx *ctx,
- struct ttm_resource *new_mem)
+ struct ttm_resource *dst_mem)
{
struct ttm_device *bdev = bo->bdev;
- struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type);
+ struct ttm_resource_manager *dst_man =
+ ttm_manager_type(bo->bdev, dst_mem->mem_type);
struct ttm_tt *ttm = bo->ttm;
- struct ttm_resource *old_mem = &bo->mem;
- struct ttm_resource old_copy = *old_mem;
- void *old_iomap;
- void *new_iomap;
- int ret;
- unsigned long i;
-
- ret = ttm_bo_wait_ctx(bo, ctx);
- if (ret)
- return ret;
-
- ret = ttm_resource_ioremap(bdev, old_mem, &old_iomap);
- if (ret)
- return ret;
- ret = ttm_resource_ioremap(bdev, new_mem, &new_iomap);
- if (ret)
- goto out;
-
- /*
- * Single TTM move. NOP.
- */
- if (old_iomap == NULL && new_iomap == NULL)
- goto out2;
-
- /*
- * Don't move nonexistent data. Clear destination instead.
- */
- if (old_iomap == NULL &&
- (ttm == NULL || (!ttm_tt_is_populated(ttm) &&
- !(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)))) {
- memset_io(new_iomap, 0, new_mem->num_pages*PAGE_SIZE);
- goto out2;
- }
+ struct ttm_resource *src_mem = bo->resource;
+ struct ttm_resource_manager *src_man =
+ ttm_manager_type(bdev, src_mem->mem_type);
+ struct ttm_resource src_copy = *src_mem;
+ union {
+ struct ttm_kmap_iter_tt tt;
+ struct ttm_kmap_iter_linear_io io;
+ } _dst_iter, _src_iter;
+ struct ttm_kmap_iter *dst_iter, *src_iter;
+ int ret = 0;
- /*
- * TTM might be null for moves within the same region.
- */
- if (ttm) {
+ if (ttm && ((ttm->page_flags & TTM_PAGE_FLAG_SWAPPED) ||
+ dst_man->use_tt)) {
ret = ttm_tt_populate(bdev, ttm, ctx);
if (ret)
- goto out1;
+ return ret;
}
- for (i = 0; i < new_mem->num_pages; ++i) {
- if (old_iomap == NULL) {
- pgprot_t prot = ttm_io_prot(bo, old_mem, PAGE_KERNEL);
- ret = ttm_copy_ttm_io_page(ttm, new_iomap, i,
- prot);
- } else if (new_iomap == NULL) {
- pgprot_t prot = ttm_io_prot(bo, new_mem, PAGE_KERNEL);
- ret = ttm_copy_io_ttm_page(ttm, old_iomap, i,
- prot);
- } else {
- ret = ttm_copy_io_page(new_iomap, old_iomap, i);
- }
- if (ret)
- goto out1;
+ dst_iter = ttm_kmap_iter_linear_io_init(&_dst_iter.io, bdev, dst_mem);
+ if (PTR_ERR(dst_iter) == -EINVAL && dst_man->use_tt)
+ dst_iter = ttm_kmap_iter_tt_init(&_dst_iter.tt, bo->ttm);
+ if (IS_ERR(dst_iter))
+ return PTR_ERR(dst_iter);
+
+ src_iter = ttm_kmap_iter_linear_io_init(&_src_iter.io, bdev, src_mem);
+ if (PTR_ERR(src_iter) == -EINVAL && src_man->use_tt)
+ src_iter = ttm_kmap_iter_tt_init(&_src_iter.tt, bo->ttm);
+ if (IS_ERR(src_iter)) {
+ ret = PTR_ERR(src_iter);
+ goto out_src_iter;
}
- mb();
-out2:
- old_copy = *old_mem;
- ttm_bo_assign_mem(bo, new_mem);
+ ttm_move_memcpy(bo, dst_mem->num_pages, dst_iter, src_iter);
+ src_copy = *src_mem;
+ ttm_bo_move_sync_cleanup(bo, dst_mem);
- if (!man->use_tt)
- ttm_bo_tt_destroy(bo);
+ if (!src_iter->ops->maps_tt)
+ ttm_kmap_iter_linear_io_fini(&_src_iter.io, bdev, &src_copy);
+out_src_iter:
+ if (!dst_iter->ops->maps_tt)
+ ttm_kmap_iter_linear_io_fini(&_dst_iter.io, bdev, dst_mem);
-out1:
- ttm_resource_iounmap(bdev, old_mem, new_iomap);
-out:
- ttm_resource_iounmap(bdev, &old_copy, old_iomap);
-
- /*
- * On error, keep the mm node!
- */
- if (!ret)
- ttm_resource_free(bo, &old_copy);
return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);
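
(Illustrative sketch, not part of this patch: a driver without copy acceleration can route its ttm_device_funcs.move callback through the rewritten helper. Since ttm_bo_move_memcpy() no longer waits for idle itself, the caller does; the foo_ names are hypothetical.)

static int foo_bo_move(struct ttm_buffer_object *bo, bool evict,
		       struct ttm_operation_ctx *ctx,
		       struct ttm_resource *new_mem,
		       struct ttm_place *hop)
{
	int ret;

	/* The memcpy helper above no longer waits for idle internally. */
	ret = ttm_bo_wait_ctx(bo, ctx);
	if (ret)
		return ret;

	/* Fall back to the kmap-iterator based memcpy path. */
	return ttm_bo_move_memcpy(bo, ctx, new_mem);
}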
@@ -336,27 +259,7 @@ pgprot_t ttm_io_prot(struct ttm_buffer_object *bo, struct ttm_resource *res,
man = ttm_manager_type(bo->bdev, res->mem_type);
caching = man->use_tt ? bo->ttm->caching : res->bus.caching;
- /* Cached mappings need no adjustment */
- if (caching == ttm_cached)
- return tmp;
-
-#if defined(__i386__) || defined(__x86_64__)
- if (caching == ttm_write_combined)
- tmp = pgprot_writecombine(tmp);
- else if (boot_cpu_data.x86 > 3)
- tmp = pgprot_noncached(tmp);
-#endif
-#if defined(__ia64__) || defined(__arm__) || defined(__aarch64__) || \
- defined(__powerpc__) || defined(__mips__)
- if (caching == ttm_write_combined)
- tmp = pgprot_writecombine(tmp);
- else
- tmp = pgprot_noncached(tmp);
-#endif
-#if defined(__sparc__)
- tmp = pgprot_noncached(tmp);
-#endif
- return tmp;
+ return ttm_prot_from_caching(caching, tmp);
}
EXPORT_SYMBOL(ttm_io_prot);
@@ -365,24 +268,23 @@ static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
unsigned long size,
struct ttm_bo_kmap_obj *map)
{
- struct ttm_resource *mem = &bo->mem;
+ struct ttm_resource *mem = bo->resource;
- if (bo->mem.bus.addr) {
+ if (bo->resource->bus.addr) {
map->bo_kmap_type = ttm_bo_map_premapped;
- map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
+ map->virtual = ((u8 *)bo->resource->bus.addr) + offset;
} else {
+ resource_size_t res = bo->resource->bus.offset + offset;
+
map->bo_kmap_type = ttm_bo_map_iomap;
if (mem->bus.caching == ttm_write_combined)
- map->virtual = ioremap_wc(bo->mem.bus.offset + offset,
- size);
+ map->virtual = ioremap_wc(res, size);
#ifdef CONFIG_X86
else if (mem->bus.caching == ttm_cached)
- map->virtual = ioremap_cache(bo->mem.bus.offset + offset,
- size);
+ map->virtual = ioremap_cache(res, size);
#endif
else
- map->virtual = ioremap(bo->mem.bus.offset + offset,
- size);
+ map->virtual = ioremap(res, size);
}
return (!map->virtual) ? -ENOMEM : 0;
}
@@ -392,7 +294,7 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
unsigned long num_pages,
struct ttm_bo_kmap_obj *map)
{
- struct ttm_resource *mem = &bo->mem;
+ struct ttm_resource *mem = bo->resource;
struct ttm_operation_ctx ctx = {
.interruptible = false,
.no_wait_gpu = false
@@ -438,15 +340,15 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo,
map->virtual = NULL;
map->bo = bo;
- if (num_pages > bo->mem.num_pages)
+ if (num_pages > bo->resource->num_pages)
return -EINVAL;
- if ((start_page + num_pages) > bo->mem.num_pages)
+ if ((start_page + num_pages) > bo->resource->num_pages)
return -EINVAL;
- ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
+ ret = ttm_mem_io_reserve(bo->bdev, bo->resource);
if (ret)
return ret;
- if (!bo->mem.bus.is_iomem) {
+ if (!bo->resource->bus.is_iomem) {
return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
} else {
offset = start_page << PAGE_SHIFT;
@@ -475,7 +377,7 @@ void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
default:
BUG();
}
- ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
+ ttm_mem_io_free(map->bo->bdev, map->bo->resource);
map->virtual = NULL;
map->page = NULL;
}
@@ -483,7 +385,7 @@ EXPORT_SYMBOL(ttm_bo_kunmap);
int ttm_bo_vmap(struct ttm_buffer_object *bo, struct dma_buf_map *map)
{
- struct ttm_resource *mem = &bo->mem;
+ struct ttm_resource *mem = bo->resource;
int ret;
ret = ttm_mem_io_reserve(bo->bdev, mem);
@@ -542,7 +444,7 @@ EXPORT_SYMBOL(ttm_bo_vmap);
void ttm_bo_vunmap(struct ttm_buffer_object *bo, struct dma_buf_map *map)
{
- struct ttm_resource *mem = &bo->mem;
+ struct ttm_resource *mem = bo->resource;
if (dma_buf_map_is_null(map))
return;
@@ -553,7 +455,7 @@ void ttm_bo_vunmap(struct ttm_buffer_object *bo, struct dma_buf_map *map)
iounmap(map->vaddr_iomem);
dma_buf_map_clear(map);
- ttm_mem_io_free(bo->bdev, &bo->mem);
+ ttm_mem_io_free(bo->bdev, bo->resource);
}
EXPORT_SYMBOL(ttm_bo_vunmap);
@@ -567,7 +469,7 @@ static int ttm_bo_wait_free_node(struct ttm_buffer_object *bo,
if (!dst_use_tt)
ttm_bo_tt_destroy(bo);
- ttm_resource_free(bo, &bo->mem);
+ ttm_resource_free(bo, &bo->resource);
return 0;
}
@@ -605,6 +507,7 @@ static int ttm_bo_move_to_ghost(struct ttm_buffer_object *bo,
ghost_obj->ttm = NULL;
else
bo->ttm = NULL;
+ bo->resource = NULL;
dma_resv_unlock(&ghost_obj->base._resv);
ttm_bo_put(ghost_obj);
@@ -615,7 +518,9 @@ static void ttm_bo_move_pipeline_evict(struct ttm_buffer_object *bo,
struct dma_fence *fence)
{
struct ttm_device *bdev = bo->bdev;
- struct ttm_resource_manager *from = ttm_manager_type(bdev, bo->mem.mem_type);
+ struct ttm_resource_manager *from;
+
+ from = ttm_manager_type(bdev, bo->resource->mem_type);
/**
* BO doesn't have a TTM we need to bind/unbind. Just remember
@@ -628,7 +533,7 @@ static void ttm_bo_move_pipeline_evict(struct ttm_buffer_object *bo,
}
spin_unlock(&from->move_lock);
- ttm_resource_free(bo, &bo->mem);
+ ttm_resource_free(bo, &bo->resource);
dma_fence_put(bo->moving);
bo->moving = dma_fence_get(fence);
@@ -641,7 +546,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
struct ttm_resource *new_mem)
{
struct ttm_device *bdev = bo->bdev;
- struct ttm_resource_manager *from = ttm_manager_type(bdev, bo->mem.mem_type);
+ struct ttm_resource_manager *from = ttm_manager_type(bdev, bo->resource->mem_type);
struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type);
int ret = 0;
@@ -662,26 +567,82 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
+/**
+ * ttm_bo_pipeline_gutting - purge the contents of a bo
+ * @bo: The buffer object
+ *
+ * Purge the contents of a bo, async if the bo is not idle.
+ * After a successful call, the bo is left unpopulated in
+ * system placement. The function may wait uninterruptibly
+ * for idle on OOM.
+ *
+ * Return: 0 if successful, negative error code on failure.
+ */
int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo)
{
static const struct ttm_place sys_mem = { .mem_type = TTM_PL_SYSTEM };
struct ttm_buffer_object *ghost;
+ struct ttm_resource *sys_res;
+ struct ttm_tt *ttm;
int ret;
- ret = ttm_buffer_object_transfer(bo, &ghost);
+ ret = ttm_resource_alloc(bo, &sys_mem, &sys_res);
if (ret)
return ret;
+ /* If already idle, no need for ghost object dance. */
+ ret = ttm_bo_wait(bo, false, true);
+ if (ret != -EBUSY) {
+ if (!bo->ttm) {
+ /* See comment below about clearing. */
+ ret = ttm_tt_create(bo, true);
+ if (ret)
+ goto error_free_sys_mem;
+ } else {
+ ttm_tt_unpopulate(bo->bdev, bo->ttm);
+ if (bo->type == ttm_bo_type_device)
+ ttm_tt_mark_for_clear(bo->ttm);
+ }
+ ttm_resource_free(bo, &bo->resource);
+ ttm_bo_assign_mem(bo, sys_res);
+ return 0;
+ }
+
+ /*
+ * We need an unpopulated ttm_tt after giving our current one,
+ * if any, to the ghost object. And we can't afford to fail
+ * creating one *after* the operation. If the bo subsequently gets
+ * resurrected, make sure it's cleared (if ttm_bo_type_device)
+ * to avoid leaking sensitive information to user-space.
+ */
+
+ ttm = bo->ttm;
+ bo->ttm = NULL;
+ ret = ttm_tt_create(bo, true);
+ swap(bo->ttm, ttm);
+ if (ret)
+ goto error_free_sys_mem;
+
+ ret = ttm_buffer_object_transfer(bo, &ghost);
+ if (ret)
+ goto error_destroy_tt;
+
ret = dma_resv_copy_fences(&ghost->base._resv, bo->base.resv);
/* Last resort, wait for the BO to be idle when we are OOM */
if (ret)
ttm_bo_wait(bo, false, false);
- ttm_resource_alloc(bo, &sys_mem, &bo->mem);
- bo->ttm = NULL;
-
dma_resv_unlock(&ghost->base._resv);
ttm_bo_put(ghost);
-
+ bo->ttm = ttm;
+ bo->resource = NULL;
+ ttm_bo_assign_mem(bo, sys_res);
return 0;
+
+error_destroy_tt:
+ ttm_tt_destroy(bo->bdev, ttm);
+
+error_free_sys_mem:
+ ttm_resource_free(bo, &sys_res);
+ return ret;
}
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 9bd15cb39145..f56be5bc0861 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -102,7 +102,7 @@ static unsigned long ttm_bo_io_mem_pfn(struct ttm_buffer_object *bo,
if (bdev->funcs->io_mem_pfn)
return bdev->funcs->io_mem_pfn(bo, page_offset);
- return (bo->mem.bus.offset >> PAGE_SHIFT) + page_offset;
+ return (bo->resource->bus.offset >> PAGE_SHIFT) + page_offset;
}
/**
@@ -200,10 +200,10 @@ static vm_fault_t ttm_bo_vm_insert_huge(struct vm_fault *vmf,
/* Fault should not cross bo boundary. */
page_offset &= ~(fault_page_size - 1);
- if (page_offset + fault_page_size > bo->mem.num_pages)
+ if (page_offset + fault_page_size > bo->resource->num_pages)
goto out_fallback;
- if (bo->mem.bus.is_iomem)
+ if (bo->resource->bus.is_iomem)
pfn = ttm_bo_io_mem_pfn(bo, page_offset);
else
pfn = page_to_pfn(ttm->pages[page_offset]);
@@ -213,7 +213,7 @@ static vm_fault_t ttm_bo_vm_insert_huge(struct vm_fault *vmf,
goto out_fallback;
/* Check that memory is contiguous. */
- if (!bo->mem.bus.is_iomem) {
+ if (!bo->resource->bus.is_iomem) {
for (i = 1; i < fault_page_size; ++i) {
if (page_to_pfn(ttm->pages[page_offset + i]) != pfn + i)
goto out_fallback;
@@ -299,7 +299,7 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
if (unlikely(ret != 0))
return ret;
- err = ttm_mem_io_reserve(bdev, &bo->mem);
+ err = ttm_mem_io_reserve(bdev, bo->resource);
if (unlikely(err != 0))
return VM_FAULT_SIGBUS;
@@ -308,11 +308,11 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
page_last = vma_pages(vma) + vma->vm_pgoff -
drm_vma_node_start(&bo->base.vma_node);
- if (unlikely(page_offset >= bo->mem.num_pages))
+ if (unlikely(page_offset >= bo->resource->num_pages))
return VM_FAULT_SIGBUS;
- prot = ttm_io_prot(bo, &bo->mem, prot);
- if (!bo->mem.bus.is_iomem) {
+ prot = ttm_io_prot(bo, bo->resource, prot);
+ if (!bo->resource->bus.is_iomem) {
struct ttm_operation_ctx ctx = {
.interruptible = false,
.no_wait_gpu = false,
@@ -337,7 +337,7 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
* first page.
*/
for (i = 0; i < num_prefault; ++i) {
- if (bo->mem.bus.is_iomem) {
+ if (bo->resource->bus.is_iomem) {
pfn = ttm_bo_io_mem_pfn(bo, page_offset);
} else {
page = ttm->pages[page_offset];
@@ -359,12 +359,7 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
* at arbitrary times while the data is mmap'ed.
* See vmf_insert_mixed_prot() for a discussion.
*/
- if (vma->vm_flags & VM_MIXEDMAP)
- ret = vmf_insert_mixed_prot(vma, address,
- __pfn_to_pfn_t(pfn, PFN_DEV),
- prot);
- else
- ret = vmf_insert_pfn_prot(vma, address, pfn, prot);
+ ret = vmf_insert_pfn_prot(vma, address, pfn, prot);
/* Never error on prefaulted PTEs */
if (unlikely((ret & VM_FAULT_ERROR))) {
@@ -411,15 +406,9 @@ vm_fault_t ttm_bo_vm_dummy_page(struct vm_fault *vmf, pgprot_t prot)
pfn = page_to_pfn(page);
/* Prefault the entire VMA range right away to avoid further faults */
- for (address = vma->vm_start; address < vma->vm_end; address += PAGE_SIZE) {
-
- if (vma->vm_flags & VM_MIXEDMAP)
- ret = vmf_insert_mixed_prot(vma, address,
- __pfn_to_pfn_t(pfn, PFN_DEV),
- prot);
- else
- ret = vmf_insert_pfn_prot(vma, address, pfn, prot);
- }
+ for (address = vma->vm_start; address < vma->vm_end;
+ address += PAGE_SIZE)
+ ret = vmf_insert_pfn_prot(vma, address, pfn, prot);
return ret;
}
@@ -521,14 +510,14 @@ int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
<< PAGE_SHIFT);
int ret;
- if (len < 1 || (offset + len) >> PAGE_SHIFT > bo->mem.num_pages)
+ if (len < 1 || (offset + len) >> PAGE_SHIFT > bo->resource->num_pages)
return -EIO;
ret = ttm_bo_reserve(bo, true, false, NULL);
if (ret)
return ret;
- switch (bo->mem.mem_type) {
+ switch (bo->resource->mem_type) {
case TTM_PL_SYSTEM:
if (unlikely(bo->ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
ret = ttm_tt_swapin(bo->ttm);
@@ -560,8 +549,14 @@ static const struct vm_operations_struct ttm_bo_vm_ops = {
.access = ttm_bo_vm_access,
};
-static void ttm_bo_mmap_vma_setup(struct ttm_buffer_object *bo, struct vm_area_struct *vma)
+int ttm_bo_mmap_obj(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
{
+ /* Enforce no COW since it would have really strange behavior with it. */
+ if (is_cow_mapping(vma->vm_flags))
+ return -EINVAL;
+
+ ttm_bo_get(bo);
+
/*
* Drivers may want to override the vm_ops field. Otherwise we
* use TTM's default callbacks.
@@ -576,21 +571,8 @@ static void ttm_bo_mmap_vma_setup(struct ttm_buffer_object *bo, struct vm_area_s
vma->vm_private_data = bo;
- /*
- * We'd like to use VM_PFNMAP on shared mappings, where
- * (vma->vm_flags & VM_SHARED) != 0, for performance reasons,
- * but for some reason VM_PFNMAP + x86 PAT + write-combine is very
- * bad for performance. Until that has been sorted out, use
- * VM_MIXEDMAP on all mappings. See freedesktop.org bug #75719
- */
- vma->vm_flags |= VM_MIXEDMAP;
+ vma->vm_flags |= VM_PFNMAP;
vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
-}
-
-int ttm_bo_mmap_obj(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
-{
- ttm_bo_get(bo);
- ttm_bo_mmap_vma_setup(bo, vma);
return 0;
}
EXPORT_SYMBOL(ttm_bo_mmap_obj);
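
(Illustrative sketch, not part of this patch: with the vma setup folded into ttm_bo_mmap_obj(), a GEM-based driver's object mmap callback reduces to roughly the following, similar in spirit to the drm_gem_ttm helper; the foo_ name is hypothetical.)

static int foo_gem_object_mmap(struct drm_gem_object *gem,
			       struct vm_area_struct *vma)
{
	struct ttm_buffer_object *bo =
		container_of(gem, struct ttm_buffer_object, base);
	int ret;

	/* Rejects COW mappings, sets vm_ops/VM_PFNMAP and takes a bo ref. */
	ret = ttm_bo_mmap_obj(vma, bo);
	if (ret < 0)
		return ret;

	/* TTM refcounts the mapping via the bo ref, so drop the GEM ref. */
	drm_gem_object_put(gem);
	return 0;
}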
diff --git a/drivers/gpu/drm/ttm/ttm_module.c b/drivers/gpu/drm/ttm/ttm_module.c
index 56b0efdba1a9..997c458f68a9 100644
--- a/drivers/gpu/drm/ttm/ttm_module.c
+++ b/drivers/gpu/drm/ttm/ttm_module.c
@@ -31,12 +31,47 @@
*/
#include <linux/module.h>
#include <linux/device.h>
+#include <linux/pgtable.h>
#include <linux/sched.h>
#include <linux/debugfs.h>
#include <drm/drm_sysfs.h>
+#include <drm/ttm/ttm_caching.h>
#include "ttm_module.h"
+/**
+ * ttm_prot_from_caching - Modify the page protection according to the
+ * ttm caching mode
+ * @caching: The ttm caching mode
+ * @tmp: The original page protection
+ *
+ * Return: The modified page protection
+ */
+pgprot_t ttm_prot_from_caching(enum ttm_caching caching, pgprot_t tmp)
+{
+ /* Cached mappings need no adjustment */
+ if (caching == ttm_cached)
+ return tmp;
+
+#if defined(__i386__) || defined(__x86_64__)
+ if (caching == ttm_write_combined)
+ tmp = pgprot_writecombine(tmp);
+ else if (boot_cpu_data.x86 > 3)
+ tmp = pgprot_noncached(tmp);
+#endif
+#if defined(__ia64__) || defined(__arm__) || defined(__aarch64__) || \
+ defined(__powerpc__) || defined(__mips__)
+ if (caching == ttm_write_combined)
+ tmp = pgprot_writecombine(tmp);
+ else
+ tmp = pgprot_noncached(tmp);
+#endif
+#if defined(__sparc__)
+ tmp = pgprot_noncached(tmp);
+#endif
+ return tmp;
+}
+
struct dentry *ttm_debugfs_root;
static int __init ttm_init(void)
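
(Illustrative sketch, not part of this patch: with the pgprot fixup now exported from ttm_module.c, code that kmaps a struct ttm_tt page can derive the matching protection from its caching mode, much like the new TT kmap iterator further below; the foo_ name is hypothetical.)

static void foo_clear_tt_page(struct ttm_tt *ttm, pgoff_t i)
{
	pgprot_t prot = ttm_prot_from_caching(ttm->caching, PAGE_KERNEL);
	void *vaddr = kmap_local_page_prot(ttm->pages[i], prot);

	/* Access the page through a mapping honouring WC/uncached caching. */
	memset(vaddr, 0, PAGE_SIZE);
	kunmap_local(vaddr);
}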
diff --git a/drivers/gpu/drm/ttm/ttm_range_manager.c b/drivers/gpu/drm/ttm/ttm_range_manager.c
index b9d5da6e6a81..03395386e8a7 100644
--- a/drivers/gpu/drm/ttm/ttm_range_manager.c
+++ b/drivers/gpu/drm/ttm/ttm_range_manager.c
@@ -29,12 +29,13 @@
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
*/
-#include <drm/ttm/ttm_bo_driver.h>
+#include <drm/ttm/ttm_device.h>
#include <drm/ttm/ttm_placement.h>
+#include <drm/ttm/ttm_range_manager.h>
+#include <drm/ttm/ttm_bo_api.h>
#include <drm/drm_mm.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
-#include <linux/module.h>
/*
* Currently we use a spinlock for the lock, but a mutex *may* be
@@ -57,11 +58,11 @@ to_range_manager(struct ttm_resource_manager *man)
static int ttm_range_man_alloc(struct ttm_resource_manager *man,
struct ttm_buffer_object *bo,
const struct ttm_place *place,
- struct ttm_resource *mem)
+ struct ttm_resource **res)
{
struct ttm_range_manager *rman = to_range_manager(man);
+ struct ttm_range_mgr_node *node;
struct drm_mm *mm = &rman->mm;
- struct drm_mm_node *node;
enum drm_mm_insert_mode mode;
unsigned long lpfn;
int ret;
@@ -70,7 +71,7 @@ static int ttm_range_man_alloc(struct ttm_resource_manager *man,
if (!lpfn)
lpfn = man->size;
- node = kzalloc(sizeof(*node), GFP_KERNEL);
+ node = kzalloc(struct_size(node, mm_nodes, 1), GFP_KERNEL);
if (!node)
return -ENOMEM;
@@ -78,35 +79,36 @@ static int ttm_range_man_alloc(struct ttm_resource_manager *man,
if (place->flags & TTM_PL_FLAG_TOPDOWN)
mode = DRM_MM_INSERT_HIGH;
+ ttm_resource_init(bo, place, &node->base);
+
spin_lock(&rman->lock);
- ret = drm_mm_insert_node_in_range(mm, node, mem->num_pages,
+ ret = drm_mm_insert_node_in_range(mm, &node->mm_nodes[0],
+ node->base.num_pages,
bo->page_alignment, 0,
place->fpfn, lpfn, mode);
spin_unlock(&rman->lock);
if (unlikely(ret)) {
kfree(node);
- } else {
- mem->mm_node = node;
- mem->start = node->start;
+ return ret;
}
- return ret;
+ node->base.start = node->mm_nodes[0].start;
+ *res = &node->base;
+ return 0;
}
static void ttm_range_man_free(struct ttm_resource_manager *man,
- struct ttm_resource *mem)
+ struct ttm_resource *res)
{
+ struct ttm_range_mgr_node *node = to_ttm_range_mgr_node(res);
struct ttm_range_manager *rman = to_range_manager(man);
- if (mem->mm_node) {
- spin_lock(&rman->lock);
- drm_mm_remove_node(mem->mm_node);
- spin_unlock(&rman->lock);
+ spin_lock(&rman->lock);
+ drm_mm_remove_node(&node->mm_nodes[0]);
+ spin_unlock(&rman->lock);
- kfree(mem->mm_node);
- mem->mm_node = NULL;
- }
+ kfree(node);
}
static void ttm_range_man_debug(struct ttm_resource_manager *man,
@@ -125,6 +127,17 @@ static const struct ttm_resource_manager_func ttm_range_manager_func = {
.debug = ttm_range_man_debug
};
+/**
+ * ttm_range_man_init
+ *
+ * @bdev: ttm device
+ * @type: memory manager type
+ * @use_tt: if the memory manager uses tt
+ * @p_size: size of area to be managed in pages.
+ *
+ * Initialise a generic range manager for the selected memory type.
+ * The range manager is installed for this device in the type slot.
+ */
int ttm_range_man_init(struct ttm_device *bdev,
unsigned type, bool use_tt,
unsigned long p_size)
@@ -152,6 +165,14 @@ int ttm_range_man_init(struct ttm_device *bdev,
}
EXPORT_SYMBOL(ttm_range_man_init);
+/**
+ * ttm_range_man_fini
+ *
+ * @bdev: ttm device
+ * @type: memory manager type
+ *
+ * Remove the generic range manager from a slot and tear it down.
+ */
int ttm_range_man_fini(struct ttm_device *bdev,
unsigned type)
{
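
(Illustrative sketch, not part of this patch: a driver installs and tears down the generic range manager for a VRAM-style domain roughly as follows; the foo_ names and vram_size parameter are hypothetical.)

static int foo_vram_mgr_init(struct ttm_device *bdev, u64 vram_size)
{
	/* Manage [0, vram_size) in pages, without struct ttm_tt backing. */
	return ttm_range_man_init(bdev, TTM_PL_VRAM, false,
				  vram_size >> PAGE_SHIFT);
}

static int foo_vram_mgr_fini(struct ttm_device *bdev)
{
	return ttm_range_man_fini(bdev, TTM_PL_VRAM);
}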
diff --git a/drivers/gpu/drm/ttm/ttm_resource.c b/drivers/gpu/drm/ttm/ttm_resource.c
index 59e2b7157e41..2431717376e7 100644
--- a/drivers/gpu/drm/ttm/ttm_resource.c
+++ b/drivers/gpu/drm/ttm/ttm_resource.c
@@ -22,17 +22,17 @@
* Authors: Christian König
*/
+#include <linux/dma-buf-map.h>
+#include <linux/io-mapping.h>
+#include <linux/scatterlist.h>
+
#include <drm/ttm/ttm_resource.h>
#include <drm/ttm/ttm_bo_driver.h>
-int ttm_resource_alloc(struct ttm_buffer_object *bo,
- const struct ttm_place *place,
- struct ttm_resource *res)
+void ttm_resource_init(struct ttm_buffer_object *bo,
+ const struct ttm_place *place,
+ struct ttm_resource *res)
{
- struct ttm_resource_manager *man =
- ttm_manager_type(bo->bdev, place->mem_type);
-
- res->mm_node = NULL;
res->start = 0;
res->num_pages = PFN_UP(bo->base.size);
res->mem_type = place->mem_type;
@@ -41,18 +41,29 @@ int ttm_resource_alloc(struct ttm_buffer_object *bo,
res->bus.offset = 0;
res->bus.is_iomem = false;
res->bus.caching = ttm_cached;
-
- return man->func->alloc(man, bo, place, res);
}
+EXPORT_SYMBOL(ttm_resource_init);
-void ttm_resource_free(struct ttm_buffer_object *bo, struct ttm_resource *res)
+int ttm_resource_alloc(struct ttm_buffer_object *bo,
+ const struct ttm_place *place,
+ struct ttm_resource **res_ptr)
{
struct ttm_resource_manager *man =
- ttm_manager_type(bo->bdev, res->mem_type);
+ ttm_manager_type(bo->bdev, place->mem_type);
- man->func->free(man, res);
- res->mm_node = NULL;
- res->mem_type = TTM_PL_SYSTEM;
+ return man->func->alloc(man, bo, place, res_ptr);
+}
+
+void ttm_resource_free(struct ttm_buffer_object *bo, struct ttm_resource **res)
+{
+ struct ttm_resource_manager *man;
+
+ if (!*res)
+ return;
+
+ man = ttm_manager_type(bo->bdev, (*res)->mem_type);
+ man->func->free(man, *res);
+ *res = NULL;
}
EXPORT_SYMBOL(ttm_resource_free);
@@ -147,3 +158,192 @@ void ttm_resource_manager_debug(struct ttm_resource_manager *man,
man->func->debug(man, p);
}
EXPORT_SYMBOL(ttm_resource_manager_debug);
+
+static void ttm_kmap_iter_iomap_map_local(struct ttm_kmap_iter *iter,
+ struct dma_buf_map *dmap,
+ pgoff_t i)
+{
+ struct ttm_kmap_iter_iomap *iter_io =
+ container_of(iter, typeof(*iter_io), base);
+ void __iomem *addr;
+
+retry:
+ while (i >= iter_io->cache.end) {
+ iter_io->cache.sg = iter_io->cache.sg ?
+ sg_next(iter_io->cache.sg) : iter_io->st->sgl;
+ iter_io->cache.i = iter_io->cache.end;
+ iter_io->cache.end += sg_dma_len(iter_io->cache.sg) >>
+ PAGE_SHIFT;
+ iter_io->cache.offs = sg_dma_address(iter_io->cache.sg) -
+ iter_io->start;
+ }
+
+ if (i < iter_io->cache.i) {
+ iter_io->cache.end = 0;
+ iter_io->cache.sg = NULL;
+ goto retry;
+ }
+
+ addr = io_mapping_map_local_wc(iter_io->iomap, iter_io->cache.offs +
+ (((resource_size_t)i - iter_io->cache.i)
+ << PAGE_SHIFT));
+ dma_buf_map_set_vaddr_iomem(dmap, addr);
+}
+
+static void ttm_kmap_iter_iomap_unmap_local(struct ttm_kmap_iter *iter,
+ struct dma_buf_map *map)
+{
+ io_mapping_unmap_local(map->vaddr_iomem);
+}
+
+static const struct ttm_kmap_iter_ops ttm_kmap_iter_io_ops = {
+ .map_local = ttm_kmap_iter_iomap_map_local,
+ .unmap_local = ttm_kmap_iter_iomap_unmap_local,
+ .maps_tt = false,
+};
+
+/**
+ * ttm_kmap_iter_iomap_init - Initialize a struct ttm_kmap_iter_iomap
+ * @iter_io: The struct ttm_kmap_iter_iomap to initialize.
+ * @iomap: The struct io_mapping representing the underlying linear io_memory.
+ * @st: sg_table into @iomap, representing the memory of the struct
+ * ttm_resource.
+ * @start: Offset that needs to be subtracted from @st to make
+ * sg_dma_address(st->sgl) - @start == 0 for @iomap start.
+ *
+ * Return: Pointer to the embedded struct ttm_kmap_iter.
+ */
+struct ttm_kmap_iter *
+ttm_kmap_iter_iomap_init(struct ttm_kmap_iter_iomap *iter_io,
+ struct io_mapping *iomap,
+ struct sg_table *st,
+ resource_size_t start)
+{
+ iter_io->base.ops = &ttm_kmap_iter_io_ops;
+ iter_io->iomap = iomap;
+ iter_io->st = st;
+ iter_io->start = start;
+ memset(&iter_io->cache, 0, sizeof(iter_io->cache));
+
+ return &iter_io->base;
+}
+EXPORT_SYMBOL(ttm_kmap_iter_iomap_init);
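
(Illustrative sketch, not part of this patch: a driver whose VRAM sits behind a struct io_mapping can pair a source iterator built with this helper with a TT destination iterator and hand both to ttm_move_memcpy(); the foo_ name, the sg_table and the DMA start offset are hypothetical driver state.)

static void foo_copy_vram_to_tt(struct ttm_buffer_object *bo,
				struct ttm_resource *dst_mem,
				struct io_mapping *vram_iomap,
				struct sg_table *src_st,
				resource_size_t vram_dma_start)
{
	struct ttm_kmap_iter_iomap src_iter;
	struct ttm_kmap_iter_tt dst_iter;

	/* Copies dst_mem->num_pages pages from VRAM into the bo's ttm_tt. */
	ttm_move_memcpy(bo, dst_mem->num_pages,
			ttm_kmap_iter_tt_init(&dst_iter, bo->ttm),
			ttm_kmap_iter_iomap_init(&src_iter, vram_iomap,
						 src_st, vram_dma_start));
}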
+
+/**
+ * DOC: Linear io iterator
+ *
+ * This code should die in the not too distant future. Best would be if we could
+ * make io-mapping use memremap for all io memory, and have memremap
+ * implement a kmap_local functionality. We could then strip a huge amount of
+ * code. These linear io iterators are implemented to mimic old functionality,
+ * and they don't use kmap_local semantics at all internally. Rather they use
+ * ioremap or friends, which at least on 32-bit add global TLB flushes and
+ * points of failure.
+ */
+
+static void ttm_kmap_iter_linear_io_map_local(struct ttm_kmap_iter *iter,
+ struct dma_buf_map *dmap,
+ pgoff_t i)
+{
+ struct ttm_kmap_iter_linear_io *iter_io =
+ container_of(iter, typeof(*iter_io), base);
+
+ *dmap = iter_io->dmap;
+ dma_buf_map_incr(dmap, i * PAGE_SIZE);
+}
+
+static const struct ttm_kmap_iter_ops ttm_kmap_iter_linear_io_ops = {
+ .map_local = ttm_kmap_iter_linear_io_map_local,
+ .maps_tt = false,
+};
+
+/**
+ * ttm_kmap_iter_linear_io_init - Initialize an iterator for linear io memory
+ * @iter_io: The iterator to initialize
+ * @bdev: The TTM device
+ * @mem: The ttm resource representing the iomap.
+ *
+ * This function is for internal TTM use only. It sets up a memcpy kmap iterator
+ * pointing at a linear chunk of io memory.
+ *
+ * Return: A pointer to the embedded struct ttm_kmap_iter or error pointer on
+ * failure.
+ */
+struct ttm_kmap_iter *
+ttm_kmap_iter_linear_io_init(struct ttm_kmap_iter_linear_io *iter_io,
+ struct ttm_device *bdev,
+ struct ttm_resource *mem)
+{
+ int ret;
+
+ ret = ttm_mem_io_reserve(bdev, mem);
+ if (ret)
+ goto out_err;
+ if (!mem->bus.is_iomem) {
+ ret = -EINVAL;
+ goto out_io_free;
+ }
+
+ if (mem->bus.addr) {
+ dma_buf_map_set_vaddr(&iter_io->dmap, mem->bus.addr);
+ iter_io->needs_unmap = false;
+ } else {
+ size_t bus_size = (size_t)mem->num_pages << PAGE_SHIFT;
+
+ iter_io->needs_unmap = true;
+ memset(&iter_io->dmap, 0, sizeof(iter_io->dmap));
+ if (mem->bus.caching == ttm_write_combined)
+ dma_buf_map_set_vaddr_iomem(&iter_io->dmap,
+ ioremap_wc(mem->bus.offset,
+ bus_size));
+ else if (mem->bus.caching == ttm_cached)
+ dma_buf_map_set_vaddr(&iter_io->dmap,
+ memremap(mem->bus.offset, bus_size,
+ MEMREMAP_WB |
+ MEMREMAP_WT |
+ MEMREMAP_WC));
+
+ /* If uncached requested or if mapping cached or wc failed */
+ if (dma_buf_map_is_null(&iter_io->dmap))
+ dma_buf_map_set_vaddr_iomem(&iter_io->dmap,
+ ioremap(mem->bus.offset,
+ bus_size));
+
+ if (dma_buf_map_is_null(&iter_io->dmap)) {
+ ret = -ENOMEM;
+ goto out_io_free;
+ }
+ }
+
+ iter_io->base.ops = &ttm_kmap_iter_linear_io_ops;
+ return &iter_io->base;
+
+out_io_free:
+ ttm_mem_io_free(bdev, mem);
+out_err:
+ return ERR_PTR(ret);
+}
+
+/**
+ * ttm_kmap_iter_linear_io_fini - Clean up an iterator for linear io memory
+ * @iter_io: The iterator to clean up
+ * @bdev: The TTM device
+ * @mem: The ttm resource representing the iomap.
+ *
+ * This function is for internal TTM use only. It cleans up a memcpy kmap
+ * iterator initialized by ttm_kmap_iter_linear_io_init.
+ */
+void
+ttm_kmap_iter_linear_io_fini(struct ttm_kmap_iter_linear_io *iter_io,
+ struct ttm_device *bdev,
+ struct ttm_resource *mem)
+{
+ if (iter_io->needs_unmap && dma_buf_map_is_set(&iter_io->dmap)) {
+ if (iter_io->dmap.is_iomem)
+ iounmap(iter_io->dmap.vaddr_iomem);
+ else
+ memunmap(iter_io->dmap.vaddr);
+ }
+
+ ttm_mem_io_free(bdev, mem);
+}
diff --git a/drivers/gpu/drm/ttm/ttm_sys_manager.c b/drivers/gpu/drm/ttm/ttm_sys_manager.c
index 474221e863d0..63aca52f75e1 100644
--- a/drivers/gpu/drm/ttm/ttm_sys_manager.c
+++ b/drivers/gpu/drm/ttm/ttm_sys_manager.c
@@ -3,20 +3,27 @@
#include <drm/ttm/ttm_resource.h>
#include <drm/ttm/ttm_device.h>
#include <drm/ttm/ttm_placement.h>
+#include <linux/slab.h>
#include "ttm_module.h"
static int ttm_sys_man_alloc(struct ttm_resource_manager *man,
struct ttm_buffer_object *bo,
const struct ttm_place *place,
- struct ttm_resource *mem)
+ struct ttm_resource **res)
{
+ *res = kzalloc(sizeof(**res), GFP_KERNEL);
+ if (!*res)
+ return -ENOMEM;
+
+ ttm_resource_init(bo, place, *res);
return 0;
}
static void ttm_sys_man_free(struct ttm_resource_manager *man,
- struct ttm_resource *mem)
+ struct ttm_resource *res)
{
+ kfree(res);
}
static const struct ttm_resource_manager_func ttm_sys_manager_func = {
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 539e0232cb3b..24031a8acd2d 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -415,7 +415,7 @@ DEFINE_SHOW_ATTRIBUTE(ttm_tt_debugfs_shrink);
#endif
-/**
+/*
* ttm_tt_mgr_init - register with the MM shrinker
*
* Register with the MM shrinker for swapping out BOs.
@@ -433,3 +433,48 @@ void ttm_tt_mgr_init(unsigned long num_pages, unsigned long num_dma32_pages)
if (!ttm_dma32_pages_limit)
ttm_dma32_pages_limit = num_dma32_pages;
}
+
+static void ttm_kmap_iter_tt_map_local(struct ttm_kmap_iter *iter,
+ struct dma_buf_map *dmap,
+ pgoff_t i)
+{
+ struct ttm_kmap_iter_tt *iter_tt =
+ container_of(iter, typeof(*iter_tt), base);
+
+ dma_buf_map_set_vaddr(dmap, kmap_local_page_prot(iter_tt->tt->pages[i],
+ iter_tt->prot));
+}
+
+static void ttm_kmap_iter_tt_unmap_local(struct ttm_kmap_iter *iter,
+ struct dma_buf_map *map)
+{
+ kunmap_local(map->vaddr);
+}
+
+static const struct ttm_kmap_iter_ops ttm_kmap_iter_tt_ops = {
+ .map_local = ttm_kmap_iter_tt_map_local,
+ .unmap_local = ttm_kmap_iter_tt_unmap_local,
+ .maps_tt = true,
+};
+
+/**
+ * ttm_kmap_iter_tt_init - Initialize a struct ttm_kmap_iter_tt
+ * @iter_tt: The struct ttm_kmap_iter_tt to initialize.
+ * @tt: Struct ttm_tt holding page pointers of the struct ttm_resource.
+ *
+ * Return: Pointer to the embedded struct ttm_kmap_iter.
+ */
+struct ttm_kmap_iter *
+ttm_kmap_iter_tt_init(struct ttm_kmap_iter_tt *iter_tt,
+ struct ttm_tt *tt)
+{
+ iter_tt->base.ops = &ttm_kmap_iter_tt_ops;
+ iter_tt->tt = tt;
+ if (tt)
+ iter_tt->prot = ttm_prot_from_caching(tt->caching, PAGE_KERNEL);
+ else
+ iter_tt->prot = PAGE_KERNEL;
+
+ return &iter_tt->base;
+}
+EXPORT_SYMBOL(ttm_kmap_iter_tt_init);
diff --git a/drivers/gpu/drm/vboxvideo/hgsmi_base.c b/drivers/gpu/drm/vboxvideo/hgsmi_base.c
index 361d3193258e..8c041d7ce4f1 100644
--- a/drivers/gpu/drm/vboxvideo/hgsmi_base.c
+++ b/drivers/gpu/drm/vboxvideo/hgsmi_base.c
@@ -9,7 +9,8 @@
#include "hgsmi_ch_setup.h"
/**
- * Inform the host of the location of the host flags in VRAM via an HGSMI cmd.
+ * hgsmi_report_flags_location - Inform the host of the location of
+ * the host flags in VRAM via an HGSMI cmd.
* Return: 0 or negative errno value.
* @ctx: The context of the guest heap to use.
* @location: The offset chosen for the flags within guest VRAM.
@@ -33,7 +34,8 @@ int hgsmi_report_flags_location(struct gen_pool *ctx, u32 location)
}
/**
- * Notify the host of HGSMI-related guest capabilities via an HGSMI command.
+ * hgsmi_send_caps_info - Notify the host of HGSMI-related guest capabilities
+ * via an HGSMI command.
* Return: 0 or negative errno value.
* @ctx: The context of the guest heap to use.
* @caps: The capabilities to report, see vbva_caps.
@@ -71,7 +73,8 @@ int hgsmi_test_query_conf(struct gen_pool *ctx)
}
/**
- * Query the host for an HGSMI configuration parameter via an HGSMI command.
+ * hgsmi_query_conf - Query the host for an HGSMI configuration
+ * parameter via an HGSMI command.
* Return: 0 or negative errno value.
* @ctx: The context containing the heap used.
* @index: The index of the parameter to query.
@@ -99,7 +102,8 @@ int hgsmi_query_conf(struct gen_pool *ctx, u32 index, u32 *value_ret)
}
/**
- * Pass the host a new mouse pointer shape via an HGSMI command.
+ * hgsmi_update_pointer_shape - Pass the host a new mouse pointer shape
+ * via an HGSMI command.
* Return: 0 or negative errno value.
* @ctx: The context containing the heap to be used.
* @flags: Cursor flags.
@@ -171,9 +175,10 @@ int hgsmi_update_pointer_shape(struct gen_pool *ctx, u32 flags,
}
/**
- * Report the guest cursor position. The host may wish to use this information
- * to re-position its own cursor (though this is currently unlikely). The
- * current host cursor position is returned.
+ * hgsmi_cursor_position - Report the guest cursor position. The host may
+ * wish to use this information to re-position its
+ * own cursor (though this is currently unlikely).
+ * The current host cursor position is returned.
* Return: 0 or negative errno value.
* @ctx: The context containing the heap used.
* @report_position: Are we reporting a position?
diff --git a/drivers/gpu/drm/vboxvideo/modesetting.c b/drivers/gpu/drm/vboxvideo/modesetting.c
index 7580b9002379..10b32d986b95 100644
--- a/drivers/gpu/drm/vboxvideo/modesetting.c
+++ b/drivers/gpu/drm/vboxvideo/modesetting.c
@@ -8,9 +8,11 @@
#include "hgsmi_channels.h"
/**
- * Set a video mode via an HGSMI request. The views must have been
- * initialised first using @a VBoxHGSMISendViewInfo and if the mode is being
- * set on the first display then it must be set first using registers.
+ * hgsmi_process_display_info - Set a video mode via an HGSMI request.
+ * The views must have been initialised first
+ * using @a VBoxHGSMISendViewInfo and if the mode
+ * is being set on the first display then it must
+ * be set first using registers.
* @ctx: The context containing the heap to use.
* @display: The screen number.
* @origin_x: The horizontal displacement relative to the first scrn.
@@ -51,10 +53,12 @@ void hgsmi_process_display_info(struct gen_pool *ctx, u32 display,
}
/**
- * Report the rectangle relative to which absolute pointer events should be
- * expressed. This information remains valid until the next VBVA resize event
- * for any screen, at which time it is reset to the bounding rectangle of all
- * virtual screens.
+ * hgsmi_update_input_mapping - Report the rectangle relative to which absolute
+ * pointer events should be expressed. This
+ * information remains valid until the next VBVA
+ * resize event for any screen, at which time it is
+ * reset to the bounding rectangle of all virtual
+ * screens.
* Return: 0 or negative errno value.
* @ctx: The context containing the heap to use.
* @origin_x: Upper left X co-ordinate relative to the first screen.
@@ -84,7 +88,7 @@ int hgsmi_update_input_mapping(struct gen_pool *ctx, s32 origin_x, s32 origin_y,
}
/**
- * Get most recent video mode hints.
+ * hgsmi_get_mode_hints - Get most recent video mode hints.
* Return: 0 or negative errno value.
* @ctx: The context containing the heap to use.
* @screens: The number of screens to query hints for, starting at 0.
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index f9de8632a28b..3c4cc133e3df 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -166,10 +166,9 @@ vc4_hdmi_connector_detect(struct drm_connector *connector, bool force)
struct vc4_hdmi *vc4_hdmi = connector_to_vc4_hdmi(connector);
bool connected = false;
- if (vc4_hdmi->hpd_gpio) {
- if (gpio_get_value_cansleep(vc4_hdmi->hpd_gpio) ^
- vc4_hdmi->hpd_active_low)
- connected = true;
+ if (vc4_hdmi->hpd_gpio &&
+ gpiod_get_value_cansleep(vc4_hdmi->hpd_gpio)) {
+ connected = true;
} else if (drm_probe_ddc(vc4_hdmi->ddc)) {
connected = true;
} else if (HDMI_READ(HDMI_HOTPLUG) & VC4_HDMI_HOTPLUG_CONNECTED) {
@@ -2105,7 +2104,6 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
struct vc4_hdmi *vc4_hdmi;
struct drm_encoder *encoder;
struct device_node *ddc_node;
- u32 value;
int ret;
vc4_hdmi = devm_kzalloc(dev, sizeof(*vc4_hdmi), GFP_KERNEL);
@@ -2144,18 +2142,10 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
/* Only use the GPIO HPD pin if present in the DT, otherwise
* we'll use the HDMI core's register.
*/
- if (of_find_property(dev->of_node, "hpd-gpios", &value)) {
- enum of_gpio_flags hpd_gpio_flags;
-
- vc4_hdmi->hpd_gpio = of_get_named_gpio_flags(dev->of_node,
- "hpd-gpios", 0,
- &hpd_gpio_flags);
- if (vc4_hdmi->hpd_gpio < 0) {
- ret = vc4_hdmi->hpd_gpio;
- goto err_unprepare_hsm;
- }
-
- vc4_hdmi->hpd_active_low = hpd_gpio_flags & OF_GPIO_ACTIVE_LOW;
+ vc4_hdmi->hpd_gpio = devm_gpiod_get_optional(dev, "hpd", GPIOD_IN);
+ if (IS_ERR(vc4_hdmi->hpd_gpio)) {
+ ret = PTR_ERR(vc4_hdmi->hpd_gpio);
+ goto err_put_ddc;
}
vc4_hdmi->disable_wifi_frequencies =
@@ -2209,8 +2199,8 @@ err_destroy_conn:
vc4_hdmi_connector_destroy(&vc4_hdmi->connector);
err_destroy_encoder:
drm_encoder_cleanup(encoder);
-err_unprepare_hsm:
pm_runtime_disable(dev);
+err_put_ddc:
put_device(&vc4_hdmi->ddc->dev);
return ret;
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.h b/drivers/gpu/drm/vc4/vc4_hdmi.h
index 39bd2c413ec0..884d245507a9 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.h
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.h
@@ -148,8 +148,7 @@ struct vc4_hdmi {
/* VC5 Only */
void __iomem *rm_regs;
- int hpd_gpio;
- bool hpd_active_low;
+ struct gpio_desc *hpd_gpio;
/*
* On some systems (like the RPi4), some modes are in the same
diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
index a0e75f1d5d01..bf38a7e319d1 100644
--- a/drivers/gpu/drm/vgem/vgem_drv.c
+++ b/drivers/gpu/drm/vgem/vgem_drv.c
@@ -25,7 +25,7 @@
* Ben Widawsky <ben@bwidawsk.net>
*/
-/**
+/*
* This is vgem, a (non-hardware-backed) GEM service. This is used by Mesa's
* software renderer and the X server for efficient buffer sharing.
*/
diff --git a/drivers/gpu/drm/vgem/vgem_fence.c b/drivers/gpu/drm/vgem/vgem_fence.c
index 2902dc6e64fa..bd6f75285fd9 100644
--- a/drivers/gpu/drm/vgem/vgem_fence.c
+++ b/drivers/gpu/drm/vgem/vgem_fence.c
@@ -151,8 +151,7 @@ int vgem_fence_attach_ioctl(struct drm_device *dev,
/* Check for a conflicting fence */
resv = obj->resv;
- if (!dma_resv_test_signaled_rcu(resv,
- arg->flags & VGEM_FENCE_WRITE)) {
+ if (!dma_resv_test_signaled(resv, arg->flags & VGEM_FENCE_WRITE)) {
ret = -EBUSY;
goto err_fence;
}
diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
index 669f2ee39515..5c1ad1596889 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
@@ -451,10 +451,9 @@ static int virtio_gpu_wait_ioctl(struct drm_device *dev, void *data,
return -ENOENT;
if (args->flags & VIRTGPU_WAIT_NOWAIT) {
- ret = dma_resv_test_signaled_rcu(obj->resv, true);
+ ret = dma_resv_test_signaled(obj->resv, true);
} else {
- ret = dma_resv_wait_timeout_rcu(obj->resv, true, true,
- timeout);
+ ret = dma_resv_wait_timeout(obj->resv, true, true, timeout);
}
if (ret == 0)
ret = -EBUSY;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
index cdbd5a870711..09fe20e918f9 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
@@ -483,10 +483,10 @@ int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
d.src_addr = NULL;
d.dst_pages = dst->ttm->pages;
d.src_pages = src->ttm->pages;
- d.dst_num_pages = dst->mem.num_pages;
- d.src_num_pages = src->mem.num_pages;
- d.dst_prot = ttm_io_prot(dst, &dst->mem, PAGE_KERNEL);
- d.src_prot = ttm_io_prot(src, &src->mem, PAGE_KERNEL);
+ d.dst_num_pages = dst->resource->num_pages;
+ d.src_num_pages = src->resource->num_pages;
+ d.dst_prot = ttm_io_prot(dst, dst->resource, PAGE_KERNEL);
+ d.src_prot = ttm_io_prot(src, src->resource, PAGE_KERNEL);
d.diff = diff;
for (j = 0; j < h; ++j) {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
index 04dd49c4c257..362f56d5b12b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
@@ -103,7 +103,7 @@ int vmw_bo_pin_in_placement(struct vmw_private *dev_priv,
goto err;
if (buf->base.pin_count > 0)
- ret = ttm_bo_mem_compat(placement, &bo->mem,
+ ret = ttm_bo_mem_compat(placement, bo->resource,
&new_flags) == true ? 0 : -EINVAL;
else
ret = ttm_bo_validate(bo, placement, &ctx);
@@ -145,7 +145,7 @@ int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
goto err;
if (buf->base.pin_count > 0) {
- ret = ttm_bo_mem_compat(&vmw_vram_gmr_placement, &bo->mem,
+ ret = ttm_bo_mem_compat(&vmw_vram_gmr_placement, bo->resource,
&new_flags) == true ? 0 : -EINVAL;
goto out_unreserve;
}
@@ -211,7 +211,7 @@ int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
uint32_t new_flags;
place = vmw_vram_placement.placement[0];
- place.lpfn = bo->mem.num_pages;
+ place.lpfn = bo->resource->num_pages;
placement.num_placement = 1;
placement.placement = &place;
placement.num_busy_placement = 1;
@@ -227,22 +227,22 @@ int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
* In that case, evict it first because TTM isn't good at handling
* that situation.
*/
- if (bo->mem.mem_type == TTM_PL_VRAM &&
- bo->mem.start < bo->mem.num_pages &&
- bo->mem.start > 0 &&
+ if (bo->resource->mem_type == TTM_PL_VRAM &&
+ bo->resource->start < bo->resource->num_pages &&
+ bo->resource->start > 0 &&
buf->base.pin_count == 0) {
ctx.interruptible = false;
(void) ttm_bo_validate(bo, &vmw_sys_placement, &ctx);
}
if (buf->base.pin_count > 0)
- ret = ttm_bo_mem_compat(&placement, &bo->mem,
+ ret = ttm_bo_mem_compat(&placement, bo->resource,
&new_flags) == true ? 0 : -EINVAL;
else
ret = ttm_bo_validate(bo, &placement, &ctx);
/* For some reason we didn't end up at the start of vram */
- WARN_ON(ret == 0 && bo->mem.start != 0);
+ WARN_ON(ret == 0 && bo->resource->start != 0);
if (!ret)
vmw_bo_pin_reserved(buf, true);
@@ -293,11 +293,11 @@ err:
void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *bo,
SVGAGuestPtr *ptr)
{
- if (bo->mem.mem_type == TTM_PL_VRAM) {
+ if (bo->resource->mem_type == TTM_PL_VRAM) {
ptr->gmrId = SVGA_GMR_FRAMEBUFFER;
- ptr->offset = bo->mem.start << PAGE_SHIFT;
+ ptr->offset = bo->resource->start << PAGE_SHIFT;
} else {
- ptr->gmrId = bo->mem.start;
+ ptr->gmrId = bo->resource->start;
ptr->offset = 0;
}
}
@@ -316,7 +316,7 @@ void vmw_bo_pin_reserved(struct vmw_buffer_object *vbo, bool pin)
struct ttm_place pl;
struct ttm_placement placement;
struct ttm_buffer_object *bo = &vbo->base;
- uint32_t old_mem_type = bo->mem.mem_type;
+ uint32_t old_mem_type = bo->resource->mem_type;
int ret;
dma_resv_assert_held(bo->base.resv);
@@ -326,8 +326,8 @@ void vmw_bo_pin_reserved(struct vmw_buffer_object *vbo, bool pin)
pl.fpfn = 0;
pl.lpfn = 0;
- pl.mem_type = bo->mem.mem_type;
- pl.flags = bo->mem.placement;
+ pl.mem_type = bo->resource->mem_type;
+ pl.flags = bo->resource->placement;
memset(&placement, 0, sizeof(placement));
placement.num_placement = 1;
@@ -335,7 +335,7 @@ void vmw_bo_pin_reserved(struct vmw_buffer_object *vbo, bool pin)
ret = ttm_bo_validate(bo, &placement, &ctx);
- BUG_ON(ret != 0 || bo->mem.mem_type != old_mem_type);
+ BUG_ON(ret != 0 || bo->resource->mem_type != old_mem_type);
if (pin)
ttm_bo_pin(bo);
@@ -369,7 +369,7 @@ void *vmw_bo_map_and_cache(struct vmw_buffer_object *vbo)
if (virtual)
return virtual;
- ret = ttm_bo_kmap(bo, 0, bo->mem.num_pages, &vbo->map);
+ ret = ttm_bo_kmap(bo, 0, bo->resource->num_pages, &vbo->map);
if (ret)
DRM_ERROR("Buffer object map failed: %d.\n", ret);
@@ -743,9 +743,9 @@ static int vmw_user_bo_synccpu_grab(struct vmw_user_buffer_object *user_bo,
if (flags & drm_vmw_synccpu_allow_cs) {
long lret;
- lret = dma_resv_wait_timeout_rcu
- (bo->base.resv, true, true,
- nonblock ? 0 : MAX_SCHEDULE_TIMEOUT);
+ lret = dma_resv_wait_timeout(bo->base.resv, true, true,
+ nonblock ? 0 :
+ MAX_SCHEDULE_TIMEOUT);
if (!lret)
return -EBUSY;
else if (lret < 0)
@@ -1197,7 +1197,7 @@ void vmw_bo_move_notify(struct ttm_buffer_object *bo,
* With other types of moves, the underlying pages stay the same,
* and the map can be kept.
*/
- if (mem->mem_type == TTM_PL_VRAM || bo->mem.mem_type == TTM_PL_VRAM)
+ if (mem->mem_type == TTM_PL_VRAM || bo->resource->mem_type == TTM_PL_VRAM)
vmw_bo_unmap(vbo);
/*
@@ -1205,6 +1205,6 @@ void vmw_bo_move_notify(struct ttm_buffer_object *bo,
* read back all resource content first, and unbind the MOB from
* the resource.
*/
- if (mem->mem_type != VMW_PL_MOB && bo->mem.mem_type == VMW_PL_MOB)
+ if (mem->mem_type != VMW_PL_MOB && bo->resource->mem_type == VMW_PL_MOB)
vmw_resource_unbind_list(vbo);
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c
index 9c89189a226d..956b85e35cef 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c
@@ -576,11 +576,11 @@ static int vmw_cmd_emit_dummy_legacy_query(struct vmw_private *dev_priv,
cmd->body.cid = cid;
cmd->body.type = SVGA3D_QUERYTYPE_OCCLUSION;
- if (bo->mem.mem_type == TTM_PL_VRAM) {
+ if (bo->resource->mem_type == TTM_PL_VRAM) {
cmd->body.guestResult.gmrId = SVGA_GMR_FRAMEBUFFER;
- cmd->body.guestResult.offset = bo->mem.start << PAGE_SHIFT;
+ cmd->body.guestResult.offset = bo->resource->start << PAGE_SHIFT;
} else {
- cmd->body.guestResult.gmrId = bo->mem.start;
+ cmd->body.guestResult.gmrId = bo->resource->start;
cmd->body.guestResult.offset = 0;
}
@@ -621,8 +621,8 @@ static int vmw_cmd_emit_dummy_gb_query(struct vmw_private *dev_priv,
cmd->header.size = sizeof(cmd->body);
cmd->body.cid = cid;
cmd->body.type = SVGA3D_QUERYTYPE_OCCLUSION;
- BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
- cmd->body.mobid = bo->mem.start;
+ BUG_ON(bo->resource->mem_type != VMW_PL_MOB);
+ cmd->body.mobid = bo->resource->start;
cmd->body.offset = 0;
vmw_cmd_commit(dev_priv, sizeof(*cmd));
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
index 05ca310ed61a..6bb4961e64a5 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
@@ -889,7 +889,7 @@ static int vmw_cmdbuf_space_pool(struct vmw_cmdbuf_man *man,
header->cmd = man->map + offset;
if (man->using_mob) {
cb_hdr->flags = SVGA_CB_FLAG_MOB;
- cb_hdr->ptr.mob.mobid = man->cmd_space->mem.start;
+ cb_hdr->ptr.mob.mobid = man->cmd_space->resource->start;
cb_hdr->ptr.mob.mobOffset = offset;
} else {
cb_hdr->ptr.pa = (u64)man->handle + (u64)offset;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
index 3ed9914cb994..dffe3804ad3e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
@@ -346,7 +346,7 @@ static int vmw_gb_context_bind(struct vmw_resource *res,
} *cmd;
struct ttm_buffer_object *bo = val_buf->bo;
- BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
+ BUG_ON(bo->resource->mem_type != VMW_PL_MOB);
cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL))
@@ -355,7 +355,7 @@ static int vmw_gb_context_bind(struct vmw_resource *res,
cmd->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT;
cmd->header.size = sizeof(cmd->body);
cmd->body.cid = res->id;
- cmd->body.mobid = bo->mem.start;
+ cmd->body.mobid = bo->resource->start;
cmd->body.validContents = res->backup_dirty;
res->backup_dirty = false;
vmw_cmd_commit(dev_priv, sizeof(*cmd));
@@ -385,7 +385,7 @@ static int vmw_gb_context_unbind(struct vmw_resource *res,
uint8_t *cmd;
- BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
+ BUG_ON(bo->resource->mem_type != VMW_PL_MOB);
mutex_lock(&dev_priv->binding_mutex);
vmw_binding_state_scrub(uctx->cbs);
@@ -513,7 +513,7 @@ static int vmw_dx_context_bind(struct vmw_resource *res,
} *cmd;
struct ttm_buffer_object *bo = val_buf->bo;
- BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
+ BUG_ON(bo->resource->mem_type != VMW_PL_MOB);
cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL))
@@ -522,7 +522,7 @@ static int vmw_dx_context_bind(struct vmw_resource *res,
cmd->header.id = SVGA_3D_CMD_DX_BIND_CONTEXT;
cmd->header.size = sizeof(cmd->body);
cmd->body.cid = res->id;
- cmd->body.mobid = bo->mem.start;
+ cmd->body.mobid = bo->resource->start;
cmd->body.validContents = res->backup_dirty;
res->backup_dirty = false;
vmw_cmd_commit(dev_priv, sizeof(*cmd));
@@ -594,7 +594,7 @@ static int vmw_dx_context_unbind(struct vmw_resource *res,
uint8_t *cmd;
- BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
+ BUG_ON(bo->resource->mem_type != VMW_PL_MOB);
mutex_lock(&dev_priv->binding_mutex);
vmw_dx_context_scrub_cotables(res, readback);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
index b40aa002bf2b..c84a16c1def0 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
@@ -173,7 +173,7 @@ static int vmw_cotable_unscrub(struct vmw_resource *res)
SVGA3dCmdDXSetCOTable body;
} *cmd;
- WARN_ON_ONCE(bo->mem.mem_type != VMW_PL_MOB);
+ WARN_ON_ONCE(bo->resource->mem_type != VMW_PL_MOB);
dma_resv_assert_held(bo->base.resv);
cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
@@ -181,12 +181,12 @@ static int vmw_cotable_unscrub(struct vmw_resource *res)
return -ENOMEM;
WARN_ON(vcotbl->ctx->id == SVGA3D_INVALID_ID);
- WARN_ON(bo->mem.mem_type != VMW_PL_MOB);
+ WARN_ON(bo->resource->mem_type != VMW_PL_MOB);
cmd->header.id = SVGA_3D_CMD_DX_SET_COTABLE;
cmd->header.size = sizeof(cmd->body);
cmd->body.cid = vcotbl->ctx->id;
cmd->body.type = vcotbl->type;
- cmd->body.mobid = bo->mem.start;
+ cmd->body.mobid = bo->resource->start;
cmd->body.validSizeInBytes = vcotbl->size_read_back;
vmw_cmd_commit_flush(dev_priv, sizeof(*cmd));
@@ -315,7 +315,7 @@ static int vmw_cotable_unbind(struct vmw_resource *res,
if (!vmw_resource_mob_attached(res))
return 0;
- WARN_ON_ONCE(bo->mem.mem_type != VMW_PL_MOB);
+ WARN_ON_ONCE(bo->resource->mem_type != VMW_PL_MOB);
dma_resv_assert_held(bo->base.resv);
mutex_lock(&dev_priv->binding_mutex);
@@ -431,7 +431,7 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
* Do a page by page copy of COTables. This eliminates slow vmap()s.
* This should really be a TTM utility.
*/
- for (i = 0; i < old_bo->mem.num_pages; ++i) {
+ for (i = 0; i < old_bo->resource->num_pages; ++i) {
bool dummy;
ret = ttm_bo_kmap(old_bo, i, 1, &old_map);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 32a84dff3fbf..a2b8464b3f56 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -735,7 +735,7 @@ static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res)
cmd->header.id = SVGA_3D_CMD_DX_BIND_ALL_QUERY;
cmd->header.size = sizeof(cmd->body);
cmd->body.cid = ctx_res->id;
- cmd->body.mobid = dx_query_mob->base.mem.start;
+ cmd->body.mobid = dx_query_mob->base.resource->start;
vmw_cmd_commit(dev_priv, sizeof(*cmd));
vmw_context_bind_dx_query(ctx_res, dx_query_mob);
@@ -1046,7 +1046,7 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
if (unlikely(new_query_bo != sw_context->cur_query_bo)) {
- if (unlikely(new_query_bo->base.mem.num_pages > 4)) {
+ if (unlikely(new_query_bo->base.resource->num_pages > 4)) {
VMW_DEBUG_USER("Query buffer too large.\n");
return -EINVAL;
}
@@ -3710,16 +3710,16 @@ static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
list_for_each_entry(reloc, &sw_context->bo_relocations, head) {
bo = &reloc->vbo->base;
- switch (bo->mem.mem_type) {
+ switch (bo->resource->mem_type) {
case TTM_PL_VRAM:
- reloc->location->offset += bo->mem.start << PAGE_SHIFT;
+ reloc->location->offset += bo->resource->start << PAGE_SHIFT;
reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
break;
case VMW_PL_GMR:
- reloc->location->gmrId = bo->mem.start;
+ reloc->location->gmrId = bo->resource->start;
break;
case VMW_PL_MOB:
- *reloc->mob_loc = bo->mem.start;
+ *reloc->mob_loc = bo->resource->start;
break;
default:
BUG();
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
index 1774960d1b89..28ceb749a733 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
@@ -52,11 +52,17 @@ static struct vmwgfx_gmrid_man *to_gmrid_manager(struct ttm_resource_manager *ma
static int vmw_gmrid_man_get_node(struct ttm_resource_manager *man,
struct ttm_buffer_object *bo,
const struct ttm_place *place,
- struct ttm_resource *mem)
+ struct ttm_resource **res)
{
struct vmwgfx_gmrid_man *gman = to_gmrid_manager(man);
int id;
+ *res = kmalloc(sizeof(**res), GFP_KERNEL);
+ if (!*res)
+ return -ENOMEM;
+
+ ttm_resource_init(bo, place, *res);
+
id = ida_alloc_max(&gman->gmr_ida, gman->max_gmr_ids - 1, GFP_KERNEL);
if (id < 0)
return id;
@@ -64,36 +70,34 @@ static int vmw_gmrid_man_get_node(struct ttm_resource_manager *man,
spin_lock(&gman->lock);
if (gman->max_gmr_pages > 0) {
- gman->used_gmr_pages += mem->num_pages;
+ gman->used_gmr_pages += (*res)->num_pages;
if (unlikely(gman->used_gmr_pages > gman->max_gmr_pages))
goto nospace;
}
- mem->mm_node = gman;
- mem->start = id;
+ (*res)->start = id;
spin_unlock(&gman->lock);
return 0;
nospace:
- gman->used_gmr_pages -= mem->num_pages;
+ gman->used_gmr_pages -= (*res)->num_pages;
spin_unlock(&gman->lock);
ida_free(&gman->gmr_ida, id);
+ kfree(*res);
return -ENOSPC;
}
static void vmw_gmrid_man_put_node(struct ttm_resource_manager *man,
- struct ttm_resource *mem)
+ struct ttm_resource *res)
{
struct vmwgfx_gmrid_man *gman = to_gmrid_manager(man);
- if (mem->mm_node) {
- ida_free(&gman->gmr_ida, mem->start);
- spin_lock(&gman->lock);
- gman->used_gmr_pages -= mem->num_pages;
- spin_unlock(&gman->lock);
- mem->mm_node = NULL;
- }
+ ida_free(&gman->gmr_ida, res->start);
+ spin_lock(&gman->lock);
+ gman->used_gmr_pages -= res->num_pages;
+ spin_unlock(&gman->lock);
+ kfree(res);
}
static const struct ttm_resource_manager_func vmw_gmrid_manager_func;
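
For reference, the gmrid hunk above captures the new TTM contract this series moves vmwgfx to: the manager callback now allocates the struct ttm_resource itself, hands it back through a double pointer after ttm_resource_init(), and the matching free callback releases it, so per-manager state no longer travels in mem->mm_node. A minimal sketch of that shape for a hypothetical id-based manager follows; the my_* names are illustrative and not part of this patch, only the kernel/TTM calls appear in the hunk above.

/* Illustrative only: an id-based manager following the alloc/free
 * contract shown in the gmrid hunk above. my_* names are hypothetical.
 */
#include <linux/kernel.h>
#include <linux/idr.h>
#include <linux/slab.h>
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_resource.h>

struct my_id_man {
	struct ttm_resource_manager man;
	struct ida ida;
};

static int my_id_man_alloc(struct ttm_resource_manager *man,
			   struct ttm_buffer_object *bo,
			   const struct ttm_place *place,
			   struct ttm_resource **res)
{
	struct my_id_man *iman = container_of(man, struct my_id_man, man);
	int id;

	/* The manager owns the resource allocation now. */
	*res = kmalloc(sizeof(**res), GFP_KERNEL);
	if (!*res)
		return -ENOMEM;
	ttm_resource_init(bo, place, *res);

	id = ida_alloc(&iman->ida, GFP_KERNEL);
	if (id < 0) {
		kfree(*res);
		return id;
	}

	/* The id travels in res->start instead of mem->mm_node. */
	(*res)->start = id;
	return 0;
}

static void my_id_man_free(struct ttm_resource_manager *man,
			   struct ttm_resource *res)
{
	struct my_id_man *iman = container_of(man, struct my_id_man, man);

	ida_free(&iman->ida, res->start);
	kfree(res);
}
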
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
index 45c9c6a7f1d6..e5a9a5cbd01a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
@@ -232,7 +232,7 @@ void vmw_bo_dirty_unmap(struct vmw_buffer_object *vbo,
int vmw_bo_dirty_add(struct vmw_buffer_object *vbo)
{
struct vmw_bo_dirty *dirty = vbo->dirty;
- pgoff_t num_pages = vbo->base.mem.num_pages;
+ pgoff_t num_pages = vbo->base.resource->num_pages;
size_t size, acc_size;
int ret;
static struct ttm_operation_ctx ctx = {
@@ -413,7 +413,7 @@ vm_fault_t vmw_bo_vm_mkwrite(struct vm_fault *vmf)
return ret;
page_offset = vmf->pgoff - drm_vma_node_start(&bo->base.vma_node);
- if (unlikely(page_offset >= bo->mem.num_pages)) {
+ if (unlikely(page_offset >= bo->resource->num_pages)) {
ret = VM_FAULT_SIGBUS;
goto out_unlock;
}
@@ -456,7 +456,7 @@ vm_fault_t vmw_bo_vm_fault(struct vm_fault *vmf)
page_offset = vmf->pgoff -
drm_vma_node_start(&bo->base.vma_node);
- if (page_offset >= bo->mem.num_pages ||
+ if (page_offset >= bo->resource->num_pages ||
vmw_resources_clean(vbo, page_offset,
page_offset + PAGE_SIZE,
&allowed_prefault)) {
@@ -529,7 +529,7 @@ vm_fault_t vmw_bo_vm_huge_fault(struct vm_fault *vmf,
page_offset = vmf->pgoff -
drm_vma_node_start(&bo->base.vma_node);
- if (page_offset >= bo->mem.num_pages ||
+ if (page_offset >= bo->resource->num_pages ||
vmw_resources_clean(vbo, page_offset,
page_offset + PAGE_SIZE,
&allowed_prefault)) {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 62ea920addc3..7b45393ad98e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -1166,7 +1166,7 @@ int vmw_resources_clean(struct vmw_buffer_object *vbo, pgoff_t start,
if (bo->moving)
dma_fence_put(bo->moving);
bo->moving = dma_fence_get
- (dma_resv_get_excl(bo->base.resv));
+ (dma_resv_excl_fence(bo->base.resv));
}
return 0;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
index b391975871a5..b3c8d2da6f1a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
@@ -254,7 +254,7 @@ static int vmw_gb_shader_bind(struct vmw_resource *res,
} *cmd;
struct ttm_buffer_object *bo = val_buf->bo;
- BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
+ BUG_ON(bo->resource->mem_type != VMW_PL_MOB);
cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL))
@@ -263,7 +263,7 @@ static int vmw_gb_shader_bind(struct vmw_resource *res,
cmd->header.id = SVGA_3D_CMD_BIND_GB_SHADER;
cmd->header.size = sizeof(cmd->body);
cmd->body.shid = res->id;
- cmd->body.mobid = bo->mem.start;
+ cmd->body.mobid = bo->resource->start;
cmd->body.offsetInBytes = res->backup_offset;
res->backup_dirty = false;
vmw_cmd_commit(dev_priv, sizeof(*cmd));
@@ -282,7 +282,7 @@ static int vmw_gb_shader_unbind(struct vmw_resource *res,
} *cmd;
struct vmw_fence_obj *fence;
- BUG_ON(res->backup->base.mem.mem_type != VMW_PL_MOB);
+ BUG_ON(res->backup->base.resource->mem_type != VMW_PL_MOB);
cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL))
@@ -402,7 +402,7 @@ static int vmw_dx_shader_unscrub(struct vmw_resource *res)
cmd->header.size = sizeof(cmd->body);
cmd->body.cid = shader->ctx->id;
cmd->body.shid = shader->id;
- cmd->body.mobid = res->backup->base.mem.start;
+ cmd->body.mobid = res->backup->base.resource->start;
cmd->body.offsetInBytes = res->backup_offset;
vmw_cmd_commit(dev_priv, sizeof(*cmd));
@@ -450,7 +450,7 @@ static int vmw_dx_shader_bind(struct vmw_resource *res,
struct vmw_private *dev_priv = res->dev_priv;
struct ttm_buffer_object *bo = val_buf->bo;
- BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
+ BUG_ON(bo->resource->mem_type != VMW_PL_MOB);
mutex_lock(&dev_priv->binding_mutex);
vmw_dx_shader_unscrub(res);
mutex_unlock(&dev_priv->binding_mutex);
@@ -513,7 +513,7 @@ static int vmw_dx_shader_unbind(struct vmw_resource *res,
struct vmw_fence_obj *fence;
int ret;
- BUG_ON(res->backup->base.mem.mem_type != VMW_PL_MOB);
+ BUG_ON(res->backup->base.resource->mem_type != VMW_PL_MOB);
mutex_lock(&dev_priv->binding_mutex);
ret = vmw_dx_shader_scrub(res);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_streamoutput.c b/drivers/gpu/drm/vmwgfx/vmwgfx_streamoutput.c
index 1dd042a20a66..c8efa4a6c995 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_streamoutput.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_streamoutput.c
@@ -106,7 +106,7 @@ static int vmw_dx_streamoutput_unscrub(struct vmw_resource *res)
cmd->header.id = SVGA_3D_CMD_DX_BIND_STREAMOUTPUT;
cmd->header.size = sizeof(cmd->body);
cmd->body.soid = so->id;
- cmd->body.mobid = res->backup->base.mem.start;
+ cmd->body.mobid = res->backup->base.resource->start;
cmd->body.offsetInBytes = res->backup_offset;
cmd->body.sizeInBytes = so->size;
vmw_cmd_commit(dev_priv, sizeof(*cmd));
@@ -142,7 +142,7 @@ static int vmw_dx_streamoutput_bind(struct vmw_resource *res,
struct ttm_buffer_object *bo = val_buf->bo;
int ret;
- if (WARN_ON(bo->mem.mem_type != VMW_PL_MOB))
+ if (WARN_ON(bo->resource->mem_type != VMW_PL_MOB))
return -EINVAL;
mutex_lock(&dev_priv->binding_mutex);
@@ -197,7 +197,7 @@ static int vmw_dx_streamoutput_unbind(struct vmw_resource *res, bool readback,
struct vmw_fence_obj *fence;
int ret;
- if (WARN_ON(res->backup->base.mem.mem_type != VMW_PL_MOB))
+ if (WARN_ON(res->backup->base.resource->mem_type != VMW_PL_MOB))
return -EINVAL;
mutex_lock(&dev_priv->binding_mutex);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index 8ead06574850..0835468bb2ee 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -1212,7 +1212,7 @@ static int vmw_gb_surface_bind(struct vmw_resource *res,
uint32_t submit_size;
struct ttm_buffer_object *bo = val_buf->bo;
- BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
+ BUG_ON(bo->resource->mem_type != VMW_PL_MOB);
submit_size = sizeof(*cmd1) + (res->backup_dirty ? sizeof(*cmd2) : 0);
@@ -1223,7 +1223,7 @@ static int vmw_gb_surface_bind(struct vmw_resource *res,
cmd1->header.id = SVGA_3D_CMD_BIND_GB_SURFACE;
cmd1->header.size = sizeof(cmd1->body);
cmd1->body.sid = res->id;
- cmd1->body.mobid = bo->mem.start;
+ cmd1->body.mobid = bo->resource->start;
if (res->backup_dirty) {
cmd2 = (void *) &cmd1[1];
cmd2->header.id = SVGA_3D_CMD_UPDATE_GB_SURFACE;
@@ -1266,7 +1266,7 @@ static int vmw_gb_surface_unbind(struct vmw_resource *res,
uint8_t *cmd;
- BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
+ BUG_ON(bo->resource->mem_type != VMW_PL_MOB);
submit_size = sizeof(*cmd3) + (readback ? sizeof(*cmd1) : sizeof(*cmd2));
cmd = VMW_CMD_RESERVE(dev_priv, submit_size);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c b/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c
index 5ccc35b3194c..2a3d3468e4e0 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c
@@ -7,6 +7,7 @@
#include "vmwgfx_drv.h"
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
+#include <drm/ttm/ttm_range_manager.h>
/**
* struct vmw_thp_manager - Range manager implementing huge page alignment
@@ -50,20 +51,22 @@ static int vmw_thp_insert_aligned(struct ttm_buffer_object *bo,
static int vmw_thp_get_node(struct ttm_resource_manager *man,
struct ttm_buffer_object *bo,
const struct ttm_place *place,
- struct ttm_resource *mem)
+ struct ttm_resource **res)
{
struct vmw_thp_manager *rman = to_thp_manager(man);
struct drm_mm *mm = &rman->mm;
- struct drm_mm_node *node;
+ struct ttm_range_mgr_node *node;
unsigned long align_pages;
unsigned long lpfn;
enum drm_mm_insert_mode mode = DRM_MM_INSERT_BEST;
int ret;
- node = kzalloc(sizeof(*node), GFP_KERNEL);
+ node = kzalloc(struct_size(node, mm_nodes, 1), GFP_KERNEL);
if (!node)
return -ENOMEM;
+ ttm_resource_init(bo, place, &node->base);
+
lpfn = place->lpfn;
if (!lpfn)
lpfn = man->size;
@@ -75,23 +78,26 @@ static int vmw_thp_get_node(struct ttm_resource_manager *man,
spin_lock(&rman->lock);
if (IS_ENABLED(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)) {
align_pages = (HPAGE_PUD_SIZE >> PAGE_SHIFT);
- if (mem->num_pages >= align_pages) {
- ret = vmw_thp_insert_aligned(bo, mm, node, align_pages,
- place, mem, lpfn, mode);
+ if (node->base.num_pages >= align_pages) {
+ ret = vmw_thp_insert_aligned(bo, mm, &node->mm_nodes[0],
+ align_pages, place,
+ &node->base, lpfn, mode);
if (!ret)
goto found_unlock;
}
}
align_pages = (HPAGE_PMD_SIZE >> PAGE_SHIFT);
- if (mem->num_pages >= align_pages) {
- ret = vmw_thp_insert_aligned(bo, mm, node, align_pages, place,
- mem, lpfn, mode);
+ if (node->base.num_pages >= align_pages) {
+ ret = vmw_thp_insert_aligned(bo, mm, &node->mm_nodes[0],
+ align_pages, place, &node->base,
+ lpfn, mode);
if (!ret)
goto found_unlock;
}
- ret = drm_mm_insert_node_in_range(mm, node, mem->num_pages,
+ ret = drm_mm_insert_node_in_range(mm, &node->mm_nodes[0],
+ node->base.num_pages,
bo->page_alignment, 0,
place->fpfn, lpfn, mode);
found_unlock:
@@ -100,28 +106,24 @@ found_unlock:
if (unlikely(ret)) {
kfree(node);
} else {
- mem->mm_node = node;
- mem->start = node->start;
+ node->base.start = node->mm_nodes[0].start;
+ *res = &node->base;
}
return ret;
}
-
-
static void vmw_thp_put_node(struct ttm_resource_manager *man,
- struct ttm_resource *mem)
+ struct ttm_resource *res)
{
+ struct ttm_range_mgr_node *node = to_ttm_range_mgr_node(res);
struct vmw_thp_manager *rman = to_thp_manager(man);
- if (mem->mm_node) {
- spin_lock(&rman->lock);
- drm_mm_remove_node(mem->mm_node);
- spin_unlock(&rman->lock);
+ spin_lock(&rman->lock);
+ drm_mm_remove_node(&node->mm_nodes[0]);
+ spin_unlock(&rman->lock);
- kfree(mem->mm_node);
- mem->mm_node = NULL;
- }
+ kfree(node);
}
int vmw_thp_init(struct vmw_private *dev_priv)
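
The THP hunk above shows the companion pattern for drm_mm based managers: the drm_mm_node is embedded in a struct ttm_range_mgr_node allocated with struct_size(), node->base is set up with ttm_resource_init(), and the free path recovers the container with to_ttm_range_mgr_node() instead of dereferencing mem->mm_node. A reduced sketch under those assumptions, without the huge-page alignment attempts, is below; the my_* names are illustrative only.

/* Illustrative sketch of the ttm_range_mgr_node pattern used above.
 * my_* names are hypothetical; the TTM and drm_mm calls are the ones
 * appearing in the vmwgfx_thp.c hunk.
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <drm/drm_mm.h>
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_range_manager.h>
#include <drm/ttm/ttm_resource.h>

struct my_mm_man {
	struct ttm_resource_manager man;
	struct drm_mm mm;
	spinlock_t lock;
};

static int my_range_alloc(struct ttm_resource_manager *man,
			  struct ttm_buffer_object *bo,
			  const struct ttm_place *place,
			  struct ttm_resource **res)
{
	struct my_mm_man *rman = container_of(man, struct my_mm_man, man);
	unsigned long lpfn = place->lpfn ? place->lpfn : man->size;
	struct ttm_range_mgr_node *node;
	int ret;

	/* One allocation covers the resource plus its drm_mm_node. */
	node = kzalloc(struct_size(node, mm_nodes, 1), GFP_KERNEL);
	if (!node)
		return -ENOMEM;
	ttm_resource_init(bo, place, &node->base);

	spin_lock(&rman->lock);
	ret = drm_mm_insert_node_in_range(&rman->mm, &node->mm_nodes[0],
					  node->base.num_pages,
					  bo->page_alignment, 0,
					  place->fpfn, lpfn,
					  DRM_MM_INSERT_BEST);
	spin_unlock(&rman->lock);

	if (ret) {
		kfree(node);
		return ret;
	}

	node->base.start = node->mm_nodes[0].start;
	*res = &node->base;
	return 0;
}

static void my_range_free(struct ttm_resource_manager *man,
			  struct ttm_resource *res)
{
	struct ttm_range_mgr_node *node = to_ttm_range_mgr_node(res);
	struct my_mm_man *rman = container_of(man, struct my_mm_man, man);

	spin_lock(&rman->lock);
	drm_mm_remove_node(&node->mm_nodes[0]);
	spin_unlock(&rman->lock);
	kfree(node);
}
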
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
index 35b03fe21161..0488042fb287 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
@@ -719,7 +719,7 @@ static int vmw_move(struct ttm_buffer_object *bo,
struct ttm_resource *new_mem,
struct ttm_place *hop)
{
- struct ttm_resource_manager *old_man = ttm_manager_type(bo->bdev, bo->mem.mem_type);
+ struct ttm_resource_manager *old_man = ttm_manager_type(bo->bdev, bo->resource->mem_type);
struct ttm_resource_manager *new_man = ttm_manager_type(bo->bdev, new_mem->mem_type);
int ret;
@@ -729,11 +729,11 @@ static int vmw_move(struct ttm_buffer_object *bo,
return ret;
}
- vmw_move_notify(bo, &bo->mem, new_mem);
+ vmw_move_notify(bo, bo->resource, new_mem);
if (old_man->use_tt && new_man->use_tt) {
- if (bo->mem.mem_type == TTM_PL_SYSTEM) {
- ttm_bo_assign_mem(bo, new_mem);
+ if (bo->resource->mem_type == TTM_PL_SYSTEM) {
+ ttm_bo_move_null(bo, new_mem);
return 0;
}
ret = ttm_bo_wait_ctx(bo, ctx);
@@ -741,7 +741,7 @@ static int vmw_move(struct ttm_buffer_object *bo,
goto fail;
vmw_ttm_unbind(bo->bdev, bo->ttm);
- ttm_resource_free(bo, &bo->mem);
+ ttm_resource_free(bo, &bo->resource);
ttm_bo_assign_mem(bo, new_mem);
return 0;
} else {
@@ -751,7 +751,7 @@ static int vmw_move(struct ttm_buffer_object *bo,
}
return 0;
fail:
- vmw_move_notify(bo, new_mem, &bo->mem);
+ vmw_move_notify(bo, new_mem, bo->resource);
return ret;
}
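
Finally, the vmw_move hunk and the scattered bo->mem conversions above all reduce to the same access rule: placement information is now read through the bo->resource pointer (mem_type, start, num_pages) rather than the embedded bo->mem, and the old placement is dropped with ttm_resource_free(bo, &bo->resource) before a new one is assigned. A tiny hypothetical helper showing that read side follows; my_bo_vram_offset is not part of the patch.

/* Illustrative helper: query a BO's VRAM byte offset through
 * bo->resource, the pointer that replaces the embedded bo->mem in
 * this series. The function name is hypothetical.
 */
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_resource.h>

static int my_bo_vram_offset(struct ttm_buffer_object *bo, u64 *offset)
{
	if (!bo->resource || bo->resource->mem_type != TTM_PL_VRAM)
		return -EINVAL;

	/* res->start is in pages, exactly as in the relocation hunk. */
	*offset = (u64)bo->resource->start << PAGE_SHIFT;
	return 0;
}
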