Diffstat (limited to 'drivers/gpu/drm/nouveau')
 drivers/gpu/drm/nouveau/dispnv04/crtc.c    |   9
 drivers/gpu/drm/nouveau/dispnv04/disp.c    |   5
 drivers/gpu/drm/nouveau/dispnv04/overlay.c |   4
 drivers/gpu/drm/nouveau/dispnv50/disp.c    |   6
 drivers/gpu/drm/nouveau/dispnv50/wndw.c    |   2
 drivers/gpu/drm/nouveau/nouveau_abi16.c    |   3
 drivers/gpu/drm/nouveau/nouveau_bo.c       | 321
 drivers/gpu/drm/nouveau/nouveau_bo.h       |  15
 drivers/gpu/drm/nouveau/nouveau_chan.c     |   4
 drivers/gpu/drm/nouveau/nouveau_dmem.c     |   6
 drivers/gpu/drm/nouveau/nouveau_drv.h      |   2
 drivers/gpu/drm/nouveau/nouveau_fbcon.c    |   5
 drivers/gpu/drm/nouveau/nouveau_gem.c      |  34
 drivers/gpu/drm/nouveau/nouveau_prime.c    |  13
 drivers/gpu/drm/nouveau/nouveau_sgdma.c    |  66
 drivers/gpu/drm/nouveau/nouveau_ttm.c      |  75
 drivers/gpu/drm/nouveau/nouveau_ttm.h      |   3
 drivers/gpu/drm/nouveau/nv17_fence.c       |   5
 drivers/gpu/drm/nouveau/nv50_fence.c       |   5
 drivers/gpu/drm/nouveau/nv84_fence.c       |  13
 20 files changed, 376 insertions(+), 220 deletions(-)
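
The series swaps raw TTM placement flags (TTM_PL_FLAG_*) for nouveau's own GEM domain bits (NOUVEAU_GEM_DOMAIN_*) at every allocation, pin and placement site. A rough orientation sketch of the correspondence follows; domain_to_mem_type() is a hypothetical helper for illustration only, the in-tree translation is set_placement_list() in nouveau_bo.c below:

	/* Hypothetical helper, for orientation only: how a GEM domain
	 * mask selects a TTM memory type. */
	static unsigned int domain_to_mem_type(u32 domain)
	{
		if (domain & NOUVEAU_GEM_DOMAIN_VRAM)
			return TTM_PL_VRAM;	/* device-local memory */
		if (domain & NOUVEAU_GEM_DOMAIN_GART)
			return TTM_PL_TT;	/* GART-mapped system pages */
		return TTM_PL_SYSTEM;		/* NOUVEAU_GEM_DOMAIN_CPU */
	}

NOUVEAU_GEM_DOMAIN_COHERENT takes over the role of the old TTM_PL_FLAG_UNCACHED modifier: it requests a cache-coherent (uncached) mapping rather than naming a memory type.
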
diff --git a/drivers/gpu/drm/nouveau/dispnv04/crtc.c b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
index 6416b6907aeb..f9e962fd94d0 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/crtc.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
@@ -615,7 +615,7 @@ nv_crtc_swap_fbs(struct drm_crtc *crtc, struct drm_framebuffer *old_fb)
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
int ret;
- ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM, false);
+ ret = nouveau_bo_pin(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, false);
if (ret == 0) {
if (disp->image[nv_crtc->index])
nouveau_bo_unpin(disp->image[nv_crtc->index]);
@@ -1172,7 +1172,7 @@ nv04_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
return -ENOMEM;
if (new_bo != old_bo) {
- ret = nouveau_bo_pin(new_bo, TTM_PL_FLAG_VRAM, true);
+ ret = nouveau_bo_pin(new_bo, NOUVEAU_GEM_DOMAIN_VRAM, true);
if (ret)
goto fail_free;
}
@@ -1336,10 +1336,11 @@ nv04_crtc_create(struct drm_device *dev, int crtc_num)
drm_mode_crtc_set_gamma_size(&nv_crtc->base, 256);
ret = nouveau_bo_new(&nouveau_drm(dev)->client, 64*64*4, 0x100,
- TTM_PL_FLAG_VRAM, 0, 0x0000, NULL, NULL,
+ NOUVEAU_GEM_DOMAIN_VRAM, 0, 0x0000, NULL, NULL,
&nv_crtc->cursor.nvbo);
if (!ret) {
- ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM, false);
+ ret = nouveau_bo_pin(nv_crtc->cursor.nvbo,
+ NOUVEAU_GEM_DOMAIN_VRAM, false);
if (!ret) {
ret = nouveau_bo_map(nv_crtc->cursor.nvbo);
if (ret)
diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.c b/drivers/gpu/drm/nouveau/dispnv04/disp.c
index 3ee836dc5058..7739f46470d3 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/disp.c
@@ -134,7 +134,7 @@ nv04_display_init(struct drm_device *dev, bool resume, bool runtime)
if (!fb || !fb->obj[0])
continue;
nvbo = nouveau_gem_object(fb->obj[0]);
- ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM, true);
+ ret = nouveau_bo_pin(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, true);
if (ret)
NV_ERROR(drm, "Could not pin framebuffer\n");
}
@@ -144,7 +144,8 @@ nv04_display_init(struct drm_device *dev, bool resume, bool runtime)
if (!nv_crtc->cursor.nvbo)
continue;
- ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM, true);
+ ret = nouveau_bo_pin(nv_crtc->cursor.nvbo,
+ NOUVEAU_GEM_DOMAIN_VRAM, true);
if (!ret && nv_crtc->cursor.set_offset)
ret = nouveau_bo_map(nv_crtc->cursor.nvbo);
if (ret)
diff --git a/drivers/gpu/drm/nouveau/dispnv04/overlay.c b/drivers/gpu/drm/nouveau/dispnv04/overlay.c
index 193ba9498f3d..37e63e98cd08 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/overlay.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/overlay.c
@@ -142,7 +142,7 @@ nv10_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
return ret;
nvbo = nouveau_gem_object(fb->obj[0]);
- ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM, false);
+ ret = nouveau_bo_pin(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, false);
if (ret)
return ret;
@@ -387,7 +387,7 @@ nv04_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
return ret;
nvbo = nouveau_gem_object(fb->obj[0]);
- ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM, false);
+ ret = nouveau_bo_pin(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, false);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index 7799530e07c1..b111fe24a06b 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -2069,6 +2069,7 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
drm_atomic_helper_wait_for_fences(dev, state, false);
drm_atomic_helper_wait_for_dependencies(state);
drm_atomic_helper_update_legacy_modeset_state(dev, state);
+ drm_atomic_helper_calc_timestamping_constants(state);
if (atom->lock_core)
mutex_lock(&disp->mutex);
@@ -2622,10 +2623,11 @@ nv50_display_create(struct drm_device *dev)
dev->mode_config.normalize_zpos = true;
/* small shared memory area we use for notifiers and semaphores */
- ret = nouveau_bo_new(&drm->client, 4096, 0x1000, TTM_PL_FLAG_VRAM,
+ ret = nouveau_bo_new(&drm->client, 4096, 0x1000,
+ NOUVEAU_GEM_DOMAIN_VRAM,
0, 0x0000, NULL, NULL, &disp->sync);
if (!ret) {
- ret = nouveau_bo_pin(disp->sync, TTM_PL_FLAG_VRAM, true);
+ ret = nouveau_bo_pin(disp->sync, NOUVEAU_GEM_DOMAIN_VRAM, true);
if (!ret) {
ret = nouveau_bo_map(disp->sync);
if (ret)
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
index 447ecc9fec42..0356474ad6f6 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
@@ -542,7 +542,7 @@ nv50_wndw_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state)
return 0;
nvbo = nouveau_gem_object(fb->obj[0]);
- ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM, true);
+ ret = nouveau_bo_pin(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, true);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
index 21537ca1dd39..9a5be6f32424 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -328,7 +328,8 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
ret = nouveau_gem_new(cli, PAGE_SIZE, 0, NOUVEAU_GEM_DOMAIN_GART,
0, 0, &chan->ntfy);
if (ret == 0)
- ret = nouveau_bo_pin(chan->ntfy, TTM_PL_FLAG_TT, false);
+ ret = nouveau_bo_pin(chan->ntfy, NOUVEAU_GEM_DOMAIN_GART,
+ false);
if (ret)
goto done;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 9140387f30dc..2ee75646ad6f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -44,6 +44,9 @@
#include <nvif/if500b.h>
#include <nvif/if900b.h>
+static int nouveau_ttm_tt_bind(struct ttm_bo_device *bdev, struct ttm_tt *ttm,
+ struct ttm_resource *reg);
+
/*
* NV10-NV40 tiling helpers
*/
@@ -137,6 +140,7 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
struct nouveau_bo *nvbo = nouveau_bo(bo);
WARN_ON(nvbo->pin_refcnt > 0);
+ nouveau_bo_del_io_reserve_lru(bo);
nv10_bo_put_tile_region(dev, nvbo->tile, NULL);
/*
@@ -158,8 +162,7 @@ roundup_64(u64 x, u32 y)
}
static void
-nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
- int *align, u64 *size)
+nouveau_bo_fixup_align(struct nouveau_bo *nvbo, int *align, u64 *size)
{
struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
struct nvif_device *device = &drm->client.device;
@@ -192,7 +195,7 @@ nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
}
struct nouveau_bo *
-nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 flags,
+nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 domain,
u32 tile_mode, u32 tile_flags)
{
struct nouveau_drm *drm = cli->drm;
@@ -218,7 +221,7 @@ nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 flags,
* mapping, but is what NOUVEAU_GEM_DOMAIN_COHERENT gets translated
* into in nouveau_gem_new().
*/
- if (flags & TTM_PL_FLAG_UNCACHED) {
+ if (domain & NOUVEAU_GEM_DOMAIN_COHERENT) {
/* Determine if we can get a cache-coherent map, forcing
* uncached mapping if we can't.
*/
@@ -258,9 +261,9 @@ nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 flags,
* Skip page sizes that can't support needed domains.
*/
if (cli->device.info.family > NV_DEVICE_INFO_V0_CURIE &&
- (flags & TTM_PL_FLAG_VRAM) && !vmm->page[i].vram)
+ (domain & NOUVEAU_GEM_DOMAIN_VRAM) && !vmm->page[i].vram)
continue;
- if ((flags & TTM_PL_FLAG_TT) &&
+ if ((domain & NOUVEAU_GEM_DOMAIN_GART) &&
(!vmm->page[i].host || vmm->page[i].shift > PAGE_SHIFT))
continue;
@@ -287,13 +290,13 @@ nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 flags,
}
nvbo->page = vmm->page[pi].shift;
- nouveau_bo_fixup_align(nvbo, flags, align, size);
+ nouveau_bo_fixup_align(nvbo, align, size);
return nvbo;
}
int
-nouveau_bo_init(struct nouveau_bo *nvbo, u64 size, int align, u32 flags,
+nouveau_bo_init(struct nouveau_bo *nvbo, u64 size, int align, u32 domain,
struct sg_table *sg, struct dma_resv *robj)
{
int type = sg ? ttm_bo_type_sg : ttm_bo_type_device;
@@ -303,7 +306,8 @@ nouveau_bo_init(struct nouveau_bo *nvbo, u64 size, int align, u32 flags,
acc_size = ttm_bo_dma_acc_size(nvbo->bo.bdev, size, sizeof(*nvbo));
nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
- nouveau_bo_placement_set(nvbo, flags, 0);
+ nouveau_bo_placement_set(nvbo, domain, 0);
+ INIT_LIST_HEAD(&nvbo->io_reserve_lru);
ret = ttm_bo_init(nvbo->bo.bdev, &nvbo->bo, size, type,
&nvbo->placement, align >> PAGE_SHIFT, false,
@@ -318,19 +322,19 @@ nouveau_bo_init(struct nouveau_bo *nvbo, u64 size, int align, u32 flags,
int
nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
- uint32_t flags, uint32_t tile_mode, uint32_t tile_flags,
+ uint32_t domain, uint32_t tile_mode, uint32_t tile_flags,
struct sg_table *sg, struct dma_resv *robj,
struct nouveau_bo **pnvbo)
{
struct nouveau_bo *nvbo;
int ret;
- nvbo = nouveau_bo_alloc(cli, &size, &align, flags, tile_mode,
+ nvbo = nouveau_bo_alloc(cli, &size, &align, domain, tile_mode,
tile_flags);
if (IS_ERR(nvbo))
return PTR_ERR(nvbo);
- ret = nouveau_bo_init(nvbo, size, align, flags, sg, robj);
+ ret = nouveau_bo_init(nvbo, size, align, domain, sg, robj);
if (ret)
return ret;
@@ -339,27 +343,49 @@ nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
}
static void
-set_placement_list(struct ttm_place *pl, unsigned *n, uint32_t type, uint32_t flags)
+set_placement_list(struct nouveau_drm *drm, struct ttm_place *pl, unsigned *n,
+ uint32_t domain, uint32_t flags)
{
*n = 0;
- if (type & TTM_PL_FLAG_VRAM)
- pl[(*n)++].flags = TTM_PL_FLAG_VRAM | flags;
- if (type & TTM_PL_FLAG_TT)
- pl[(*n)++].flags = TTM_PL_FLAG_TT | flags;
- if (type & TTM_PL_FLAG_SYSTEM)
- pl[(*n)++].flags = TTM_PL_FLAG_SYSTEM | flags;
+ if (domain & NOUVEAU_GEM_DOMAIN_VRAM) {
+ struct nvif_mmu *mmu = &drm->client.mmu;
+ const u8 type = mmu->type[drm->ttm.type_vram].type;
+
+ pl[*n].mem_type = TTM_PL_VRAM;
+ pl[*n].flags = flags & ~TTM_PL_FLAG_CACHED;
+
+ /* Some BARs do not support being ioremapped WC */
+ if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA &&
+ type & NVIF_MEM_UNCACHED)
+ pl[*n].flags &= ~TTM_PL_FLAG_WC;
+
+ (*n)++;
+ }
+ if (domain & NOUVEAU_GEM_DOMAIN_GART) {
+ pl[*n].mem_type = TTM_PL_TT;
+ pl[*n].flags = flags;
+
+ if (drm->agp.bridge)
+ pl[*n].flags &= ~TTM_PL_FLAG_CACHED;
+
+ (*n)++;
+ }
+ if (domain & NOUVEAU_GEM_DOMAIN_CPU) {
+ pl[*n].mem_type = TTM_PL_SYSTEM;
+ pl[(*n)++].flags = flags;
+ }
}
static void
-set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
+set_placement_range(struct nouveau_bo *nvbo, uint32_t domain)
{
struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
u32 vram_pages = drm->client.device.info.ram_size >> PAGE_SHIFT;
unsigned i, fpfn, lpfn;
if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CELSIUS &&
- nvbo->mode && (type & TTM_PL_FLAG_VRAM) &&
+ nvbo->mode && (domain & NOUVEAU_GEM_DOMAIN_VRAM) &&
nvbo->bo.mem.num_pages < vram_pages / 4) {
/*
* Make sure that the color and depth buffers are handled
@@ -386,26 +412,28 @@ set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
}
void
-nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy)
+nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t domain,
+ uint32_t busy)
{
+ struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
struct ttm_placement *pl = &nvbo->placement;
uint32_t flags = (nvbo->force_coherent ? TTM_PL_FLAG_UNCACHED :
TTM_PL_MASK_CACHING) |
(nvbo->pin_refcnt ? TTM_PL_FLAG_NO_EVICT : 0);
pl->placement = nvbo->placements;
- set_placement_list(nvbo->placements, &pl->num_placement,
- type, flags);
+ set_placement_list(drm, nvbo->placements, &pl->num_placement,
+ domain, flags);
pl->busy_placement = nvbo->busy_placements;
- set_placement_list(nvbo->busy_placements, &pl->num_busy_placement,
- type | busy, flags);
+ set_placement_list(drm, nvbo->busy_placements, &pl->num_busy_placement,
+ domain | busy, flags);
- set_placement_range(nvbo, type);
+ set_placement_range(nvbo, domain);
}
int
-nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype, bool contig)
+nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t domain, bool contig)
{
struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
struct ttm_buffer_object *bo = &nvbo->bo;
@@ -417,7 +445,7 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype, bool contig)
return ret;
if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA &&
- memtype == TTM_PL_FLAG_VRAM && contig) {
+ domain == NOUVEAU_GEM_DOMAIN_VRAM && contig) {
if (!nvbo->contig) {
nvbo->contig = true;
force = true;
@@ -426,10 +454,22 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype, bool contig)
}
if (nvbo->pin_refcnt) {
- if (!(memtype & (1 << bo->mem.mem_type)) || evict) {
+ bool error = evict;
+
+ switch (bo->mem.mem_type) {
+ case TTM_PL_VRAM:
+ error |= !(domain & NOUVEAU_GEM_DOMAIN_VRAM);
+ break;
+ case TTM_PL_TT:
+ error |= !(domain & NOUVEAU_GEM_DOMAIN_GART);
+ break;
+ default:
+ break;
+ }
+
+ if (error) {
NV_ERROR(drm, "bo %p pinned elsewhere: "
"0x%08x vs 0x%08x\n", bo,
- 1 << bo->mem.mem_type, memtype);
+ bo->mem.mem_type, domain);
ret = -EBUSY;
}
nvbo->pin_refcnt++;
@@ -437,14 +477,14 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype, bool contig)
}
if (evict) {
- nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT, 0);
+ nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART, 0);
ret = nouveau_bo_validate(nvbo, false, false);
if (ret)
goto out;
}
nvbo->pin_refcnt++;
- nouveau_bo_placement_set(nvbo, memtype, 0);
+ nouveau_bo_placement_set(nvbo, domain, 0);
/* drop pin_refcnt temporarily, so we don't trip the assertion
* in nouveau_bo_move() that makes sure we're not trying to
@@ -490,7 +530,16 @@ nouveau_bo_unpin(struct nouveau_bo *nvbo)
if (ref)
goto out;
- nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);
+ switch (bo->mem.mem_type) {
+ case TTM_PL_VRAM:
+ nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, 0);
+ break;
+ case TTM_PL_TT:
+ nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART, 0);
+ break;
+ default:
+ break;
+ }
ret = nouveau_bo_validate(nvbo, false, false);
if (ret == 0) {
@@ -574,6 +623,26 @@ nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo)
PAGE_SIZE, DMA_FROM_DEVICE);
}
+void nouveau_bo_add_io_reserve_lru(struct ttm_buffer_object *bo)
+{
+ struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
+ struct nouveau_bo *nvbo = nouveau_bo(bo);
+
+ mutex_lock(&drm->ttm.io_reserve_mutex);
+ list_move_tail(&nvbo->io_reserve_lru, &drm->ttm.io_reserve_lru);
+ mutex_unlock(&drm->ttm.io_reserve_mutex);
+}
+
+void nouveau_bo_del_io_reserve_lru(struct ttm_buffer_object *bo)
+{
+ struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
+ struct nouveau_bo *nvbo = nouveau_bo(bo);
+
+ mutex_lock(&drm->ttm.io_reserve_mutex);
+ list_del_init(&nvbo->io_reserve_lru);
+ mutex_unlock(&drm->ttm.io_reserve_mutex);
+}
+
int
nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
bool no_wait_gpu)
@@ -646,6 +715,36 @@ nouveau_ttm_tt_create(struct ttm_buffer_object *bo, uint32_t page_flags)
return nouveau_sgdma_create_ttm(bo, page_flags);
}
+static int
+nouveau_ttm_tt_bind(struct ttm_bo_device *bdev, struct ttm_tt *ttm,
+ struct ttm_resource *reg)
+{
+#if IS_ENABLED(CONFIG_AGP)
+ struct nouveau_drm *drm = nouveau_bdev(bdev);
+#endif
+ if (!reg)
+ return -EINVAL;
+#if IS_ENABLED(CONFIG_AGP)
+ if (drm->agp.bridge)
+ return ttm_agp_bind(ttm, reg);
+#endif
+ return nouveau_sgdma_bind(bdev, ttm, reg);
+}
+
+static void
+nouveau_ttm_tt_unbind(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
+{
+#if IS_ENABLED(CONFIG_AGP)
+ struct nouveau_drm *drm = nouveau_bdev(bdev);
+
+ if (drm->agp.bridge) {
+ ttm_agp_unbind(ttm);
+ return;
+ }
+#endif
+ nouveau_sgdma_unbind(bdev, ttm);
+}
+
static void
nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
{
@@ -653,11 +752,11 @@ nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
switch (bo->mem.mem_type) {
case TTM_PL_VRAM:
- nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT,
- TTM_PL_FLAG_SYSTEM);
+ nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART,
+ NOUVEAU_GEM_DOMAIN_CPU);
break;
default:
- nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM, 0);
+ nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_CPU, 0);
break;
}
@@ -725,7 +824,7 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
if (ret == 0) {
ret = ttm_bo_move_accel_cleanup(bo,
&fence->base,
- evict,
+ evict, false,
new_reg);
nouveau_fence_unref(&fence);
}
@@ -811,7 +910,8 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
struct ttm_place placement_memtype = {
.fpfn = 0,
.lpfn = 0,
- .flags = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING
+ .mem_type = TTM_PL_TT,
+ .flags = TTM_PL_MASK_CACHING
};
struct ttm_placement placement;
struct ttm_resource tmp_reg;
@@ -826,7 +926,11 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
if (ret)
return ret;
- ret = ttm_tt_bind(bo->ttm, &tmp_reg, &ctx);
+ ret = ttm_tt_populate(bo->bdev, bo->ttm, &ctx);
+ if (ret)
+ goto out;
+
+ ret = nouveau_ttm_tt_bind(bo->bdev, bo->ttm, &tmp_reg);
if (ret)
goto out;
@@ -848,7 +952,8 @@ nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
struct ttm_place placement_memtype = {
.fpfn = 0,
.lpfn = 0,
- .flags = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING
+ .mem_type = TTM_PL_TT,
+ .flags = TTM_PL_MASK_CACHING
};
struct ttm_placement placement;
struct ttm_resource tmp_reg;
@@ -888,6 +993,8 @@ nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, bool evict,
if (bo->destroy != nouveau_bo_del_ttm)
return;
+ nouveau_bo_del_io_reserve_lru(bo);
+
if (mem && new_reg->mem_type != TTM_PL_SYSTEM &&
mem->mem.page == nvbo->page) {
list_for_each_entry(vma, &nvbo->vma_list, head) {
@@ -969,9 +1076,7 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict,
/* Fake bo copy. */
if (old_reg->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
- BUG_ON(bo->mem.mm_node != NULL);
- bo->mem = *new_reg;
- new_reg->mm_node = NULL;
+ ttm_bo_move_null(bo, new_reg);
goto out;
}
@@ -1018,32 +1123,60 @@ nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
filp->private_data);
}
+static void
+nouveau_ttm_io_mem_free_locked(struct nouveau_drm *drm,
+ struct ttm_resource *reg)
+{
+ struct nouveau_mem *mem = nouveau_mem(reg);
+
+ if (drm->client.mem->oclass >= NVIF_CLASS_MEM_NV50) {
+ switch (reg->mem_type) {
+ case TTM_PL_TT:
+ if (mem->kind)
+ nvif_object_unmap_handle(&mem->mem.object);
+ break;
+ case TTM_PL_VRAM:
+ nvif_object_unmap_handle(&mem->mem.object);
+ break;
+ default:
+ break;
+ }
+ }
+}
+
static int
nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_resource *reg)
{
struct nouveau_drm *drm = nouveau_bdev(bdev);
struct nvkm_device *device = nvxx_device(&drm->client.device);
struct nouveau_mem *mem = nouveau_mem(reg);
+ int ret;
+ mutex_lock(&drm->ttm.io_reserve_mutex);
+retry:
switch (reg->mem_type) {
case TTM_PL_SYSTEM:
/* System memory */
- return 0;
+ ret = 0;
+ goto out;
case TTM_PL_TT:
#if IS_ENABLED(CONFIG_AGP)
if (drm->agp.bridge) {
- reg->bus.offset = reg->start << PAGE_SHIFT;
- reg->bus.base = drm->agp.base;
+ reg->bus.offset = (reg->start << PAGE_SHIFT) +
+ drm->agp.base;
reg->bus.is_iomem = !drm->agp.cma;
}
#endif
- if (drm->client.mem->oclass < NVIF_CLASS_MEM_NV50 || !mem->kind)
+ if (drm->client.mem->oclass < NVIF_CLASS_MEM_NV50 ||
+ !mem->kind) {
/* untiled */
+ ret = 0;
break;
+ }
fallthrough; /* tiled memory */
case TTM_PL_VRAM:
- reg->bus.offset = reg->start << PAGE_SHIFT;
- reg->bus.base = device->func->resource_addr(device, 1);
+ reg->bus.offset = (reg->start << PAGE_SHIFT) +
+ device->func->resource_addr(device, 1);
reg->bus.is_iomem = true;
if (drm->client.mem->oclass >= NVIF_CLASS_MEM_NV50) {
union {
@@ -1052,7 +1185,6 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_resource *reg)
} args;
u64 handle, length;
u32 argc = 0;
- int ret;
switch (mem->mem.object.oclass) {
case NVIF_CLASS_MEM_NV50:
@@ -1078,39 +1210,46 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_resource *reg)
&handle, &length);
if (ret != 1) {
if (WARN_ON(ret == 0))
- return -EINVAL;
- return ret;
+ ret = -EINVAL;
+ goto out;
}
- reg->bus.base = 0;
reg->bus.offset = handle;
+ ret = 0;
}
break;
default:
- return -EINVAL;
+ ret = -EINVAL;
}
- return 0;
+
+out:
+ if (ret == -ENOSPC) {
+ struct nouveau_bo *nvbo;
+
+ nvbo = list_first_entry_or_null(&drm->ttm.io_reserve_lru,
+ typeof(*nvbo),
+ io_reserve_lru);
+ if (nvbo) {
+ list_del_init(&nvbo->io_reserve_lru);
+ drm_vma_node_unmap(&nvbo->bo.base.vma_node,
+ bdev->dev_mapping);
+ nouveau_ttm_io_mem_free_locked(drm, &nvbo->bo.mem);
+ goto retry;
+ }
+
+ }
+ mutex_unlock(&drm->ttm.io_reserve_mutex);
+ return ret;
}
static void
nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_resource *reg)
{
struct nouveau_drm *drm = nouveau_bdev(bdev);
- struct nouveau_mem *mem = nouveau_mem(reg);
- if (drm->client.mem->oclass >= NVIF_CLASS_MEM_NV50) {
- switch (reg->mem_type) {
- case TTM_PL_TT:
- if (mem->kind)
- nvif_object_unmap_handle(&mem->mem.object);
- break;
- case TTM_PL_VRAM:
- nvif_object_unmap_handle(&mem->mem.object);
- break;
- default:
- break;
- }
- }
+ mutex_lock(&drm->ttm.io_reserve_mutex);
+ nouveau_ttm_io_mem_free_locked(drm, reg);
+ mutex_unlock(&drm->ttm.io_reserve_mutex);
}
static int
@@ -1131,7 +1270,8 @@ nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
return 0;
if (bo->mem.mem_type == TTM_PL_SYSTEM) {
- nouveau_bo_placement_set(nvbo, TTM_PL_TT, 0);
+ nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART,
+ 0);
ret = nouveau_bo_validate(nvbo, false, false);
if (ret)
@@ -1155,35 +1295,36 @@ nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
nvbo->busy_placements[i].lpfn = mappable;
}
- nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_VRAM, 0);
+ nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, 0);
return nouveau_bo_validate(nvbo, false, false);
}
static int
-nouveau_ttm_tt_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
+nouveau_ttm_tt_populate(struct ttm_bo_device *bdev,
+ struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
struct ttm_dma_tt *ttm_dma = (void *)ttm;
struct nouveau_drm *drm;
struct device *dev;
bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
- if (ttm->state != tt_unpopulated)
+ if (ttm_tt_is_populated(ttm))
return 0;
if (slave && ttm->sg) {
/* make userspace faulting work */
drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
ttm_dma->dma_address, ttm->num_pages);
- ttm->state = tt_unbound;
+ ttm_tt_set_populated(ttm);
return 0;
}
- drm = nouveau_bdev(ttm->bdev);
+ drm = nouveau_bdev(bdev);
dev = drm->dev->dev;
#if IS_ENABLED(CONFIG_AGP)
if (drm->agp.bridge) {
- return ttm_agp_tt_populate(ttm, ctx);
+ return ttm_pool_populate(ttm, ctx);
}
#endif
@@ -1196,7 +1337,8 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
}
static void
-nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
+nouveau_ttm_tt_unpopulate(struct ttm_bo_device *bdev,
+ struct ttm_tt *ttm)
{
struct ttm_dma_tt *ttm_dma = (void *)ttm;
struct nouveau_drm *drm;
@@ -1206,12 +1348,12 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
if (slave)
return;
- drm = nouveau_bdev(ttm->bdev);
+ drm = nouveau_bdev(bdev);
dev = drm->dev->dev;
#if IS_ENABLED(CONFIG_AGP)
if (drm->agp.bridge) {
- ttm_agp_tt_unpopulate(ttm);
+ ttm_pool_unpopulate(ttm);
return;
}
#endif
@@ -1226,6 +1368,22 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
ttm_unmap_and_unpopulate_pages(dev, ttm_dma);
}
+static void
+nouveau_ttm_tt_destroy(struct ttm_bo_device *bdev,
+ struct ttm_tt *ttm)
+{
+#if IS_ENABLED(CONFIG_AGP)
+ struct nouveau_drm *drm = nouveau_bdev(bdev);
+ if (drm->agp.bridge) {
+ ttm_agp_unbind(ttm);
+ ttm_tt_destroy_common(bdev, ttm);
+ ttm_agp_destroy(ttm);
+ return;
+ }
+#endif
+ nouveau_sgdma_destroy(bdev, ttm);
+}
+
void
nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence, bool exclusive)
{
@@ -1241,6 +1399,9 @@ struct ttm_bo_driver nouveau_bo_driver = {
.ttm_tt_create = &nouveau_ttm_tt_create,
.ttm_tt_populate = &nouveau_ttm_tt_populate,
.ttm_tt_unpopulate = &nouveau_ttm_tt_unpopulate,
+ .ttm_tt_bind = &nouveau_ttm_tt_bind,
+ .ttm_tt_unbind = &nouveau_ttm_tt_unbind,
+ .ttm_tt_destroy = &nouveau_ttm_tt_destroy,
.eviction_valuable = ttm_bo_eviction_valuable,
.evict_flags = nouveau_bo_evict_flags,
.move_notify = nouveau_bo_move_ntfy,
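
nouveau_ttm_io_mem_reserve() now keeps every BO with a live IO mapping on drm->ttm.io_reserve_lru and, when the mappable window is exhausted (-ENOSPC), unmaps the least recently used entry and retries. A condensed sketch of that recycling loop; io_mem_reserve_once() is a hypothetical stand-in for the switch statement above, not a real function:

	/* Sketch only: recycle BAR space under pressure. Each -ENOSPC
	 * evicts the coldest IO mapping (head of the LRU) and retries. */
	while ((ret = io_mem_reserve_once(drm, reg)) == -ENOSPC) {
		struct nouveau_bo *victim =
			list_first_entry_or_null(&drm->ttm.io_reserve_lru,
						 struct nouveau_bo,
						 io_reserve_lru);
		if (!victim)
			break;		/* nothing left to evict */
		list_del_init(&victim->io_reserve_lru);
		drm_vma_node_unmap(&victim->bo.base.vma_node,
				   bdev->dev_mapping); /* drop CPU mappings */
		nouveau_ttm_io_mem_free_locked(drm, &victim->bo.mem);
	}
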
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.h b/drivers/gpu/drm/nouveau/nouveau_bo.h
index aecb7481df0d..2a23c8207436 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.h
@@ -18,6 +18,7 @@ struct nouveau_bo {
bool force_coherent;
struct ttm_bo_kmap_obj kmap;
struct list_head head;
+ struct list_head io_reserve_lru;
/* protected by ttm_bo_reserve() */
struct drm_file *reserved_by;
@@ -76,10 +77,10 @@ extern struct ttm_bo_driver nouveau_bo_driver;
void nouveau_bo_move_init(struct nouveau_drm *);
struct nouveau_bo *nouveau_bo_alloc(struct nouveau_cli *, u64 *size, int *align,
- u32 flags, u32 tile_mode, u32 tile_flags);
-int nouveau_bo_init(struct nouveau_bo *, u64 size, int align, u32 flags,
+ u32 domain, u32 tile_mode, u32 tile_flags);
+int nouveau_bo_init(struct nouveau_bo *, u64 size, int align, u32 domain,
struct sg_table *sg, struct dma_resv *robj);
-int nouveau_bo_new(struct nouveau_cli *, u64 size, int align, u32 flags,
+int nouveau_bo_new(struct nouveau_cli *, u64 size, int align, u32 domain,
u32 tile_mode, u32 tile_flags, struct sg_table *sg,
struct dma_resv *robj,
struct nouveau_bo **);
@@ -96,6 +97,8 @@ int nouveau_bo_validate(struct nouveau_bo *, bool interruptible,
bool no_wait_gpu);
void nouveau_bo_sync_for_device(struct nouveau_bo *nvbo);
void nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo);
+void nouveau_bo_add_io_reserve_lru(struct ttm_buffer_object *bo);
+void nouveau_bo_del_io_reserve_lru(struct ttm_buffer_object *bo);
/* TODO: submit equivalent to TTM generic API upstream? */
static inline void __iomem *
@@ -119,13 +122,13 @@ nouveau_bo_unmap_unpin_unref(struct nouveau_bo **pnvbo)
}
static inline int
-nouveau_bo_new_pin_map(struct nouveau_cli *cli, u64 size, int align, u32 flags,
+nouveau_bo_new_pin_map(struct nouveau_cli *cli, u64 size, int align, u32 domain,
struct nouveau_bo **pnvbo)
{
- int ret = nouveau_bo_new(cli, size, align, flags,
+ int ret = nouveau_bo_new(cli, size, align, domain,
0, 0, NULL, NULL, pnvbo);
if (ret == 0) {
- ret = nouveau_bo_pin(*pnvbo, flags, true);
+ ret = nouveau_bo_pin(*pnvbo, domain, true);
if (ret == 0) {
ret = nouveau_bo_map(*pnvbo);
if (ret == 0)
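
With the header converted, callers pass domains straight through the convenience helpers. A usage sketch under the new signatures (error handling trimmed; mirrors how the fence code below uses the interface):

	/* Allocate, pin and CPU-map a page of VRAM, then tear it down. */
	struct nouveau_bo *nvbo = NULL;
	int ret = nouveau_bo_new_pin_map(&drm->client, 0x1000, 0x100,
					 NOUVEAU_GEM_DOMAIN_VRAM, &nvbo);
	if (ret == 0)
		nouveau_bo_unmap_unpin_unref(&nvbo);
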
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c
index b80e4ebf14a6..8f099601d2f2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.c
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.c
@@ -163,9 +163,9 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
atomic_set(&chan->killed, 0);
/* allocate memory for dma push buffer */
- target = TTM_PL_FLAG_TT | TTM_PL_FLAG_UNCACHED;
+ target = NOUVEAU_GEM_DOMAIN_GART | NOUVEAU_GEM_DOMAIN_COHERENT;
if (nouveau_vram_pushbuf)
- target = TTM_PL_FLAG_VRAM;
+ target = NOUVEAU_GEM_DOMAIN_VRAM;
ret = nouveau_bo_new(cli, size, 0, target, 0, 0, NULL, NULL,
&chan->push.buffer);
diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c
index 4e8112fde3e6..af70ef8e5471 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dmem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c
@@ -254,12 +254,12 @@ nouveau_dmem_chunk_alloc(struct nouveau_drm *drm, struct page **ppage)
chunk->pagemap.owner = drm->dev;
ret = nouveau_bo_new(&drm->client, DMEM_CHUNK_SIZE, 0,
- TTM_PL_FLAG_VRAM, 0, 0, NULL, NULL,
+ NOUVEAU_GEM_DOMAIN_VRAM, 0, 0, NULL, NULL,
&chunk->bo);
if (ret)
goto out_release;
- ret = nouveau_bo_pin(chunk->bo, TTM_PL_FLAG_VRAM, false);
+ ret = nouveau_bo_pin(chunk->bo, NOUVEAU_GEM_DOMAIN_VRAM, false);
if (ret)
goto out_bo_free;
@@ -346,7 +346,7 @@ nouveau_dmem_resume(struct nouveau_drm *drm)
mutex_lock(&drm->dmem->mutex);
list_for_each_entry(chunk, &drm->dmem->chunks, list) {
- ret = nouveau_bo_pin(chunk->bo, TTM_PL_FLAG_VRAM, false);
+ ret = nouveau_bo_pin(chunk->bo, NOUVEAU_GEM_DOMAIN_VRAM, false);
/* FIXME handle pin failure */
WARN_ON(ret);
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 73ebf5fba2fc..b8025507a9e4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -164,6 +164,8 @@ struct nouveau_drm {
int type_vram;
int type_host[2];
int type_ncoh[2];
+ struct mutex io_reserve_mutex;
+ struct list_head io_reserve_lru;
} ttm;
/* GEM interface support */
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index fad8030ec1f8..24ec5339efb4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -341,7 +341,7 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
if (ret)
goto out_unref;
- ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM, false);
+ ret = nouveau_bo_pin(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, false);
if (ret) {
NV_ERROR(drm, "failed to pin fb: %d\n", ret);
goto out_unref;
@@ -378,8 +378,7 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
FBINFO_HWACCEL_FILLRECT |
FBINFO_HWACCEL_IMAGEBLIT;
info->fbops = &nouveau_fbcon_sw_ops;
- info->fix.smem_start = nvbo->bo.mem.bus.base +
- nvbo->bo.mem.bus.offset;
+ info->fix.smem_start = nvbo->bo.mem.bus.offset;
info->fix.smem_len = nvbo->bo.mem.num_pages << PAGE_SHIFT;
info->screen_base = nvbo_kmap_obj_iovirtual(nvbo);
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 81f111ad3f4f..89adadf4706b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -176,20 +176,12 @@ nouveau_gem_new(struct nouveau_cli *cli, u64 size, int align, uint32_t domain,
{
struct nouveau_drm *drm = cli->drm;
struct nouveau_bo *nvbo;
- u32 flags = 0;
int ret;
- if (domain & NOUVEAU_GEM_DOMAIN_VRAM)
- flags |= TTM_PL_FLAG_VRAM;
- if (domain & NOUVEAU_GEM_DOMAIN_GART)
- flags |= TTM_PL_FLAG_TT;
- if (!flags || domain & NOUVEAU_GEM_DOMAIN_CPU)
- flags |= TTM_PL_FLAG_SYSTEM;
+ if (!(domain & (NOUVEAU_GEM_DOMAIN_VRAM | NOUVEAU_GEM_DOMAIN_GART)))
+ domain |= NOUVEAU_GEM_DOMAIN_CPU;
- if (domain & NOUVEAU_GEM_DOMAIN_COHERENT)
- flags |= TTM_PL_FLAG_UNCACHED;
-
- nvbo = nouveau_bo_alloc(cli, &size, &align, flags, tile_mode,
+ nvbo = nouveau_bo_alloc(cli, &size, &align, domain, tile_mode,
tile_flags);
if (IS_ERR(nvbo))
return PTR_ERR(nvbo);
@@ -202,7 +194,7 @@ nouveau_gem_new(struct nouveau_cli *cli, u64 size, int align, uint32_t domain,
return ret;
}
- ret = nouveau_bo_init(nvbo, size, align, flags, NULL, NULL);
+ ret = nouveau_bo_init(nvbo, size, align, domain, NULL, NULL);
if (ret) {
nouveau_bo_ref(NULL, &nvbo);
return ret;
@@ -296,32 +288,28 @@ nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
struct ttm_buffer_object *bo = &nvbo->bo;
uint32_t domains = valid_domains & nvbo->valid_domains &
(write_domains ? write_domains : read_domains);
- uint32_t pref_flags = 0, valid_flags = 0;
+ uint32_t pref_domains = 0;
if (!domains)
return -EINVAL;
- if (valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
- valid_flags |= TTM_PL_FLAG_VRAM;
-
- if (valid_domains & NOUVEAU_GEM_DOMAIN_GART)
- valid_flags |= TTM_PL_FLAG_TT;
+ valid_domains &= ~(NOUVEAU_GEM_DOMAIN_VRAM | NOUVEAU_GEM_DOMAIN_GART);
if ((domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
bo->mem.mem_type == TTM_PL_VRAM)
- pref_flags |= TTM_PL_FLAG_VRAM;
+ pref_domains |= NOUVEAU_GEM_DOMAIN_VRAM;
else if ((domains & NOUVEAU_GEM_DOMAIN_GART) &&
bo->mem.mem_type == TTM_PL_TT)
- pref_flags |= TTM_PL_FLAG_TT;
+ pref_domains |= NOUVEAU_GEM_DOMAIN_GART;
else if (domains & NOUVEAU_GEM_DOMAIN_VRAM)
- pref_flags |= TTM_PL_FLAG_VRAM;
+ pref_domains |= NOUVEAU_GEM_DOMAIN_VRAM;
else
- pref_flags |= TTM_PL_FLAG_TT;
+ pref_domains |= NOUVEAU_GEM_DOMAIN_GART;
- nouveau_bo_placement_set(nvbo, pref_flags, valid_flags);
+ nouveau_bo_placement_set(nvbo, pref_domains, valid_domains);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_prime.c b/drivers/gpu/drm/nouveau/nouveau_prime.c
index bae6a3eccee0..b2ecb91f8ddc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_prime.c
+++ b/drivers/gpu/drm/nouveau/nouveau_prime.c
@@ -32,7 +32,7 @@ struct sg_table *nouveau_gem_prime_get_sg_table(struct drm_gem_object *obj)
struct nouveau_bo *nvbo = nouveau_gem_object(obj);
int npages = nvbo->bo.num_pages;
- return drm_prime_pages_to_sg(nvbo->bo.ttm->pages, npages);
+ return drm_prime_pages_to_sg(obj->dev, nvbo->bo.ttm->pages, npages);
}
void *nouveau_gem_prime_vmap(struct drm_gem_object *obj)
@@ -64,14 +64,12 @@ struct drm_gem_object *nouveau_gem_prime_import_sg_table(struct drm_device *dev,
struct nouveau_bo *nvbo;
struct dma_resv *robj = attach->dmabuf->resv;
u64 size = attach->dmabuf->size;
- u32 flags = 0;
int align = 0;
int ret;
- flags = TTM_PL_FLAG_TT;
-
dma_resv_lock(robj, NULL);
- nvbo = nouveau_bo_alloc(&drm->client, &size, &align, flags, 0, 0);
+ nvbo = nouveau_bo_alloc(&drm->client, &size, &align,
+ NOUVEAU_GEM_DOMAIN_GART, 0, 0);
if (IS_ERR(nvbo)) {
obj = ERR_CAST(nvbo);
goto unlock;
@@ -88,7 +86,8 @@ struct drm_gem_object *nouveau_gem_prime_import_sg_table(struct drm_device *dev,
goto unlock;
}
- ret = nouveau_bo_init(nvbo, size, align, flags, sg, robj);
+ ret = nouveau_bo_init(nvbo, size, align, NOUVEAU_GEM_DOMAIN_GART,
+ sg, robj);
if (ret) {
nouveau_bo_ref(NULL, &nvbo);
obj = ERR_PTR(ret);
@@ -108,7 +107,7 @@ int nouveau_gem_prime_pin(struct drm_gem_object *obj)
int ret;
/* pin buffer into GTT */
- ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_TT, false);
+ ret = nouveau_bo_pin(nvbo, NOUVEAU_GEM_DOMAIN_GART, false);
if (ret)
return -EINVAL;
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index eef75c53a197..806d9ec310f5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -14,87 +14,65 @@ struct nouveau_sgdma_be {
struct nouveau_mem *mem;
};
-static void
-nouveau_sgdma_destroy(struct ttm_tt *ttm)
+void
+nouveau_sgdma_destroy(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
{
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
if (ttm) {
+ nouveau_sgdma_unbind(bdev, ttm);
+ ttm_tt_destroy_common(bdev, ttm);
ttm_dma_tt_fini(&nvbe->ttm);
kfree(nvbe);
}
}
-static int
-nv04_sgdma_bind(struct ttm_tt *ttm, struct ttm_resource *reg)
+int
+nouveau_sgdma_bind(struct ttm_bo_device *bdev, struct ttm_tt *ttm, struct ttm_resource *reg)
{
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
+ struct nouveau_drm *drm = nouveau_bdev(bdev);
struct nouveau_mem *mem = nouveau_mem(reg);
int ret;
+ if (nvbe->mem)
+ return 0;
+
ret = nouveau_mem_host(reg, &nvbe->ttm);
if (ret)
return ret;
- ret = nouveau_mem_map(mem, &mem->cli->vmm.vmm, &mem->vma[0]);
- if (ret) {
- nouveau_mem_fini(mem);
- return ret;
+ if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
+ ret = nouveau_mem_map(mem, &mem->cli->vmm.vmm, &mem->vma[0]);
+ if (ret) {
+ nouveau_mem_fini(mem);
+ return ret;
+ }
}
nvbe->mem = mem;
return 0;
}
-static void
-nv04_sgdma_unbind(struct ttm_tt *ttm)
+void
+nouveau_sgdma_unbind(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
{
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
- nouveau_mem_fini(nvbe->mem);
-}
-
-static struct ttm_backend_func nv04_sgdma_backend = {
- .bind = nv04_sgdma_bind,
- .unbind = nv04_sgdma_unbind,
- .destroy = nouveau_sgdma_destroy
-};
-
-static int
-nv50_sgdma_bind(struct ttm_tt *ttm, struct ttm_resource *reg)
-{
- struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
- struct nouveau_mem *mem = nouveau_mem(reg);
- int ret;
-
- ret = nouveau_mem_host(reg, &nvbe->ttm);
- if (ret)
- return ret;
-
- nvbe->mem = mem;
- return 0;
+ if (nvbe->mem) {
+ nouveau_mem_fini(nvbe->mem);
+ nvbe->mem = NULL;
+ }
}
-static struct ttm_backend_func nv50_sgdma_backend = {
- .bind = nv50_sgdma_bind,
- .unbind = nv04_sgdma_unbind,
- .destroy = nouveau_sgdma_destroy
-};
-
struct ttm_tt *
nouveau_sgdma_create_ttm(struct ttm_buffer_object *bo, uint32_t page_flags)
{
- struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
struct nouveau_sgdma_be *nvbe;
nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL);
if (!nvbe)
return NULL;
- if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA)
- nvbe->ttm.ttm.func = &nv04_sgdma_backend;
- else
- nvbe->ttm.ttm.func = &nv50_sgdma_backend;
-
if (ttm_dma_tt_init(&nvbe->ttm, bo, page_flags)) {
kfree(nvbe);
return NULL;
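
nv04_sgdma_bind() and nv50_sgdma_bind() collapse into a single exported nouveau_sgdma_bind(); the nvbe->mem guard makes rebinding a no-op, and only pre-Tesla chips still map through the client VMM at bind time. That idempotence matters because the buffer-move path now populates and binds explicitly, roughly as in this sketch taken from nouveau_bo_move_flipd() above:

	/* Explicit populate-then-bind: populate allocates the backing
	 * pages, bind attaches them to the GART aperture. */
	ret = ttm_tt_populate(bo->bdev, bo->ttm, &ctx);
	if (ret == 0)
		ret = nouveau_ttm_tt_bind(bo->bdev, bo->ttm, &tmp_reg);
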
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index 53c6f8827322..427341753441 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -123,13 +123,51 @@ const struct ttm_resource_manager_func nv04_gart_manager = {
.free = nouveau_manager_del,
};
+static vm_fault_t nouveau_ttm_fault(struct vm_fault *vmf)
+{
+ struct vm_area_struct *vma = vmf->vma;
+ struct ttm_buffer_object *bo = vma->vm_private_data;
+ pgprot_t prot;
+ vm_fault_t ret;
+
+ ret = ttm_bo_vm_reserve(bo, vmf);
+ if (ret)
+ return ret;
+
+ nouveau_bo_del_io_reserve_lru(bo);
+
+ prot = vm_get_page_prot(vma->vm_flags);
+ ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT, 1);
+ if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
+ return ret;
+
+ nouveau_bo_add_io_reserve_lru(bo);
+
+ dma_resv_unlock(bo->base.resv);
+
+ return ret;
+}
+
+static const struct vm_operations_struct nouveau_ttm_vm_ops = {
+ .fault = nouveau_ttm_fault,
+ .open = ttm_bo_vm_open,
+ .close = ttm_bo_vm_close,
+ .access = ttm_bo_vm_access
+};
+
int
nouveau_ttm_mmap(struct file *filp, struct vm_area_struct *vma)
{
struct drm_file *file_priv = filp->private_data;
struct nouveau_drm *drm = nouveau_drm(file_priv->minor->dev);
+ int ret;
- return ttm_bo_mmap(filp, vma, &drm->ttm.bdev);
+ ret = ttm_bo_mmap(filp, vma, &drm->ttm.bdev);
+ if (ret)
+ return ret;
+
+ vma->vm_ops = &nouveau_ttm_vm_ops;
+ return 0;
}
static int
@@ -156,24 +194,13 @@ nouveau_ttm_init_host(struct nouveau_drm *drm, u8 kind)
static int
nouveau_ttm_init_vram(struct nouveau_drm *drm)
{
- struct nvif_mmu *mmu = &drm->client.mmu;
if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
- /* Some BARs do not support being ioremapped WC */
- const u8 type = mmu->type[drm->ttm.type_vram].type;
struct ttm_resource_manager *man = kzalloc(sizeof(*man), GFP_KERNEL);
+
if (!man)
return -ENOMEM;
- man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
- man->default_caching = TTM_PL_FLAG_WC;
-
- if (type & NVIF_MEM_UNCACHED) {
- man->available_caching = TTM_PL_FLAG_UNCACHED;
- man->default_caching = TTM_PL_FLAG_UNCACHED;
- }
-
man->func = &nouveau_vram_manager;
- man->use_io_reserve_lru = true;
ttm_resource_manager_init(man,
drm->gem.vram_available >> PAGE_SHIFT);
@@ -181,9 +208,7 @@ nouveau_ttm_init_vram(struct nouveau_drm *drm)
ttm_resource_manager_set_used(man, true);
return 0;
} else {
- return ttm_range_man_init(&drm->ttm.bdev, TTM_PL_VRAM,
- TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC,
- TTM_PL_FLAG_WC, false,
+ return ttm_range_man_init(&drm->ttm.bdev, TTM_PL_VRAM, false,
drm->gem.vram_available >> PAGE_SHIFT);
}
}
@@ -208,25 +233,14 @@ nouveau_ttm_init_gtt(struct nouveau_drm *drm)
{
struct ttm_resource_manager *man;
unsigned long size_pages = drm->gem.gart_available >> PAGE_SHIFT;
- unsigned available_caching, default_caching;
const struct ttm_resource_manager_func *func = NULL;
- if (drm->agp.bridge) {
- available_caching = TTM_PL_FLAG_UNCACHED |
- TTM_PL_FLAG_WC;
- default_caching = TTM_PL_FLAG_WC;
- } else {
- available_caching = TTM_PL_MASK_CACHING;
- default_caching = TTM_PL_FLAG_CACHED;
- }
if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA)
func = &nouveau_gart_manager;
else if (!drm->agp.bridge)
func = &nv04_gart_manager;
else
- return ttm_range_man_init(&drm->ttm.bdev, TTM_PL_TT,
- available_caching, default_caching,
- true,
+ return ttm_range_man_init(&drm->ttm.bdev, TTM_PL_TT, true,
size_pages);
man = kzalloc(sizeof(*man), GFP_KERNEL);
@@ -234,8 +248,6 @@ nouveau_ttm_init_gtt(struct nouveau_drm *drm)
return -ENOMEM;
man->func = func;
- man->available_caching = available_caching;
- man->default_caching = default_caching;
man->use_tt = true;
ttm_resource_manager_init(man, size_pages);
ttm_set_driver_manager(&drm->ttm.bdev, TTM_PL_TT, man);
@@ -339,6 +351,9 @@ nouveau_ttm_init(struct nouveau_drm *drm)
return ret;
}
+ mutex_init(&drm->ttm.io_reserve_mutex);
+ INIT_LIST_HEAD(&drm->ttm.io_reserve_lru);
+
NV_INFO(drm, "VRAM: %d MiB\n", (u32)(drm->gem.vram_available >> 20));
NV_INFO(drm, "GART: %d MiB\n", (u32)(drm->gem.gart_available >> 20));
return 0;
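
The custom fault handler cooperates with the IO-reserve LRU: a BO is taken off the list for the duration of the fault, so the -ENOSPC eviction path in nouveau_ttm_io_mem_reserve() cannot unmap it mid-fault, then re-queued at the tail (most recently used) afterwards. The core of nouveau_ttm_fault() above, annotated:

	nouveau_bo_del_io_reserve_lru(bo);  /* shield from LRU eviction */
	prot = vm_get_page_prot(vma->vm_flags);
	ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT, 1);
	nouveau_bo_add_io_reserve_lru(bo);  /* back on the LRU, as hottest */
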
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.h b/drivers/gpu/drm/nouveau/nouveau_ttm.h
index eaf25461cd91..69552049bb96 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.h
@@ -22,4 +22,7 @@ int nouveau_ttm_mmap(struct file *, struct vm_area_struct *);
int nouveau_ttm_global_init(struct nouveau_drm *);
void nouveau_ttm_global_release(struct nouveau_drm *);
+int nouveau_sgdma_bind(struct ttm_bo_device *bdev, struct ttm_tt *ttm, struct ttm_resource *reg);
+void nouveau_sgdma_unbind(struct ttm_bo_device *bdev, struct ttm_tt *ttm);
+void nouveau_sgdma_destroy(struct ttm_bo_device *bdev, struct ttm_tt *ttm);
#endif
diff --git a/drivers/gpu/drm/nouveau/nv17_fence.c b/drivers/gpu/drm/nouveau/nv17_fence.c
index 6b697ee6bc0e..1253fdec712d 100644
--- a/drivers/gpu/drm/nouveau/nv17_fence.c
+++ b/drivers/gpu/drm/nouveau/nv17_fence.c
@@ -130,10 +130,11 @@ nv17_fence_create(struct nouveau_drm *drm)
priv->base.context_del = nv10_fence_context_del;
spin_lock_init(&priv->lock);
- ret = nouveau_bo_new(&drm->client, 4096, 0x1000, TTM_PL_FLAG_VRAM,
+ ret = nouveau_bo_new(&drm->client, 4096, 0x1000,
+ NOUVEAU_GEM_DOMAIN_VRAM,
0, 0x0000, NULL, NULL, &priv->bo);
if (!ret) {
- ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM, false);
+ ret = nouveau_bo_pin(priv->bo, NOUVEAU_GEM_DOMAIN_VRAM, false);
if (!ret) {
ret = nouveau_bo_map(priv->bo);
if (ret)
diff --git a/drivers/gpu/drm/nouveau/nv50_fence.c b/drivers/gpu/drm/nouveau/nv50_fence.c
index 49b46f51073c..447238e3cbe7 100644
--- a/drivers/gpu/drm/nouveau/nv50_fence.c
+++ b/drivers/gpu/drm/nouveau/nv50_fence.c
@@ -81,10 +81,11 @@ nv50_fence_create(struct nouveau_drm *drm)
priv->base.context_del = nv10_fence_context_del;
spin_lock_init(&priv->lock);
- ret = nouveau_bo_new(&drm->client, 4096, 0x1000, TTM_PL_FLAG_VRAM,
+ ret = nouveau_bo_new(&drm->client, 4096, 0x1000,
+ NOUVEAU_GEM_DOMAIN_VRAM,
0, 0x0000, NULL, NULL, &priv->bo);
if (!ret) {
- ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM, false);
+ ret = nouveau_bo_pin(priv->bo, NOUVEAU_GEM_DOMAIN_VRAM, false);
if (!ret) {
ret = nouveau_bo_map(priv->bo);
if (ret)
diff --git a/drivers/gpu/drm/nouveau/nv84_fence.c b/drivers/gpu/drm/nouveau/nv84_fence.c
index 7ed36b3a6b7d..7c9c928c3196 100644
--- a/drivers/gpu/drm/nouveau/nv84_fence.c
+++ b/drivers/gpu/drm/nouveau/nv84_fence.c
@@ -209,12 +209,13 @@ nv84_fence_create(struct nouveau_drm *drm)
mutex_init(&priv->mutex);
/* Use VRAM if there is any ; otherwise fallback to system memory */
- domain = drm->client.device.info.ram_size != 0 ? TTM_PL_FLAG_VRAM :
- /*
- * fences created in sysmem must be non-cached or we
- * will lose CPU/GPU coherency!
- */
- TTM_PL_FLAG_TT | TTM_PL_FLAG_UNCACHED;
+ domain = drm->client.device.info.ram_size != 0 ?
+ NOUVEAU_GEM_DOMAIN_VRAM :
+ /*
+ * fences created in sysmem must be non-cached or we
+ * will lose CPU/GPU coherency!
+ */
+ NOUVEAU_GEM_DOMAIN_GART | NOUVEAU_GEM_DOMAIN_COHERENT;
ret = nouveau_bo_new(&drm->client, 16 * drm->chan.nr, 0,
domain, 0, 0, NULL, NULL, &priv->bo);
if (ret == 0) {