Diffstat (limited to 'drivers/gpu/drm/ttm')
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c             |  71
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_util.c        |   4
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_vm.c          |  15
-rw-r--r--  drivers/gpu/drm/ttm/ttm_device.c         |  48
-rw-r--r--  drivers/gpu/drm/ttm/ttm_module.c         |  12
-rw-r--r--  drivers/gpu/drm/ttm/ttm_pool.c           |  45
-rw-r--r--  drivers/gpu/drm/ttm/ttm_range_manager.c  |   8
-rw-r--r--  drivers/gpu/drm/ttm/ttm_resource.c       |  49
-rw-r--r--  drivers/gpu/drm/ttm/ttm_tt.c             |  69
9 files changed, 183 insertions(+), 138 deletions(-)
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index bb9e02c31946..d62b2013c367 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -69,7 +69,17 @@ static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
}
}
-static void ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
+static inline void ttm_bo_move_to_pinned(struct ttm_buffer_object *bo)
+{
+ struct ttm_device *bdev = bo->bdev;
+
+ list_move_tail(&bo->lru, &bdev->pinned);
+
+ if (bdev->funcs->del_from_lru_notify)
+ bdev->funcs->del_from_lru_notify(bo);
+}
+
+static inline void ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
{
struct ttm_device *bdev = bo->bdev;
@@ -98,7 +108,7 @@ void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo,
dma_resv_assert_held(bo->base.resv);
if (bo->pin_count) {
- ttm_bo_del_from_lru(bo);
+ ttm_bo_move_to_pinned(bo);
return;
}
@@ -342,7 +352,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
return ret;
}
- ttm_bo_del_from_lru(bo);
+ ttm_bo_move_to_pinned(bo);
list_del_init(&bo->ddestroy);
spin_unlock(&bo->bdev->lru_lock);
ttm_bo_cleanup_memtype_use(bo);
@@ -914,57 +924,11 @@ out:
return ret;
}
-static bool ttm_bo_places_compat(const struct ttm_place *places,
- unsigned num_placement,
- struct ttm_resource *mem,
- uint32_t *new_flags)
-{
- unsigned i;
-
- if (mem->placement & TTM_PL_FLAG_TEMPORARY)
- return false;
-
- for (i = 0; i < num_placement; i++) {
- const struct ttm_place *heap = &places[i];
-
- if ((mem->start < heap->fpfn ||
- (heap->lpfn != 0 && (mem->start + mem->num_pages) > heap->lpfn)))
- continue;
-
- *new_flags = heap->flags;
- if ((mem->mem_type == heap->mem_type) &&
- (!(*new_flags & TTM_PL_FLAG_CONTIGUOUS) ||
- (mem->placement & TTM_PL_FLAG_CONTIGUOUS)))
- return true;
- }
- return false;
-}
-
-bool ttm_bo_mem_compat(struct ttm_placement *placement,
- struct ttm_resource *mem,
- uint32_t *new_flags)
-{
- if (ttm_bo_places_compat(placement->placement, placement->num_placement,
- mem, new_flags))
- return true;
-
- if ((placement->busy_placement != placement->placement ||
- placement->num_busy_placement > placement->num_placement) &&
- ttm_bo_places_compat(placement->busy_placement,
- placement->num_busy_placement,
- mem, new_flags))
- return true;
-
- return false;
-}
-EXPORT_SYMBOL(ttm_bo_mem_compat);
-
int ttm_bo_validate(struct ttm_buffer_object *bo,
struct ttm_placement *placement,
struct ttm_operation_ctx *ctx)
{
int ret;
- uint32_t new_flags;
dma_resv_assert_held(bo->base.resv);
@@ -977,7 +941,7 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
/*
* Check whether we need to move buffer.
*/
- if (!ttm_bo_mem_compat(placement, bo->resource, &new_flags)) {
+ if (!ttm_resource_compat(bo->resource, placement)) {
ret = ttm_bo_move_buffer(bo, placement, ctx);
if (ret)
return ret;
@@ -1151,8 +1115,8 @@ int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
return -EBUSY;
if (!bo->ttm || !ttm_tt_is_populated(bo->ttm) ||
- bo->ttm->page_flags & TTM_PAGE_FLAG_SG ||
- bo->ttm->page_flags & TTM_PAGE_FLAG_SWAPPED ||
+ bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL ||
+ bo->ttm->page_flags & TTM_TT_FLAG_SWAPPED ||
!ttm_bo_get_unless_zero(bo)) {
if (locked)
dma_resv_unlock(bo->base.resv);
@@ -1165,7 +1129,7 @@ int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
return ret == -EBUSY ? -ENOSPC : ret;
}
- ttm_bo_del_from_lru(bo);
+ ttm_bo_move_to_pinned(bo);
/* TODO: Cleanup the locking */
spin_unlock(&bo->bdev->lru_lock);
@@ -1224,6 +1188,7 @@ void ttm_bo_tt_destroy(struct ttm_buffer_object *bo)
if (bo->ttm == NULL)
return;
+ ttm_tt_unpopulate(bo->bdev, bo->ttm);
ttm_tt_destroy(bo->bdev, bo->ttm);
bo->ttm = NULL;
}
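
With ttm_bo_mem_compat() removed from ttm_bo.c, the pre-validation compatibility check moves to ttm_resource_compat() in ttm_resource.c (added later in this series). A minimal caller-side sketch of the migration; mydrv_move_buffer() is a hypothetical driver helper, not part of this patch:

	/* Before this change: a scratch uint32_t received the matched flags. */
	uint32_t new_flags;

	if (!ttm_bo_mem_compat(placement, bo->resource, &new_flags))
		ret = mydrv_move_buffer(bo, placement);

	/* After: ttm_resource_compat() simply answers yes or no. */
	if (!ttm_resource_compat(bo->resource, placement))
		ret = mydrv_move_buffer(bo, placement);
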
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index c893c3db2623..a342d701c91c 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -147,7 +147,7 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
bool clear;
int ret = 0;
- if (ttm && ((ttm->page_flags & TTM_PAGE_FLAG_SWAPPED) ||
+ if (ttm && ((ttm->page_flags & TTM_TT_FLAG_SWAPPED) ||
dst_man->use_tt)) {
ret = ttm_tt_populate(bdev, ttm, ctx);
if (ret)
@@ -169,7 +169,7 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
}
clear = src_iter->ops->maps_tt && (!ttm || !ttm_tt_is_populated(ttm));
- if (!(clear && ttm && !(ttm->page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)))
+ if (!(clear && ttm && !(ttm->page_flags & TTM_TT_FLAG_ZERO_ALLOC)))
ttm_move_memcpy(clear, dst_mem->num_pages, dst_iter, src_iter);
if (!src_iter->ops->maps_tt)
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index f56be5bc0861..33680c94127c 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -162,9 +162,11 @@ vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo,
* Refuse to fault imported pages. This should be handled
* (if at all) by redirecting mmap to the exporter.
*/
- if (bo->ttm && (bo->ttm->page_flags & TTM_PAGE_FLAG_SG)) {
- dma_resv_unlock(bo->base.resv);
- return VM_FAULT_SIGBUS;
+ if (bo->ttm && (bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL)) {
+ if (!(bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL_MAPPABLE)) {
+ dma_resv_unlock(bo->base.resv);
+ return VM_FAULT_SIGBUS;
+ }
}
return 0;
@@ -346,8 +348,6 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
} else if (unlikely(!page)) {
break;
}
- page->index = drm_vma_node_start(&bo->base.vma_node) +
- page_offset;
pfn = page_to_pfn(page);
}
@@ -519,11 +519,6 @@ int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
switch (bo->resource->mem_type) {
case TTM_PL_SYSTEM:
- if (unlikely(bo->ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
- ret = ttm_tt_swapin(bo->ttm);
- if (unlikely(ret != 0))
- return ret;
- }
fallthrough;
case TTM_PL_TT:
ret = ttm_bo_vm_access_kmap(bo, offset, buf, len, write);
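
The fault path now distinguishes two kinds of external page arrays: TTM_TT_FLAG_EXTERNAL alone still refuses CPU faults with VM_FAULT_SIGBUS, while TTM_TT_FLAG_EXTERNAL together with TTM_TT_FLAG_EXTERNAL_MAPPABLE opts back in. A hedged sketch of a backend setting the new flag (driver names hypothetical; per the WARN_ON added to ttm_tt_create() in the ttm_tt.c hunk below, EXTERNAL_MAPPABLE is only valid together with EXTERNAL):

static struct ttm_tt *mydrv_ttm_tt_create(struct ttm_buffer_object *bo,
					  uint32_t page_flags)
{
	struct ttm_tt *tt = kzalloc(sizeof(*tt), GFP_KERNEL);

	if (!tt)
		return NULL;

	/* Pages are driver-managed (EXTERNAL), but userspace may still
	 * fault them, so keep CPU mmap working. */
	page_flags |= TTM_TT_FLAG_EXTERNAL | TTM_TT_FLAG_EXTERNAL_MAPPABLE;

	if (ttm_tt_init(tt, bo, page_flags, ttm_cached)) {
		kfree(tt);
		return NULL;
	}
	return tt;
}
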
diff --git a/drivers/gpu/drm/ttm/ttm_device.c b/drivers/gpu/drm/ttm/ttm_device.c
index 2df59b3c2ea1..be24bb6cefd0 100644
--- a/drivers/gpu/drm/ttm/ttm_device.c
+++ b/drivers/gpu/drm/ttm/ttm_device.c
@@ -220,6 +220,7 @@ int ttm_device_init(struct ttm_device *bdev, struct ttm_device_funcs *funcs,
INIT_DELAYED_WORK(&bdev->wq, ttm_device_delayed_workqueue);
spin_lock_init(&bdev->lru_lock);
INIT_LIST_HEAD(&bdev->ddestroy);
+ INIT_LIST_HEAD(&bdev->pinned);
bdev->dev_mapping = mapping;
mutex_lock(&ttm_global_mutex);
list_add_tail(&bdev->device_list, &glob->device_list);
@@ -257,3 +258,50 @@ void ttm_device_fini(struct ttm_device *bdev)
ttm_global_release();
}
EXPORT_SYMBOL(ttm_device_fini);
+
+void ttm_device_clear_dma_mappings(struct ttm_device *bdev)
+{
+ struct ttm_resource_manager *man;
+ struct ttm_buffer_object *bo;
+ unsigned int i, j;
+
+ spin_lock(&bdev->lru_lock);
+ while (!list_empty(&bdev->pinned)) {
+ bo = list_first_entry(&bdev->pinned, struct ttm_buffer_object, lru);
+ /* Take ref against racing releases once lru_lock is unlocked */
+ if (ttm_bo_get_unless_zero(bo)) {
+ list_del_init(&bo->lru);
+ spin_unlock(&bdev->lru_lock);
+
+ if (bo->ttm)
+ ttm_tt_unpopulate(bo->bdev, bo->ttm);
+
+ ttm_bo_put(bo);
+ spin_lock(&bdev->lru_lock);
+ }
+ }
+
+ for (i = TTM_PL_SYSTEM; i < TTM_NUM_MEM_TYPES; ++i) {
+ man = ttm_manager_type(bdev, i);
+ if (!man || !man->use_tt)
+ continue;
+
+ for (j = 0; j < TTM_MAX_BO_PRIORITY; ++j) {
+ while (!list_empty(&man->lru[j])) {
+ bo = list_first_entry(&man->lru[j], struct ttm_buffer_object, lru);
+ if (ttm_bo_get_unless_zero(bo)) {
+ list_del_init(&bo->lru);
+ spin_unlock(&bdev->lru_lock);
+
+ if (bo->ttm)
+ ttm_tt_unpopulate(bo->bdev, bo->ttm);
+
+ ttm_bo_put(bo);
+ spin_lock(&bdev->lru_lock);
+ }
+ }
+ }
+ }
+ spin_unlock(&bdev->lru_lock);
+}
+EXPORT_SYMBOL(ttm_device_clear_dma_mappings);
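
ttm_device_clear_dma_mappings() empties the new bdev->pinned list and every TT-backed LRU, unpopulating each ttm_tt and thereby dropping its DMA mappings. A plausible call site, sketched under the assumption that a driver invokes it during teardown while the underlying struct device is still alive (names hypothetical):

void mydrv_device_fini(struct mydrv_device *mdev)
{
	/* Tear down all DMA mappings first, then the TTM device itself. */
	ttm_device_clear_dma_mappings(&mdev->bdev);
	ttm_device_fini(&mdev->bdev);
}
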
diff --git a/drivers/gpu/drm/ttm/ttm_module.c b/drivers/gpu/drm/ttm/ttm_module.c
index 7fcdef278c74..0037eefe3239 100644
--- a/drivers/gpu/drm/ttm/ttm_module.c
+++ b/drivers/gpu/drm/ttm/ttm_module.c
@@ -40,6 +40,18 @@
#include "ttm_module.h"
/**
+ * DOC: TTM
+ *
+ * TTM is a memory manager for accelerator devices with dedicated memory.
+ *
+ * The basic idea is that resources are grouped together in buffer objects of
+ * a certain size, and TTM handles the lifetime, movement and CPU mappings of
+ * those objects.
+ *
+ * TODO: Add more design background and information here.
+ */
+
+/**
* ttm_prot_from_caching - Modify the page protection according to the
* ttm caching mode
* @caching: The ttm caching mode
diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c
index cb38b1a17b09..1bba0a0ed3f9 100644
--- a/drivers/gpu/drm/ttm/ttm_pool.c
+++ b/drivers/gpu/drm/ttm/ttm_pool.c
@@ -70,7 +70,7 @@ static struct ttm_pool_type global_uncached[MAX_ORDER];
static struct ttm_pool_type global_dma32_write_combined[MAX_ORDER];
static struct ttm_pool_type global_dma32_uncached[MAX_ORDER];
-static struct mutex shrinker_lock;
+static spinlock_t shrinker_lock;
static struct list_head shrinker_list;
static struct shrinker mm_shrinker;
@@ -263,9 +263,9 @@ static void ttm_pool_type_init(struct ttm_pool_type *pt, struct ttm_pool *pool,
spin_lock_init(&pt->lock);
INIT_LIST_HEAD(&pt->pages);
- mutex_lock(&shrinker_lock);
+ spin_lock(&shrinker_lock);
list_add_tail(&pt->shrinker_list, &shrinker_list);
- mutex_unlock(&shrinker_lock);
+ spin_unlock(&shrinker_lock);
}
/* Remove a pool_type from the global shrinker list and free all pages */
@@ -273,9 +273,9 @@ static void ttm_pool_type_fini(struct ttm_pool_type *pt)
{
struct page *p;
- mutex_lock(&shrinker_lock);
+ spin_lock(&shrinker_lock);
list_del(&pt->shrinker_list);
- mutex_unlock(&shrinker_lock);
+ spin_unlock(&shrinker_lock);
while ((p = ttm_pool_type_take(pt)))
ttm_pool_free_page(pt->pool, pt->caching, pt->order, p);
@@ -313,24 +313,23 @@ static struct ttm_pool_type *ttm_pool_select_type(struct ttm_pool *pool,
static unsigned int ttm_pool_shrink(void)
{
struct ttm_pool_type *pt;
- unsigned int num_freed;
+ unsigned int num_pages;
struct page *p;
- mutex_lock(&shrinker_lock);
+ spin_lock(&shrinker_lock);
pt = list_first_entry(&shrinker_list, typeof(*pt), shrinker_list);
+ list_move_tail(&pt->shrinker_list, &shrinker_list);
+ spin_unlock(&shrinker_lock);
p = ttm_pool_type_take(pt);
if (p) {
ttm_pool_free_page(pt->pool, pt->caching, pt->order, p);
- num_freed = 1 << pt->order;
+ num_pages = 1 << pt->order;
} else {
- num_freed = 0;
+ num_pages = 0;
}
- list_move_tail(&pt->shrinker_list, &shrinker_list);
- mutex_unlock(&shrinker_lock);
-
- return num_freed;
+ return num_pages;
}
/* Return the allocation order based for a page */
@@ -372,7 +371,7 @@ int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
WARN_ON(!num_pages || ttm_tt_is_populated(tt));
WARN_ON(dma_addr && !pool->dev);
- if (tt->page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
+ if (tt->page_flags & TTM_TT_FLAG_ZERO_ALLOC)
gfp_flags |= __GFP_ZERO;
if (ctx->gfp_retry_mayfail)
@@ -383,7 +382,8 @@ int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
else
gfp_flags |= GFP_HIGHUSER;
- for (order = min(MAX_ORDER - 1UL, __fls(num_pages)); num_pages;
+ for (order = min_t(unsigned int, MAX_ORDER - 1, __fls(num_pages));
+ num_pages;
order = min_t(unsigned int, order, __fls(num_pages))) {
bool apply_caching = false;
struct ttm_pool_type *pt;
@@ -530,6 +530,11 @@ void ttm_pool_fini(struct ttm_pool *pool)
for (j = 0; j < MAX_ORDER; ++j)
ttm_pool_type_fini(&pool->caching[i].orders[j]);
}
+
+	/* We removed the pool types from the LRU, but we also need to make
+	 * sure that no shrinker is concurrently freeing pages from the pool.
+	 */
+ synchronize_shrinkers();
}
/* As long as pages are available make sure to release at least one */
@@ -604,7 +609,7 @@ static int ttm_pool_debugfs_globals_show(struct seq_file *m, void *data)
{
ttm_pool_debugfs_header(m);
- mutex_lock(&shrinker_lock);
+ spin_lock(&shrinker_lock);
seq_puts(m, "wc\t:");
ttm_pool_debugfs_orders(global_write_combined, m);
seq_puts(m, "uc\t:");
@@ -613,7 +618,7 @@ static int ttm_pool_debugfs_globals_show(struct seq_file *m, void *data)
ttm_pool_debugfs_orders(global_dma32_write_combined, m);
seq_puts(m, "uc 32\t:");
ttm_pool_debugfs_orders(global_dma32_uncached, m);
- mutex_unlock(&shrinker_lock);
+ spin_unlock(&shrinker_lock);
ttm_pool_debugfs_footer(m);
@@ -640,7 +645,7 @@ int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m)
ttm_pool_debugfs_header(m);
- mutex_lock(&shrinker_lock);
+ spin_lock(&shrinker_lock);
for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
seq_puts(m, "DMA ");
switch (i) {
@@ -656,7 +661,7 @@ int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m)
}
ttm_pool_debugfs_orders(pool->caching[i].orders, m);
}
- mutex_unlock(&shrinker_lock);
+ spin_unlock(&shrinker_lock);
ttm_pool_debugfs_footer(m);
return 0;
@@ -693,7 +698,7 @@ int ttm_pool_mgr_init(unsigned long num_pages)
if (!page_pool_size)
page_pool_size = num_pages;
- mutex_init(&shrinker_lock);
+ spin_lock_init(&shrinker_lock);
INIT_LIST_HEAD(&shrinker_list);
for (i = 0; i < MAX_ORDER; ++i) {
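
With shrinker_lock now a spinlock, ttm_pool_shrink() rotates the pool type to the list tail and drops the lock before freeing the page, so concurrent shrinkers pick different pool types and never sleep under the lock. For orientation, a sketch of the scan callback that drives it, assuming the file-scope allocated_pages counter in ttm_pool.c (the in-tree ttm_pool_shrinker_scan() may differ in detail):

static unsigned long mydrv_pool_shrinker_scan(struct shrinker *shrink,
					      struct shrink_control *sc)
{
	unsigned long num_freed = 0;

	/* Free one batch per iteration until the request is satisfied
	 * or the pools run empty. */
	do {
		num_freed += ttm_pool_shrink();
	} while (num_freed < sc->nr_to_scan &&
		 atomic_long_read(&allocated_pages));

	return num_freed ? num_freed : SHRINK_STOP;
}
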
diff --git a/drivers/gpu/drm/ttm/ttm_range_manager.c b/drivers/gpu/drm/ttm/ttm_range_manager.c
index f4b08a8705b3..67d68a4a8640 100644
--- a/drivers/gpu/drm/ttm/ttm_range_manager.c
+++ b/drivers/gpu/drm/ttm/ttm_range_manager.c
@@ -138,7 +138,7 @@ static const struct ttm_resource_manager_func ttm_range_manager_func = {
* Initialise a generic range manager for the selected memory type.
* The range manager is installed for this device in the type slot.
*/
-int ttm_range_man_init(struct ttm_device *bdev,
+int ttm_range_man_init_nocheck(struct ttm_device *bdev,
unsigned type, bool use_tt,
unsigned long p_size)
{
@@ -163,7 +163,7 @@ int ttm_range_man_init(struct ttm_device *bdev,
ttm_resource_manager_set_used(man, true);
return 0;
}
-EXPORT_SYMBOL(ttm_range_man_init);
+EXPORT_SYMBOL(ttm_range_man_init_nocheck);
/**
* ttm_range_man_fini
@@ -173,7 +173,7 @@ EXPORT_SYMBOL(ttm_range_man_init);
*
* Remove the generic range manager from a slot and tear it down.
*/
-int ttm_range_man_fini(struct ttm_device *bdev,
+int ttm_range_man_fini_nocheck(struct ttm_device *bdev,
unsigned type)
{
struct ttm_resource_manager *man = ttm_manager_type(bdev, type);
@@ -200,4 +200,4 @@ int ttm_range_man_fini(struct ttm_device *bdev,
kfree(rman);
return 0;
}
-EXPORT_SYMBOL(ttm_range_man_fini);
+EXPORT_SYMBOL(ttm_range_man_fini_nocheck);
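
The _nocheck suffix suggests that checked wrappers now live in the header. A plausible shape for such a wrapper, given as an assumption rather than the in-tree definition (the actual header may differ):

static __always_inline int ttm_range_man_init(struct ttm_device *bdev,
					      unsigned int type, bool use_tt,
					      unsigned long p_size)
{
	/* Catch out-of-range manager types at compile time when possible. */
	BUILD_BUG_ON(__builtin_constant_p(type) && type >= TTM_NUM_MEM_TYPES);
	return ttm_range_man_init_nocheck(bdev, type, use_tt, p_size);
}
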
diff --git a/drivers/gpu/drm/ttm/ttm_resource.c b/drivers/gpu/drm/ttm/ttm_resource.c
index 2431717376e7..035d71332d18 100644
--- a/drivers/gpu/drm/ttm/ttm_resource.c
+++ b/drivers/gpu/drm/ttm/ttm_resource.c
@@ -67,6 +67,55 @@ void ttm_resource_free(struct ttm_buffer_object *bo, struct ttm_resource **res)
}
EXPORT_SYMBOL(ttm_resource_free);
+static bool ttm_resource_places_compat(struct ttm_resource *res,
+ const struct ttm_place *places,
+ unsigned num_placement)
+{
+ unsigned i;
+
+ if (res->placement & TTM_PL_FLAG_TEMPORARY)
+ return false;
+
+ for (i = 0; i < num_placement; i++) {
+ const struct ttm_place *heap = &places[i];
+
+ if (res->start < heap->fpfn || (heap->lpfn &&
+ (res->start + res->num_pages) > heap->lpfn))
+ continue;
+
+ if ((res->mem_type == heap->mem_type) &&
+ (!(heap->flags & TTM_PL_FLAG_CONTIGUOUS) ||
+ (res->placement & TTM_PL_FLAG_CONTIGUOUS)))
+ return true;
+ }
+ return false;
+}
+
+/**
+ * ttm_resource_compat - check if resource is compatible with placement
+ *
+ * @res: the resource to check
+ * @placement: the placement to check against
+ *
+ * Returns true if the placement is compatible.
+ */
+bool ttm_resource_compat(struct ttm_resource *res,
+ struct ttm_placement *placement)
+{
+ if (ttm_resource_places_compat(res, placement->placement,
+ placement->num_placement))
+ return true;
+
+ if ((placement->busy_placement != placement->placement ||
+ placement->num_busy_placement > placement->num_placement) &&
+ ttm_resource_places_compat(res, placement->busy_placement,
+ placement->num_busy_placement))
+ return true;
+
+ return false;
+}
+EXPORT_SYMBOL(ttm_resource_compat);
+
/**
* ttm_resource_manager_init
*
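
A short usage sketch for the new export, constructing a single-place placement and testing an existing resource against it (values illustrative only):

	struct ttm_place place = {
		.fpfn = 0,
		.lpfn = 0,		/* 0 = no upper page-frame bound */
		.mem_type = TTM_PL_VRAM,
		.flags = TTM_PL_FLAG_CONTIGUOUS,
	};
	struct ttm_placement placement = {
		.num_placement = 1,
		.placement = &place,
		.num_busy_placement = 1,
		.busy_placement = &place,
	};

	if (ttm_resource_compat(bo->resource, &placement))
		return 0;	/* already placed acceptably, no move needed */
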
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index d5cd8b5dc0bf..7e83c00a3f48 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -68,12 +68,12 @@ int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc)
switch (bo->type) {
case ttm_bo_type_device:
if (zero_alloc)
- page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
+ page_flags |= TTM_TT_FLAG_ZERO_ALLOC;
break;
case ttm_bo_type_kernel:
break;
case ttm_bo_type_sg:
- page_flags |= TTM_PAGE_FLAG_SG;
+ page_flags |= TTM_TT_FLAG_EXTERNAL;
break;
default:
pr_err("Illegal buffer object type\n");
@@ -84,6 +84,9 @@ int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc)
if (unlikely(bo->ttm == NULL))
return -ENOMEM;
+ WARN_ON(bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL_MAPPABLE &&
+ !(bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL));
+
return 0;
}
@@ -122,17 +125,6 @@ static int ttm_sg_tt_alloc_page_directory(struct ttm_tt *ttm)
return 0;
}
-void ttm_tt_destroy_common(struct ttm_device *bdev, struct ttm_tt *ttm)
-{
- ttm_tt_unpopulate(bdev, ttm);
-
- if (ttm->swap_storage)
- fput(ttm->swap_storage);
-
- ttm->swap_storage = NULL;
-}
-EXPORT_SYMBOL(ttm_tt_destroy_common);
-
void ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
{
bdev->funcs->ttm_tt_destroy(bdev, ttm);
@@ -167,6 +159,12 @@ EXPORT_SYMBOL(ttm_tt_init);
void ttm_tt_fini(struct ttm_tt *ttm)
{
+ WARN_ON(ttm->page_flags & TTM_TT_FLAG_PRIV_POPULATED);
+
+ if (ttm->swap_storage)
+ fput(ttm->swap_storage);
+ ttm->swap_storage = NULL;
+
if (ttm->pages)
kvfree(ttm->pages);
else
@@ -183,7 +181,7 @@ int ttm_sg_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
ttm_tt_init_fields(ttm, bo, page_flags, caching);
- if (page_flags & TTM_PAGE_FLAG_SG)
+ if (page_flags & TTM_TT_FLAG_EXTERNAL)
ret = ttm_sg_tt_alloc_page_directory(ttm);
else
ret = ttm_dma_tt_alloc_page_directory(ttm);
@@ -229,7 +227,7 @@ int ttm_tt_swapin(struct ttm_tt *ttm)
fput(swap_storage);
ttm->swap_storage = NULL;
- ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;
+ ttm->page_flags &= ~TTM_TT_FLAG_SWAPPED;
return 0;
@@ -284,7 +282,7 @@ int ttm_tt_swapout(struct ttm_device *bdev, struct ttm_tt *ttm,
ttm_tt_unpopulate(bdev, ttm);
ttm->swap_storage = swap_storage;
- ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
+ ttm->page_flags |= TTM_TT_FLAG_SWAPPED;
return ttm->num_pages;
@@ -294,17 +292,6 @@ out_err:
return ret;
}
-static void ttm_tt_add_mapping(struct ttm_device *bdev, struct ttm_tt *ttm)
-{
- pgoff_t i;
-
- if (ttm->page_flags & TTM_PAGE_FLAG_SG)
- return;
-
- for (i = 0; i < ttm->num_pages; ++i)
- ttm->pages[i]->mapping = bdev->dev_mapping;
-}
-
int ttm_tt_populate(struct ttm_device *bdev,
struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
@@ -316,7 +303,7 @@ int ttm_tt_populate(struct ttm_device *bdev,
if (ttm_tt_is_populated(ttm))
return 0;
- if (!(ttm->page_flags & TTM_PAGE_FLAG_SG)) {
+ if (!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL)) {
atomic_long_add(ttm->num_pages, &ttm_pages_allocated);
if (bdev->pool.use_dma32)
atomic_long_add(ttm->num_pages,
@@ -341,9 +328,8 @@ int ttm_tt_populate(struct ttm_device *bdev,
if (ret)
goto error;
- ttm_tt_add_mapping(bdev, ttm);
- ttm->page_flags |= TTM_PAGE_FLAG_PRIV_POPULATED;
- if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
+ ttm->page_flags |= TTM_TT_FLAG_PRIV_POPULATED;
+ if (unlikely(ttm->page_flags & TTM_TT_FLAG_SWAPPED)) {
ret = ttm_tt_swapin(ttm);
if (unlikely(ret != 0)) {
ttm_tt_unpopulate(bdev, ttm);
@@ -354,7 +340,7 @@ int ttm_tt_populate(struct ttm_device *bdev,
return 0;
error:
- if (!(ttm->page_flags & TTM_PAGE_FLAG_SG)) {
+ if (!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL)) {
atomic_long_sub(ttm->num_pages, &ttm_pages_allocated);
if (bdev->pool.use_dma32)
atomic_long_sub(ttm->num_pages,
@@ -364,39 +350,24 @@ error:
}
EXPORT_SYMBOL(ttm_tt_populate);
-static void ttm_tt_clear_mapping(struct ttm_tt *ttm)
-{
- pgoff_t i;
- struct page **page = ttm->pages;
-
- if (ttm->page_flags & TTM_PAGE_FLAG_SG)
- return;
-
- for (i = 0; i < ttm->num_pages; ++i) {
- (*page)->mapping = NULL;
- (*page++)->index = 0;
- }
-}
-
void ttm_tt_unpopulate(struct ttm_device *bdev, struct ttm_tt *ttm)
{
if (!ttm_tt_is_populated(ttm))
return;
- ttm_tt_clear_mapping(ttm);
if (bdev->funcs->ttm_tt_unpopulate)
bdev->funcs->ttm_tt_unpopulate(bdev, ttm);
else
ttm_pool_free(&bdev->pool, ttm);
- if (!(ttm->page_flags & TTM_PAGE_FLAG_SG)) {
+ if (!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL)) {
atomic_long_sub(ttm->num_pages, &ttm_pages_allocated);
if (bdev->pool.use_dma32)
atomic_long_sub(ttm->num_pages,
&ttm_dma32_pages_allocated);
}
- ttm->page_flags &= ~TTM_PAGE_FLAG_PRIV_POPULATED;
+ ttm->page_flags &= ~TTM_TT_FLAG_PRIV_POPULATED;
}
#ifdef CONFIG_DEBUG_FS
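
With ttm_tt_destroy_common() gone, its work is split: ttm_bo_tt_destroy() unpopulates before calling the driver hook, and ttm_tt_fini() now releases swap_storage (and WARNs if the tt is still populated). A driver's destroy callback therefore reduces to roughly this, assuming no extra backend state (hypothetical driver):

static void mydrv_ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
{
	/* No ttm_tt_destroy_common() anymore: the core already
	 * unpopulated the tt, and ttm_tt_fini() drops swap_storage. */
	ttm_tt_fini(ttm);
	kfree(ttm);
}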