author	Linus Torvalds <torvalds@linux-foundation.org>	2022-01-12 12:37:02 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2022-01-12 12:37:02 -0800
commit	6020c204be997e3f5129839ff9c801800fb4336e (patch)
tree	2125670ece9bed7951946b8badd6f40cf5963570 /mm/swap.c
parent	81ff0be4b9e3bcfee022d71cf89d72f7e2ed41ba (diff)
parent	6b24ca4a1a8d4ee3221d6d44ddbb99f542e4bda3 (diff)
Merge tag 'folio-5.17' of git://git.infradead.org/users/willy/pagecache

Pull folio conversion updates from Matthew Wilcox:
 "Convert much of the page cache to use folios

  This stops just short of actually enabling large folios. It converts
  everything that I noticed needs to be converted, but there may still
  be places I've overlooked which still have page size assumptions.

  The big change here is using large entries in the page cache XArray
  instead of many small entries. That only affects shmem for now, but
  it's a pretty big change for shmem since it changes where memory
  needs to be allocated (at split time instead of insertion)"

* tag 'folio-5.17' of git://git.infradead.org/users/willy/pagecache: (49 commits)
  mm: Use multi-index entries in the page cache
  XArray: Add xas_advance()
  truncate,shmem: Handle truncates that split large folios
  truncate: Convert invalidate_inode_pages2_range to folios
  fs: Convert vfs_dedupe_file_range_compare to folios
  mm: Remove pagevec_remove_exceptionals()
  mm: Convert find_lock_entries() to use a folio_batch
  filemap: Return only folios from find_get_entries()
  filemap: Convert filemap_get_read_batch() to use a folio_batch
  filemap: Convert filemap_read() to use a folio
  truncate: Add invalidate_complete_folio2()
  truncate: Convert invalidate_inode_pages2_range() to use a folio
  truncate: Skip known-truncated indices
  truncate,shmem: Add truncate_inode_folio()
  shmem: Convert part of shmem_undo_range() to use a folio
  mm: Add unmap_mapping_folio()
  truncate: Add truncate_cleanup_folio()
  filemap: Add filemap_release_folio()
  filemap: Use a folio in filemap_page_mkwrite
  filemap: Use a folio in filemap_map_pages
  ...
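The "multi-index entries" change the message describes stores one XArray
entry for a large folio covering every index it spans, instead of one
entry per small page. A minimal sketch of that storage pattern follows;
store_multi_index() is a hypothetical name for illustration, while
XA_STATE_ORDER, xas_store() and xas_nomem() are existing XArray
interfaces this series builds on:

#include <linux/xarray.h>

/*
 * Illustrative only: store @entry once so that it covers all 2^order
 * indices starting at @index.  A lookup on any index in that range
 * then returns the same entry.
 */
static void store_multi_index(struct xarray *xa, unsigned long index,
			      unsigned int order, void *entry)
{
	XA_STATE_ORDER(xas, xa, index, order);

	do {
		xas_lock(&xas);
		xas_store(&xas, entry);	/* one entry spans the range */
		xas_unlock(&xas);
	} while (xas_nomem(&xas, GFP_KERNEL));
}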
Diffstat (limited to 'mm/swap.c')
-rw-r--r--	mm/swap.c	26
1 file changed, 13 insertions(+), 13 deletions(-)
diff --git a/mm/swap.c b/mm/swap.c
index e8c9dc6d0377..74f6b311d7ee 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -1077,24 +1077,24 @@ void __pagevec_lru_add(struct pagevec *pvec)
 }
 
 /**
- * pagevec_remove_exceptionals - pagevec exceptionals pruning
- * @pvec:	The pagevec to prune
+ * folio_batch_remove_exceptionals() - Prune non-folios from a batch.
+ * @fbatch: The batch to prune
  *
- * find_get_entries() fills both pages and XArray value entries (aka
- * exceptional entries) into the pagevec.  This function prunes all
- * exceptionals from @pvec without leaving holes, so that it can be
- * passed on to page-only pagevec operations.
+ * find_get_entries() fills a batch with both folios and shadow/swap/DAX
+ * entries.  This function prunes all the non-folio entries from @fbatch
+ * without leaving holes, so that it can be passed on to folio-only batch
+ * operations.
  */
-void pagevec_remove_exceptionals(struct pagevec *pvec)
+void folio_batch_remove_exceptionals(struct folio_batch *fbatch)
 {
-	int i, j;
+	unsigned int i, j;
 
-	for (i = 0, j = 0; i < pagevec_count(pvec); i++) {
-		struct page *page = pvec->pages[i];
-		if (!xa_is_value(page))
-			pvec->pages[j++] = page;
+	for (i = 0, j = 0; i < folio_batch_count(fbatch); i++) {
+		struct folio *folio = fbatch->folios[i];
+		if (!xa_is_value(folio))
+			fbatch->folios[j++] = folio;
 	}
-	pvec->nr = j;
+	fbatch->nr = j;
 }
 
 /**
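For context beyond this hunk, a hedged sketch of how a caller is
expected to use the renamed helper; prune_value_entries() is a
hypothetical function written for illustration, while
find_get_entries(), folio_batch_init(), folio_batch_release() and
folio_batch_remove_exceptionals() are the interfaces this conversion
touches:

#include <linux/pagemap.h>	/* find_get_entries() */
#include <linux/pagevec.h>	/* struct folio_batch and helpers */

/*
 * Hypothetical caller, for illustration.  find_get_entries() fills the
 * batch with a mix of folios and shadow/swap/DAX value entries; the
 * value entries hold no reference, so they must be pruned before a
 * folio-only operation such as folio_batch_release().
 */
static void prune_value_entries(struct address_space *mapping)
{
	struct folio_batch fbatch;
	pgoff_t indices[PAGEVEC_SIZE];
	pgoff_t start = 0;
	unsigned int nr;

	folio_batch_init(&fbatch);
	while ((nr = find_get_entries(mapping, start, ULONG_MAX,
				      &fbatch, indices))) {
		start = indices[nr - 1] + 1;	/* resume past last entry */
		folio_batch_remove_exceptionals(&fbatch);
		folio_batch_release(&fbatch);	/* also reinits the batch */
	}
}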