author     Kairui Song <kasong@tencent.com>           2024-11-05 01:52:57 +0800
committer  Andrew Morton <akpm@linux-foundation.org>  2024-11-11 17:22:26 -0800
commit     da0c02516c501b43bd39ad4aca5779c86153d143 (patch)
tree       fb6d62186590f2191f808c14432425a055ae10e1 /mm
parent     fb56fdf8b9a2f7397f8a83dce50189f3f0cf71af (diff)
mm/list_lru: simplify the list_lru walk callback function
Isolation no longer takes the list_lru global node lock; only the per-cgroup lock is used instead. Since this lock lives inside the list_lru_one being walked, there is no longer any need to pass it to the callback explicitly.

Link: https://lkml.kernel.org/r/20241104175257.60853-7-ryncsn@gmail.com
Signed-off-by: Kairui Song <kasong@tencent.com>
Cc: Chengming Zhou <zhouchengming@bytedance.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Qi Zheng <zhengqi.arch@bytedance.com>
Cc: Roman Gushchin <roman.gushchin@linux.dev>
Cc: Shakeel Butt <shakeel.butt@linux.dev>
Cc: Waiman Long <longman@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
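In practice, the change boils down to the walk callback losing its spinlock_t * argument. A rough before/after sketch of the callback type, paraphrased from the diff below rather than copied verbatim from include/linux/list_lru.h:

    /* Before: the lock protecting the list was passed to the callback. */
    typedef enum lru_status (*list_lru_walk_cb)(struct list_head *item,
                                                struct list_lru_one *list,
                                                spinlock_t *lock, void *cb_arg);

    /* After: the lock is the per-cgroup list's own list->lock, so the
     * callback can reach it through the list_lru_one it already receives. */
    typedef enum lru_status (*list_lru_walk_cb)(struct list_head *item,
                                                struct list_lru_one *list,
                                                void *cb_arg);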
Diffstat (limited to 'mm')
-rw-r--r--  mm/list_lru.c    |  2 +-
-rw-r--r--  mm/workingset.c  | 15 +++++++--------
-rw-r--r--  mm/zswap.c       |  4 ++--
3 files changed, 10 insertions(+), 11 deletions(-)
diff --git a/mm/list_lru.c b/mm/list_lru.c
index c139202e27f7..f93ada6a207b 100644
--- a/mm/list_lru.c
+++ b/mm/list_lru.c
@@ -298,7 +298,7 @@ restart:
break;
--*nr_to_walk;
- ret = isolate(item, l, &l->lock, cb_arg);
+ ret = isolate(item, l, cb_arg);
switch (ret) {
/*
* LRU_RETRY, LRU_REMOVED_RETRY and LRU_STOP will drop the lru
diff --git a/mm/workingset.c b/mm/workingset.c
index c187d4a3fbea..a4705e196545 100644
--- a/mm/workingset.c
+++ b/mm/workingset.c
@@ -702,8 +702,7 @@ static unsigned long count_shadow_nodes(struct shrinker *shrinker,
static enum lru_status shadow_lru_isolate(struct list_head *item,
struct list_lru_one *lru,
- spinlock_t *lru_lock,
- void *arg) __must_hold(lru_lock)
+ void *arg) __must_hold(lru->lock)
{
struct xa_node *node = container_of(item, struct xa_node, private_list);
struct address_space *mapping;
@@ -712,20 +711,20 @@ static enum lru_status shadow_lru_isolate(struct list_head *item,
/*
* Page cache insertions and deletions synchronously maintain
* the shadow node LRU under the i_pages lock and the
- * lru_lock. Because the page cache tree is emptied before
- * the inode can be destroyed, holding the lru_lock pins any
+ * &lru->lock. Because the page cache tree is emptied before
+ * the inode can be destroyed, holding the &lru->lock pins any
* address_space that has nodes on the LRU.
*
* We can then safely transition to the i_pages lock to
* pin only the address_space of the particular node we want
- * to reclaim, take the node off-LRU, and drop the lru_lock.
+ * to reclaim, take the node off-LRU, and drop the &lru->lock.
*/
mapping = container_of(node->array, struct address_space, i_pages);
/* Coming from the list, invert the lock order */
if (!xa_trylock(&mapping->i_pages)) {
- spin_unlock_irq(lru_lock);
+ spin_unlock_irq(&lru->lock);
ret = LRU_RETRY;
goto out;
}
@@ -734,7 +733,7 @@ static enum lru_status shadow_lru_isolate(struct list_head *item,
if (mapping->host != NULL) {
if (!spin_trylock(&mapping->host->i_lock)) {
xa_unlock(&mapping->i_pages);
- spin_unlock_irq(lru_lock);
+ spin_unlock_irq(&lru->lock);
ret = LRU_RETRY;
goto out;
}
@@ -743,7 +742,7 @@ static enum lru_status shadow_lru_isolate(struct list_head *item,
list_lru_isolate(lru, item);
__dec_node_page_state(virt_to_page(node), WORKINGSET_NODES);
- spin_unlock(lru_lock);
+ spin_unlock(&lru->lock);
/*
* The nodes should only contain one or more shadow entries,
diff --git a/mm/zswap.c b/mm/zswap.c
index ba35e4550941..f6316b66fb23 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -1102,7 +1102,7 @@ static int zswap_writeback_entry(struct zswap_entry *entry,
* for reclaim by this ratio.
*/
static enum lru_status shrink_memcg_cb(struct list_head *item, struct list_lru_one *l,
- spinlock_t *lock, void *arg)
+ void *arg)
{
struct zswap_entry *entry = container_of(item, struct zswap_entry, lru);
bool *encountered_page_in_swapcache = (bool *)arg;
@@ -1158,7 +1158,7 @@ static enum lru_status shrink_memcg_cb(struct list_head *item, struct list_lru_one *l,
* It's safe to drop the lock here because we return either
* LRU_REMOVED_RETRY or LRU_RETRY.
*/
- spin_unlock(lock);
+ spin_unlock(&l->lock);
writeback_result = zswap_writeback_entry(entry, swpentry);
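To tie the pieces together, here is a hypothetical callback in the new form, mirroring the lock-drop pattern shrink_memcg_cb() uses above (my_object, can_reclaim() and reclaim_object() are made-up names, not part of this patch). Whether the plain or _irq unlock variant is needed depends on how the walker took the lock: zswap uses spin_unlock(&l->lock), while workingset uses the _irq variant.

    static enum lru_status my_isolate(struct list_head *item,
                                      struct list_lru_one *l, void *cb_arg)
    {
            struct my_object *obj = container_of(item, struct my_object, lru);

            if (!can_reclaim(obj))
                    return LRU_SKIP;           /* leave the item on the list */

            list_lru_isolate(l, item);         /* unlink from this list_lru_one */
            spin_unlock(&l->lock);             /* only legal with a *_RETRY return */

            reclaim_object(obj);               /* heavy work done outside the lock */

            return LRU_REMOVED_RETRY;          /* tell the walker the lock was dropped */
    }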