author    Andrew Morton <akpm@linux-foundation.org>    2024-11-11 00:04:10 -0800
committer Andrew Morton <akpm@linux-foundation.org>    2024-11-11 00:04:10 -0800
commit  2ec0859039ecddc95f5d94c134d01aa639a49622 (patch)
tree    430d0a7a1f38b8d50a9bf455c1835426fda6a571 /mm/page_alloc.c
parent  73da523802eafafe7e55a5e8a8bc8ee3f5cf3b9b (diff)
parent  e7ac4daeed91a25382091e73818ea0cddb1afd5e (diff)
Merge branch 'mm-hotfixes-stable' into mm-stable
Pick up e7ac4daeed91 ("mm: count zeromap read and set for swapout and swapin") in order to move

  mm: define obj_cgroup_get() if CONFIG_MEMCG is not defined
  mm: zswap: modify zswap_compress() to accept a page instead of a folio
  mm: zswap: rename zswap_pool_get() to zswap_pool_tryget()
  mm: zswap: modify zswap_stored_pages to be atomic_long_t
  mm: zswap: support large folios in zswap_store()
  mm: swap: count successful large folio zswap stores in hugepage zswpout stats
  mm: zswap: zswap_store_page() will initialize entry after adding to xarray.
  mm: add per-order mTHP swpin counters

from mm-unstable into mm-stable.
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--  mm/page_alloc.c  10
1 file changed, 7 insertions(+), 3 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 0d6301120fc4..744c1a413fdb 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -635,6 +635,8 @@ compaction_capture(struct capture_control *capc, struct page *page,
 static inline void account_freepages(struct zone *zone, int nr_pages,
 				     int migratetype)
 {
+	lockdep_assert_held(&zone->lock);
+
 	if (is_migrate_isolate(migratetype))
 		return;
 
@@ -642,6 +644,9 @@ static inline void account_freepages(struct zone *zone, int nr_pages,
 
 	if (is_migrate_cma(migratetype))
 		__mod_zone_page_state(zone, NR_FREE_CMA_PAGES, nr_pages);
+	else if (is_migrate_highatomic(migratetype))
+		WRITE_ONCE(zone->nr_free_highatomic,
+			   zone->nr_free_highatomic + nr_pages);
 }
 
 /* Used for pages not on another list */
@@ -3079,11 +3084,10 @@ static inline long __zone_watermark_unusable_free(struct zone *z,
 
 	/*
 	 * If the caller does not have rights to reserves below the min
-	 * watermark then subtract the high-atomic reserves. This will
-	 * over-estimate the size of the atomic reserve but it avoids a search.
+	 * watermark then subtract the free pages reserved for highatomic.
 	 */
 	if (likely(!(alloc_flags & ALLOC_RESERVES)))
-		unusable_free += z->nr_reserved_highatomic;
+		unusable_free += READ_ONCE(z->nr_free_highatomic);
 
 #ifdef CONFIG_CMA
 	/* If allocation can't use CMA areas don't use free CMA pages */
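
The two hunks pair a locked writer with a lockless reader: account_freepages() adjusts zone->nr_free_highatomic with WRITE_ONCE() while holding zone->lock (hence the new lockdep_assert_held()), and __zone_watermark_unusable_free() samples it with READ_ONCE() without taking the lock, accepting a slightly stale value in exchange for a cheap fast path. Below is a minimal userspace sketch of that pattern, not the kernel implementation: the struct zone here and the account_highatomic()/unusable_free() names are invented for illustration, and C11 relaxed atomics plus a pthread mutex stand in for WRITE_ONCE()/READ_ONCE() and zone->lock.

/*
 * Userspace sketch only -- not kernel code.  struct zone is a made-up
 * stand-in; relaxed atomics and a pthread mutex play the roles of
 * WRITE_ONCE()/READ_ONCE() and zone->lock.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct zone {
	pthread_mutex_t lock;            /* stands in for zone->lock */
	atomic_long nr_free_highatomic;  /* written under lock, read locklessly */
	long nr_free_pages;
};

/* Writer side, like account_freepages(): update the counter under the lock. */
static void account_highatomic(struct zone *zone, long nr_pages)
{
	pthread_mutex_lock(&zone->lock);
	zone->nr_free_pages += nr_pages;
	/*
	 * The lock serializes writers, so a plain read-modify-write suffices;
	 * the relaxed store (like WRITE_ONCE()) only has to guarantee that a
	 * lockless reader never sees a torn value.
	 */
	atomic_store_explicit(&zone->nr_free_highatomic,
			      atomic_load_explicit(&zone->nr_free_highatomic,
						   memory_order_relaxed) + nr_pages,
			      memory_order_relaxed);
	pthread_mutex_unlock(&zone->lock);
}

/* Reader side, like __zone_watermark_unusable_free(): no lock taken. */
static long unusable_free(struct zone *zone, int has_reserve_rights)
{
	long unusable = 0;

	if (!has_reserve_rights)
		/* Relaxed load (like READ_ONCE()): possibly stale, never torn. */
		unusable += atomic_load_explicit(&zone->nr_free_highatomic,
						 memory_order_relaxed);
	return unusable;
}

int main(void)
{
	static struct zone z = { .lock = PTHREAD_MUTEX_INITIALIZER };

	account_highatomic(&z, 32);   /* highatomic pageblock gains free pages */
	account_highatomic(&z, -8);   /* some of them get allocated again */
	printf("pages a normal request must treat as unusable: %ld\n",
	       unusable_free(&z, 0));
	return 0;
}

Build with cc -pthread; the sketch prints 24. The point of the split is that only the accounting paths pay for zone->lock, while the watermark check just needs an untorn snapshot of the counter and can tolerate it being momentarily out of date.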