-rw-r--r--   .mailmap                                          |   1
-rw-r--r--   Documentation/admin-guide/cgroup-v2.rst           |   9
-rw-r--r--   Documentation/admin-guide/kernel-parameters.txt   |   2
-rw-r--r--   Documentation/admin-guide/mm/transhuge.rst        |   2
-rw-r--r--   fs/ocfs2/xattr.c                                  |   3
-rw-r--r--   fs/proc/vmcore.c                                  |   9
-rw-r--r--   include/linux/memcontrol.h                        |  12
-rw-r--r--   include/linux/mmzone.h                            |   1
-rw-r--r--   include/linux/user_namespace.h                    |   3
-rw-r--r--   include/linux/vm_event_item.h                     |   2
-rw-r--r--   kernel/signal.c                                   |   3
-rw-r--r--   kernel/ucount.c                                   |   9
-rw-r--r--   lib/objpool.c                                     |  18
-rw-r--r--   mm/damon/core.c                                   |  42
-rw-r--r--   mm/memcontrol.c                                   |   4
-rw-r--r--   mm/mlock.c                                        |   9
-rw-r--r--   mm/page_alloc.c                                   |  10
-rw-r--r--   mm/page_io.c                                      |  16
-rw-r--r--   mm/vmstat.c                                       |   2
-rw-r--r--   mm/zswap.c                                        |   6
-rw-r--r--   tools/testing/selftests/mm/hugetlb_dio.c          |  19
21 files changed, 127 insertions(+), 55 deletions(-)
diff --git a/.mailmap b/.mailmap
index b2cc9ee33caa..44f26600e338 100644
--- a/.mailmap
+++ b/.mailmap
@@ -666,6 +666,7 @@ Tomeu Vizoso <tomeu@tomeuvizoso.net> <tomeu.vizoso@collabora.com>
Thomas Graf <tgraf@suug.ch>
Thomas Körper <socketcan@esd.eu> <thomas.koerper@esd.eu>
Thomas Pedersen <twp@codeaurora.org>
+Thorsten Blum <thorsten.blum@linux.dev> <thorsten.blum@toblux.com>
Tiezhu Yang <yangtiezhu@loongson.cn> <kernelpatch@126.com>
Tingwei Zhang <quic_tingwei@quicinc.com> <tingwei@codeaurora.org>
Tirupathi Reddy <quic_tirupath@quicinc.com> <tirupath@codeaurora.org>
diff --git a/Documentation/admin-guide/cgroup-v2.rst b/Documentation/admin-guide/cgroup-v2.rst
index 69af2173555f..6d02168d78be 100644
--- a/Documentation/admin-guide/cgroup-v2.rst
+++ b/Documentation/admin-guide/cgroup-v2.rst
@@ -1599,6 +1599,15 @@ The following nested keys are defined.
pglazyfreed (npn)
Amount of reclaimed lazyfree pages
+ swpin_zero
+ Number of pages swapped into memory and filled with zero, where I/O
+ was optimized out because the page content was detected to be zero
+ during swapout.
+
+ swpout_zero
+ Number of zero-filled pages swapped out with I/O skipped due to the
+ content being detected as zero.
+
zswpin
Number of pages moved in to memory from zswap.
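Note: the two counters documented above also appear system-wide as swpin_zero and swpout_zero in /proc/vmstat (added by the mm/vmstat.c hunk further down in this diff), so a quick check outside of any cgroup might look like: grep -E 'swp(in|out)_zero' /proc/vmstat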
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 1518343bbe22..1666576acc0e 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -6688,7 +6688,7 @@
0: no polling (default)
thp_anon= [KNL]
- Format: <size>,<size>[KMG]:<state>;<size>-<size>[KMG]:<state>
+ Format: <size>[KMG],<size>[KMG]:<state>;<size>[KMG]-<size>[KMG]:<state>
state is one of "always", "madvise", "never" or "inherit".
Control the default behavior of the system with respect
to anonymous transparent hugepages.
diff --git a/Documentation/admin-guide/mm/transhuge.rst b/Documentation/admin-guide/mm/transhuge.rst
index cfdd16a52e39..a1bb495eab59 100644
--- a/Documentation/admin-guide/mm/transhuge.rst
+++ b/Documentation/admin-guide/mm/transhuge.rst
@@ -303,7 +303,7 @@ control by passing the parameter ``transparent_hugepage=always`` or
kernel command line.
Alternatively, each supported anonymous THP size can be controlled by
-passing ``thp_anon=<size>,<size>[KMG]:<state>;<size>-<size>[KMG]:<state>``,
+passing ``thp_anon=<size>[KMG],<size>[KMG]:<state>;<size>[KMG]-<size>[KMG]:<state>``,
where ``<size>`` is the THP size (must be a power of 2 of PAGE_SIZE and
supported anonymous THP) and ``<state>`` is one of ``always``, ``madvise``,
``never`` or ``inherit``.
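As an illustration of the corrected format, a boot parameter such as thp_anon=16K-64K:always;128K,512K:inherit (sizes assumed to be supported by the running kernel) enables 16K through 64K anonymous THP unconditionally and lets the 128K and 512K sizes follow the global setting; the [KMG] suffix is now documented on both endpoints of a range and on every comma-separated size.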
diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c
index dd0a05365e79..73a6f6fd8a8e 100644
--- a/fs/ocfs2/xattr.c
+++ b/fs/ocfs2/xattr.c
@@ -2036,8 +2036,7 @@ static int ocfs2_xa_remove(struct ocfs2_xa_loc *loc,
rc = 0;
ocfs2_xa_cleanup_value_truncate(loc, "removing",
orig_clusters);
- if (rc)
- goto out;
+ goto out;
}
}
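Note on the hunk above: rc is assigned 0 two lines earlier, so the removed "if (rc)" test could never be true and the goto was dead code; making the jump to out unconditional restores the intended early exit after the value-truncate cleanup.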
diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
index b52d85f8ad59..b4521b096058 100644
--- a/fs/proc/vmcore.c
+++ b/fs/proc/vmcore.c
@@ -457,10 +457,6 @@ static vm_fault_t mmap_vmcore_fault(struct vm_fault *vmf)
#endif
}
-static const struct vm_operations_struct vmcore_mmap_ops = {
- .fault = mmap_vmcore_fault,
-};
-
/**
* vmcore_alloc_buf - allocate buffer in vmalloc memory
* @size: size of buffer
@@ -488,6 +484,11 @@ static inline char *vmcore_alloc_buf(size_t size)
* virtually contiguous user-space in ELF layout.
*/
#ifdef CONFIG_MMU
+
+static const struct vm_operations_struct vmcore_mmap_ops = {
+ .fault = mmap_vmcore_fault,
+};
+
/*
* remap_oldmem_pfn_checked - do remap_oldmem_pfn_range replacing all pages
* reported as not being ram with the zero page.
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 8e4608be811d..0bd8f61a5597 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -1711,8 +1711,9 @@ static inline int memcg_kmem_id(struct mem_cgroup *memcg)
struct mem_cgroup *mem_cgroup_from_slab_obj(void *p);
-static inline void count_objcg_event(struct obj_cgroup *objcg,
- enum vm_event_item idx)
+static inline void count_objcg_events(struct obj_cgroup *objcg,
+ enum vm_event_item idx,
+ unsigned long count)
{
struct mem_cgroup *memcg;
@@ -1721,7 +1722,7 @@ static inline void count_objcg_event(struct obj_cgroup *objcg,
rcu_read_lock();
memcg = obj_cgroup_memcg(objcg);
- count_memcg_events(memcg, idx, 1);
+ count_memcg_events(memcg, idx, count);
rcu_read_unlock();
}
@@ -1776,8 +1777,9 @@ static inline struct mem_cgroup *mem_cgroup_from_slab_obj(void *p)
return NULL;
}
-static inline void count_objcg_event(struct obj_cgroup *objcg,
- enum vm_event_item idx)
+static inline void count_objcg_events(struct obj_cgroup *objcg,
+ enum vm_event_item idx,
+ unsigned long count)
{
}
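For reference, the rename above only adds an explicit event count where the old helper hard-coded 1: existing callers become count_objcg_events(objcg, ZSWPIN, 1) and so on (see the mm/zswap.c hunks below), while new batched callers pass a folio's page count, as the mm/page_io.c hunks do with count_objcg_events(objcg, SWPOUT_ZERO, nr_pages).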
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 2e8c4307c728..5e8f567753bd 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -825,6 +825,7 @@ struct zone {
unsigned long watermark_boost;
unsigned long nr_reserved_highatomic;
+ unsigned long nr_free_highatomic;
/*
* We don't know if the memory that we're going to allocate will be
diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h
index 3625096d5f85..7183e5aca282 100644
--- a/include/linux/user_namespace.h
+++ b/include/linux/user_namespace.h
@@ -141,7 +141,8 @@ static inline long get_rlimit_value(struct ucounts *ucounts, enum rlimit_type ty
long inc_rlimit_ucounts(struct ucounts *ucounts, enum rlimit_type type, long v);
bool dec_rlimit_ucounts(struct ucounts *ucounts, enum rlimit_type type, long v);
-long inc_rlimit_get_ucounts(struct ucounts *ucounts, enum rlimit_type type);
+long inc_rlimit_get_ucounts(struct ucounts *ucounts, enum rlimit_type type,
+ bool override_rlimit);
void dec_rlimit_put_ucounts(struct ucounts *ucounts, enum rlimit_type type);
bool is_rlimit_overlimit(struct ucounts *ucounts, enum rlimit_type type, unsigned long max);
diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h
index aed952d04132..f70d0958095c 100644
--- a/include/linux/vm_event_item.h
+++ b/include/linux/vm_event_item.h
@@ -134,6 +134,8 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
#ifdef CONFIG_SWAP
SWAP_RA,
SWAP_RA_HIT,
+ SWPIN_ZERO,
+ SWPOUT_ZERO,
#ifdef CONFIG_KSM
KSM_SWPIN_COPY,
#endif
diff --git a/kernel/signal.c b/kernel/signal.c
index 4344860ffcac..cbabb2d05e0a 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -419,7 +419,8 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t gfp_flags,
*/
rcu_read_lock();
ucounts = task_ucounts(t);
- sigpending = inc_rlimit_get_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING);
+ sigpending = inc_rlimit_get_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING,
+ override_rlimit);
rcu_read_unlock();
if (!sigpending)
return NULL;
diff --git a/kernel/ucount.c b/kernel/ucount.c
index 8c07714ff27d..696406939be5 100644
--- a/kernel/ucount.c
+++ b/kernel/ucount.c
@@ -307,7 +307,8 @@ void dec_rlimit_put_ucounts(struct ucounts *ucounts, enum rlimit_type type)
do_dec_rlimit_put_ucounts(ucounts, NULL, type);
}
-long inc_rlimit_get_ucounts(struct ucounts *ucounts, enum rlimit_type type)
+long inc_rlimit_get_ucounts(struct ucounts *ucounts, enum rlimit_type type,
+ bool override_rlimit)
{
/* Caller must hold a reference to ucounts */
struct ucounts *iter;
@@ -317,10 +318,11 @@ long inc_rlimit_get_ucounts(struct ucounts *ucounts, enum rlimit_type type)
for (iter = ucounts; iter; iter = iter->ns->ucounts) {
long new = atomic_long_add_return(1, &iter->rlimit[type]);
if (new < 0 || new > max)
- goto unwind;
+ goto dec_unwind;
if (iter == ucounts)
ret = new;
- max = get_userns_rlimit_max(iter->ns, type);
+ if (!override_rlimit)
+ max = get_userns_rlimit_max(iter->ns, type);
/*
* Grab an extra ucount reference for the caller when
* the rlimit count was previously 0.
@@ -334,7 +336,6 @@ long inc_rlimit_get_ucounts(struct ucounts *ucounts, enum rlimit_type type)
dec_unwind:
dec = atomic_long_sub_return(1, &iter->rlimit[type]);
WARN_ON_ONCE(dec < 0);
-unwind:
do_dec_rlimit_put_ucounts(ucounts, iter, type);
return 0;
}
diff --git a/lib/objpool.c b/lib/objpool.c
index fd108fe0d095..b998b720c732 100644
--- a/lib/objpool.c
+++ b/lib/objpool.c
@@ -74,15 +74,21 @@ objpool_init_percpu_slots(struct objpool_head *pool, int nr_objs,
* warm caches and TLB hits. in default vmalloc is used to
* reduce the pressure of kernel slab system. as we know,
* mimimal size of vmalloc is one page since vmalloc would
- * always align the requested size to page size
+ * always align the requested size to page size.
+ * but if vmalloc fails or it is not available (e.g. GFP_ATOMIC)
+ * allocate percpu slot with kmalloc.
*/
- if ((pool->gfp & GFP_ATOMIC) == GFP_ATOMIC)
- slot = kmalloc_node(size, pool->gfp, cpu_to_node(i));
- else
+ slot = NULL;
+
+ if ((pool->gfp & (GFP_ATOMIC | GFP_KERNEL)) != GFP_ATOMIC)
slot = __vmalloc_node(size, sizeof(void *), pool->gfp,
cpu_to_node(i), __builtin_return_address(0));
- if (!slot)
- return -ENOMEM;
+
+ if (!slot) {
+ slot = kmalloc_node(size, pool->gfp, cpu_to_node(i));
+ if (!slot)
+ return -ENOMEM;
+ }
memset(slot, 0, size);
pool->cpu_slots[i] = slot;
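A note on the new GFP test above: for a GFP_ATOMIC pool, gfp & (GFP_ATOMIC | GFP_KERNEL) equals GFP_ATOMIC, so the sleeping __vmalloc_node() path is skipped and kmalloc_node() is used directly; for GFP_KERNEL pools, vmalloc is still tried first and kmalloc_node() now serves as the fallback when it fails.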
diff --git a/mm/damon/core.c b/mm/damon/core.c
index a83f3b736d51..511c3f61ab44 100644
--- a/mm/damon/core.c
+++ b/mm/damon/core.c
@@ -1412,7 +1412,7 @@ static void damon_do_apply_schemes(struct damon_ctx *c,
damon_for_each_scheme(s, c) {
struct damos_quota *quota = &s->quota;
- if (c->passed_sample_intervals != s->next_apply_sis)
+ if (c->passed_sample_intervals < s->next_apply_sis)
continue;
if (!s->wmarks.activated)
@@ -1456,17 +1456,31 @@ static unsigned long damon_feed_loop_next_input(unsigned long last_input,
unsigned long score)
{
const unsigned long goal = 10000;
- unsigned long score_goal_diff = max(goal, score) - min(goal, score);
- unsigned long score_goal_diff_bp = score_goal_diff * 10000 / goal;
- unsigned long compensation = last_input * score_goal_diff_bp / 10000;
/* Set minimum input as 10000 to avoid compensation be zero */
const unsigned long min_input = 10000;
+ unsigned long score_goal_diff, compensation;
+ bool over_achieving = score > goal;
- if (goal > score)
+ if (score == goal)
+ return last_input;
+ if (score >= goal * 2)
+ return min_input;
+
+ if (over_achieving)
+ score_goal_diff = score - goal;
+ else
+ score_goal_diff = goal - score;
+
+ if (last_input < ULONG_MAX / score_goal_diff)
+ compensation = last_input * score_goal_diff / goal;
+ else
+ compensation = last_input / goal * score_goal_diff;
+
+ if (over_achieving)
+ return max(last_input - compensation, min_input);
+ if (last_input < ULONG_MAX - compensation)
return last_input + compensation;
- if (last_input > compensation + min_input)
- return last_input - compensation;
- return min_input;
+ return ULONG_MAX;
}
#ifdef CONFIG_PSI
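The arithmetic in damon_feed_loop_next_input() above can be sanity-checked with a minimal userspace sketch (a standalone approximation, not kernel code; min()/max() are open-coded and the constants mirror the hunk):

/* feed_loop_check.c: userspace mirror of the new feedback logic */
#include <limits.h>
#include <stdio.h>

static unsigned long next_input(unsigned long last_input, unsigned long score)
{
	const unsigned long goal = 10000;
	/* minimum input of 10000 keeps the compensation from collapsing to zero */
	const unsigned long min_input = 10000;
	unsigned long score_goal_diff, compensation;
	int over_achieving = score > goal;

	if (score == goal)
		return last_input;
	if (score >= goal * 2)
		return min_input;

	score_goal_diff = over_achieving ? score - goal : goal - score;

	/* pick the multiply/divide ordering that cannot overflow */
	if (last_input < ULONG_MAX / score_goal_diff)
		compensation = last_input * score_goal_diff / goal;
	else
		compensation = last_input / goal * score_goal_diff;

	if (over_achieving)
		return last_input - compensation > min_input ?
		       last_input - compensation : min_input;
	if (last_input < ULONG_MAX - compensation)
		return last_input + compensation;
	return ULONG_MAX;
}

int main(void)
{
	printf("%lu\n", next_input(100000, 15000));	/* 50% over the goal -> 50000 */
	printf("%lu\n", next_input(100000, 5000));	/* 50% under the goal -> 150000 */
	printf("%lu\n", next_input(100000, 20000));	/* >= 2x the goal -> clamped to 10000 */
	return 0;
}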
@@ -1622,7 +1636,7 @@ static void kdamond_apply_schemes(struct damon_ctx *c)
bool has_schemes_to_apply = false;
damon_for_each_scheme(s, c) {
- if (c->passed_sample_intervals != s->next_apply_sis)
+ if (c->passed_sample_intervals < s->next_apply_sis)
continue;
if (!s->wmarks.activated)
@@ -1642,9 +1656,9 @@ static void kdamond_apply_schemes(struct damon_ctx *c)
}
damon_for_each_scheme(s, c) {
- if (c->passed_sample_intervals != s->next_apply_sis)
+ if (c->passed_sample_intervals < s->next_apply_sis)
continue;
- s->next_apply_sis +=
+ s->next_apply_sis = c->passed_sample_intervals +
(s->apply_interval_us ? s->apply_interval_us :
c->attrs.aggr_interval) / sample_interval;
}
@@ -2000,7 +2014,7 @@ static int kdamond_fn(void *data)
if (ctx->ops.check_accesses)
max_nr_accesses = ctx->ops.check_accesses(ctx);
- if (ctx->passed_sample_intervals == next_aggregation_sis) {
+ if (ctx->passed_sample_intervals >= next_aggregation_sis) {
kdamond_merge_regions(ctx,
max_nr_accesses / 10,
sz_limit);
@@ -2018,7 +2032,7 @@ static int kdamond_fn(void *data)
sample_interval = ctx->attrs.sample_interval ?
ctx->attrs.sample_interval : 1;
- if (ctx->passed_sample_intervals == next_aggregation_sis) {
+ if (ctx->passed_sample_intervals >= next_aggregation_sis) {
ctx->next_aggregation_sis = next_aggregation_sis +
ctx->attrs.aggr_interval / sample_interval;
@@ -2028,7 +2042,7 @@ static int kdamond_fn(void *data)
ctx->ops.reset_aggregated(ctx);
}
- if (ctx->passed_sample_intervals == next_ops_update_sis) {
+ if (ctx->passed_sample_intervals >= next_ops_update_sis) {
ctx->next_ops_update_sis = next_ops_update_sis +
ctx->attrs.ops_update_interval /
sample_interval;
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index bc0033a2aa3c..3402b6d13bc2 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -438,6 +438,10 @@ static const unsigned int memcg_vm_event_stat[] = {
PGDEACTIVATE,
PGLAZYFREE,
PGLAZYFREED,
+#ifdef CONFIG_SWAP
+ SWPIN_ZERO,
+ SWPOUT_ZERO,
+#endif
#ifdef CONFIG_ZSWAP
ZSWPIN,
ZSWPOUT,
diff --git a/mm/mlock.c b/mm/mlock.c
index e3e3dc2b2956..cde076fa7d5e 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -725,14 +725,17 @@ static int apply_mlockall_flags(int flags)
}
for_each_vma(vmi, vma) {
+ int error;
vm_flags_t newflags;
newflags = vma->vm_flags & ~VM_LOCKED_MASK;
newflags |= to_add;
- /* Ignore errors */
- mlock_fixup(&vmi, vma, &prev, vma->vm_start, vma->vm_end,
- newflags);
+ error = mlock_fixup(&vmi, vma, &prev, vma->vm_start, vma->vm_end,
+ newflags);
+ /* Ignore errors, but prev needs fixing up. */
+ if (error)
+ prev = vma;
cond_resched();
}
out:
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 0d6301120fc4..744c1a413fdb 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -635,6 +635,8 @@ compaction_capture(struct capture_control *capc, struct page *page,
static inline void account_freepages(struct zone *zone, int nr_pages,
int migratetype)
{
+ lockdep_assert_held(&zone->lock);
+
if (is_migrate_isolate(migratetype))
return;
@@ -642,6 +644,9 @@ static inline void account_freepages(struct zone *zone, int nr_pages,
if (is_migrate_cma(migratetype))
__mod_zone_page_state(zone, NR_FREE_CMA_PAGES, nr_pages);
+ else if (is_migrate_highatomic(migratetype))
+ WRITE_ONCE(zone->nr_free_highatomic,
+ zone->nr_free_highatomic + nr_pages);
}
/* Used for pages not on another list */
@@ -3079,11 +3084,10 @@ static inline long __zone_watermark_unusable_free(struct zone *z,
/*
* If the caller does not have rights to reserves below the min
- * watermark then subtract the high-atomic reserves. This will
- * over-estimate the size of the atomic reserve but it avoids a search.
+ * watermark then subtract the free pages reserved for highatomic.
*/
if (likely(!(alloc_flags & ALLOC_RESERVES)))
- unusable_free += z->nr_reserved_highatomic;
+ unusable_free += READ_ONCE(z->nr_free_highatomic);
#ifdef CONFIG_CMA
/* If allocation can't use CMA areas don't use free CMA pages */
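A worked example for the watermark change above: if a zone has reserved 4MB worth of highatomic pageblocks (nr_reserved_highatomic) but only 1MB of those pages is actually free, the old check treated the full 4MB as unusable for normal allocations, while the new check subtracts only the 1MB tracked in nr_free_highatomic (sizes are illustrative).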
diff --git a/mm/page_io.c b/mm/page_io.c
index ef42651d5ef8..39e8be23bd95 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -204,7 +204,9 @@ static bool is_folio_zero_filled(struct folio *folio)
static void swap_zeromap_folio_set(struct folio *folio)
{
+ struct obj_cgroup *objcg = get_obj_cgroup_from_folio(folio);
struct swap_info_struct *sis = swp_swap_info(folio->swap);
+ int nr_pages = folio_nr_pages(folio);
swp_entry_t entry;
unsigned int i;
@@ -212,6 +214,12 @@ static void swap_zeromap_folio_set(struct folio *folio)
entry = page_swap_entry(folio_page(folio, i));
set_bit(swp_offset(entry), sis->zeromap);
}
+
+ count_vm_events(SWPOUT_ZERO, nr_pages);
+ if (objcg) {
+ count_objcg_events(objcg, SWPOUT_ZERO, nr_pages);
+ obj_cgroup_put(objcg);
+ }
}
static void swap_zeromap_folio_clear(struct folio *folio)
@@ -505,6 +513,7 @@ static void sio_read_complete(struct kiocb *iocb, long ret)
static bool swap_read_folio_zeromap(struct folio *folio)
{
int nr_pages = folio_nr_pages(folio);
+ struct obj_cgroup *objcg;
bool is_zeromap;
/*
@@ -519,6 +528,13 @@ static bool swap_read_folio_zeromap(struct folio *folio)
if (!is_zeromap)
return false;
+ objcg = get_obj_cgroup_from_folio(folio);
+ count_vm_events(SWPIN_ZERO, nr_pages);
+ if (objcg) {
+ count_objcg_events(objcg, SWPIN_ZERO, nr_pages);
+ obj_cgroup_put(objcg);
+ }
+
folio_zero_range(folio, 0, folio_size(folio));
folio_mark_uptodate(folio);
return true;
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 7b62bfb19afa..3d82bb906dcf 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -1415,6 +1415,8 @@ const char * const vmstat_text[] = {
#ifdef CONFIG_SWAP
"swap_ra",
"swap_ra_hit",
+ "swpin_zero",
+ "swpout_zero",
#ifdef CONFIG_KSM
"ksm_swpin_copy",
#endif
diff --git a/mm/zswap.c b/mm/zswap.c
index a9f4fa121eb2..a400324a45a7 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -1053,7 +1053,7 @@ static int zswap_writeback_entry(struct zswap_entry *entry,
count_vm_event(ZSWPWB);
if (entry->objcg)
- count_objcg_event(entry->objcg, ZSWPWB);
+ count_objcg_events(entry->objcg, ZSWPWB, 1);
zswap_entry_free(entry);
@@ -1483,7 +1483,7 @@ bool zswap_store(struct folio *folio)
if (objcg) {
obj_cgroup_charge_zswap(objcg, entry->length);
- count_objcg_event(objcg, ZSWPOUT);
+ count_objcg_events(objcg, ZSWPOUT, 1);
}
/*
@@ -1577,7 +1577,7 @@ bool zswap_load(struct folio *folio)
count_vm_event(ZSWPIN);
if (entry->objcg)
- count_objcg_event(entry->objcg, ZSWPIN);
+ count_objcg_events(entry->objcg, ZSWPIN, 1);
if (swapcache) {
zswap_entry_free(entry);
diff --git a/tools/testing/selftests/mm/hugetlb_dio.c b/tools/testing/selftests/mm/hugetlb_dio.c
index f9ac20c657ec..60001c142ce9 100644
--- a/tools/testing/selftests/mm/hugetlb_dio.c
+++ b/tools/testing/selftests/mm/hugetlb_dio.c
@@ -44,13 +44,6 @@ void run_dio_using_hugetlb(unsigned int start_off, unsigned int end_off)
if (fd < 0)
ksft_exit_fail_perror("Error opening file\n");
- /* Get the free huge pages before allocation */
- free_hpage_b = get_free_hugepages();
- if (free_hpage_b == 0) {
- close(fd);
- ksft_exit_skip("No free hugepage, exiting!\n");
- }
-
/* Allocate a hugetlb page */
orig_buffer = mmap(NULL, h_pagesize, mmap_prot, mmap_flags, -1, 0);
if (orig_buffer == MAP_FAILED) {
@@ -94,8 +87,20 @@ void run_dio_using_hugetlb(unsigned int start_off, unsigned int end_off)
int main(void)
{
size_t pagesize = 0;
+ int fd;
ksft_print_header();
+
+ /* Open the file to DIO */
+ fd = open("/tmp", O_TMPFILE | O_RDWR | O_DIRECT, 0664);
+ if (fd < 0)
+ ksft_exit_skip("Unable to allocate file: %s\n", strerror(errno));
+ close(fd);
+
+ /* Check if huge pages are free */
+ if (!get_free_hugepages())
+ ksft_exit_skip("No free hugepage, exiting\n");
+
ksft_set_plan(4);
/* Get base page size */