Diffstat (limited to 'tools/testing/selftests/mm')
-rw-r--r--  tools/testing/selftests/mm/.gitignore                                              |   2
-rw-r--r--  tools/testing/selftests/mm/Makefile                                                |   2
-rw-r--r--  tools/testing/selftests/mm/cow.c                                                   |   2
-rw-r--r--  tools/testing/selftests/mm/guard-regions.c (renamed from tools/testing/selftests/mm/guard-pages.c) | 984
-rw-r--r--  tools/testing/selftests/mm/gup_longterm.c                                          |  41
-rw-r--r--  tools/testing/selftests/mm/hugepage-mremap.c                                       |   2
-rw-r--r--  tools/testing/selftests/mm/ksm_functional_tests.c                                  |   8
-rw-r--r--  tools/testing/selftests/mm/map_populate.c                                          |   5
-rw-r--r--  tools/testing/selftests/mm/memfd_secret.c                                          |  14
-rw-r--r--  tools/testing/selftests/mm/mkdirty.c                                               |   8
-rw-r--r--  tools/testing/selftests/mm/mlock-random-test.c                                     |   4
-rw-r--r--  tools/testing/selftests/mm/mlock2.h                                                |   9
-rw-r--r--  tools/testing/selftests/mm/mseal_test.c                                            |   6
-rw-r--r--  tools/testing/selftests/mm/pkey-helpers.h                                          |   3
-rw-r--r--  tools/testing/selftests/mm/pkey_sighandler_tests.c                                 |   4
-rw-r--r--  tools/testing/selftests/mm/protection_keys.c                                       |   4
-rwxr-xr-x  tools/testing/selftests/mm/run_vmtests.sh                                          | 101
-rw-r--r--  tools/testing/selftests/mm/split_huge_page_test.c                                  | 106
-rw-r--r--  tools/testing/selftests/mm/thuge-gen.c                                             |   4
-rw-r--r--  tools/testing/selftests/mm/uffd-common.c                                           |  16
-rw-r--r--  tools/testing/selftests/mm/uffd-common.h                                           |   2
-rw-r--r--  tools/testing/selftests/mm/uffd-stress.c                                           |  57
-rw-r--r--  tools/testing/selftests/mm/uffd-unit-tests.c                                       |  21
-rw-r--r--  tools/testing/selftests/mm/uffd-wp-mremap.c                                        |   5
-rwxr-xr-x  tools/testing/selftests/mm/va_high_addr_switch.sh                                  |  28
-rw-r--r--  tools/testing/selftests/mm/vm_util.h                                               |  19
26 files changed, 1224 insertions(+), 233 deletions(-)
diff --git a/tools/testing/selftests/mm/.gitignore b/tools/testing/selftests/mm/.gitignore
index 121000c28c10..c5241b193db8 100644
--- a/tools/testing/selftests/mm/.gitignore
+++ b/tools/testing/selftests/mm/.gitignore
@@ -57,4 +57,4 @@ droppable
hugetlb_dio
pkey_sighandler_tests_32
pkey_sighandler_tests_64
-guard-pages
+guard-regions
diff --git a/tools/testing/selftests/mm/Makefile b/tools/testing/selftests/mm/Makefile
index 63ce39d024bb..8270895039d1 100644
--- a/tools/testing/selftests/mm/Makefile
+++ b/tools/testing/selftests/mm/Makefile
@@ -97,7 +97,7 @@ TEST_GEN_FILES += hugetlb_fault_after_madv
TEST_GEN_FILES += hugetlb_madv_vs_map
TEST_GEN_FILES += hugetlb_dio
TEST_GEN_FILES += droppable
-TEST_GEN_FILES += guard-pages
+TEST_GEN_FILES += guard-regions
ifneq ($(ARCH),arm64)
TEST_GEN_FILES += soft-dirty
diff --git a/tools/testing/selftests/mm/cow.c b/tools/testing/selftests/mm/cow.c
index 9446673645eb..f0cb14ea8608 100644
--- a/tools/testing/selftests/mm/cow.c
+++ b/tools/testing/selftests/mm/cow.c
@@ -876,7 +876,7 @@ static void do_run_with_thp(test_fn fn, enum thp_run thp_run, size_t thpsize)
mremap_size = thpsize / 2;
mremap_mem = mmap(NULL, mremap_size, PROT_NONE,
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
- if (mem == MAP_FAILED) {
+ if (mremap_mem == MAP_FAILED) {
ksft_test_result_fail("mmap() failed\n");
goto munmap;
}
diff --git a/tools/testing/selftests/mm/guard-pages.c b/tools/testing/selftests/mm/guard-regions.c
index ece37212a8a2..b3d0e2771096 100644
--- a/tools/testing/selftests/mm/guard-pages.c
+++ b/tools/testing/selftests/mm/guard-regions.c
@@ -6,6 +6,7 @@
#include <assert.h>
#include <errno.h>
#include <fcntl.h>
+#include <linux/limits.h>
#include <linux/userfaultfd.h>
#include <setjmp.h>
#include <signal.h>
@@ -18,6 +19,9 @@
#include <sys/syscall.h>
#include <sys/uio.h>
#include <unistd.h>
+#include "vm_util.h"
+
+#include "../pidfd/pidfd.h"
/*
* Ignore the checkpatch warning, as per the C99 standard, section 7.14.1.1:
@@ -37,6 +41,79 @@ static sigjmp_buf signal_jmp_buf;
*/
#define FORCE_READ(x) (*(volatile typeof(x) *)x)
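/*
 * Aside: the try_read_buf()/try_write_buf() helpers used throughout pair
 * this macro with sigsetjmp() - handle_fatal() siglongjmps back with the
 * signal number, so probing a guarded page returns false rather than
 * killing the test. A minimal sketch of the pattern (assumed, not verbatim):
 */
static bool try_access(char *ptr, bool write)
{
	if (sigsetjmp(signal_jmp_buf, 0))
		return false;	/* Faulted - e.g. SIGSEGV on a guard region. */

	if (write)
		*(volatile char *)ptr = 'x';
	else
		FORCE_READ(ptr);

	return true;		/* The access succeeded. */
}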
+/*
+ * How is the test backing the mapping being tested?
+ */
+enum backing_type {
+ ANON_BACKED,
+ SHMEM_BACKED,
+ LOCAL_FILE_BACKED,
+};
+
+FIXTURE(guard_regions)
+{
+ unsigned long page_size;
+ char path[PATH_MAX];
+ int fd;
+};
+
+FIXTURE_VARIANT(guard_regions)
+{
+ enum backing_type backing;
+};
+
+FIXTURE_VARIANT_ADD(guard_regions, anon)
+{
+ .backing = ANON_BACKED,
+};
+
+FIXTURE_VARIANT_ADD(guard_regions, shmem)
+{
+ .backing = SHMEM_BACKED,
+};
+
+FIXTURE_VARIANT_ADD(guard_regions, file)
+{
+ .backing = LOCAL_FILE_BACKED,
+};
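/*
 * Note: the kselftest harness instantiates every TEST_F(guard_regions, ...)
 * below once per variant above, so e.g. the basic test runs three times:
 * anon, shmem and file backed.
 */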
+
+static bool is_anon_backed(const FIXTURE_VARIANT(guard_regions) * variant)
+{
+ switch (variant->backing) {
+ case ANON_BACKED:
+ case SHMEM_BACKED:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static void *mmap_(FIXTURE_DATA(guard_regions) * self,
+ const FIXTURE_VARIANT(guard_regions) * variant,
+ void *addr, size_t length, int prot, int extra_flags,
+ off_t offset)
+{
+ int fd;
+ int flags = extra_flags;
+
+ switch (variant->backing) {
+ case ANON_BACKED:
+ flags |= MAP_PRIVATE | MAP_ANON;
+ fd = -1;
+ break;
+ case SHMEM_BACKED:
+ case LOCAL_FILE_BACKED:
+ flags |= MAP_SHARED;
+ fd = self->fd;
+ break;
+ default:
+ ksft_exit_fail();
+ break;
+ }
+
+ return mmap(addr, length, prot, flags, fd, offset);
+}
+
static int userfaultfd(int flags)
{
return syscall(SYS_userfaultfd, flags);
@@ -50,11 +127,6 @@ static void handle_fatal(int c)
siglongjmp(signal_jmp_buf, c);
}
-static int pidfd_open(pid_t pid, unsigned int flags)
-{
- return syscall(SYS_pidfd_open, pid, flags);
-}
-
static ssize_t sys_process_madvise(int pidfd, const struct iovec *iovec,
size_t n, int advice, unsigned int flags)
{
@@ -107,12 +179,7 @@ static bool try_read_write_buf(char *ptr)
return try_read_buf(ptr) && try_write_buf(ptr);
}
-FIXTURE(guard_pages)
-{
- unsigned long page_size;
-};
-
-FIXTURE_SETUP(guard_pages)
+static void setup_sighandler(void)
{
struct sigaction act = {
.sa_handler = &handle_fatal,
@@ -122,11 +189,9 @@ FIXTURE_SETUP(guard_pages)
sigemptyset(&act.sa_mask);
if (sigaction(SIGSEGV, &act, NULL))
ksft_exit_fail_perror("sigaction");
+}
- self->page_size = (unsigned long)sysconf(_SC_PAGESIZE);
-};
-
-FIXTURE_TEARDOWN(guard_pages)
+static void teardown_sighandler(void)
{
struct sigaction act = {
.sa_handler = SIG_DFL,
@@ -137,15 +202,109 @@ FIXTURE_TEARDOWN(guard_pages)
sigaction(SIGSEGV, &act, NULL);
}
-TEST_F(guard_pages, basic)
+static int open_file(const char *prefix, char *path)
+{
+ int fd;
+
+ snprintf(path, PATH_MAX, "%sguard_regions_test_file_XXXXXX", prefix);
+ fd = mkstemp(path);
+ if (fd < 0)
+ ksft_exit_fail_perror("mkstemp");
+
+ return fd;
+}
+
+/* Establish a varying pattern in a buffer. */
+static void set_pattern(char *ptr, size_t num_pages, size_t page_size)
+{
+ size_t i;
+
+ for (i = 0; i < num_pages; i++) {
+ char *ptr2 = &ptr[i * page_size];
+
+ memset(ptr2, 'a' + (i % 26), page_size);
+ }
+}
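/*
 * For example, set_pattern(ptr, 3, page_size) fills page 0 with 'a', page 1
 * with 'b' and page 2 with 'c'; a 27th page would wrap back around to 'a'.
 */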
+
+/*
+ * Check that a buffer contains the pattern set by set_pattern(), starting at a
+ * page offset of pgoff within the buffer.
+ */
+static bool check_pattern_offset(char *ptr, size_t num_pages, size_t page_size,
+ size_t pgoff)
+{
+ size_t i;
+
+ for (i = 0; i < num_pages * page_size; i++) {
+ size_t offset = pgoff * page_size + i;
+ char actual = ptr[offset];
+ char expected = 'a' + ((offset / page_size) % 26);
+
+ if (actual != expected)
+ return false;
+ }
+
+ return true;
+}
+
+/* Check that a buffer contains the pattern set by set_pattern(). */
+static bool check_pattern(char *ptr, size_t num_pages, size_t page_size)
+{
+ return check_pattern_offset(ptr, num_pages, page_size, 0);
+}
+
+/* Determine if a buffer contains only repetitions of a specified char. */
+static bool is_buf_eq(char *buf, size_t size, char chr)
+{
+ size_t i;
+
+ for (i = 0; i < size; i++) {
+ if (buf[i] != chr)
+ return false;
+ }
+
+ return true;
+}
+
+FIXTURE_SETUP(guard_regions)
+{
+ self->page_size = (unsigned long)sysconf(_SC_PAGESIZE);
+ setup_sighandler();
+
+ if (variant->backing == ANON_BACKED)
+ return;
+
+ self->fd = open_file(
+ variant->backing == SHMEM_BACKED ? "/tmp/" : "",
+ self->path);
+
+ /* We truncate file to at least 100 pages, tests can modify as needed. */
+ ASSERT_EQ(ftruncate(self->fd, 100 * self->page_size), 0);
+};
+
+FIXTURE_TEARDOWN_PARENT(guard_regions)
+{
+ teardown_sighandler();
+
+ if (variant->backing == ANON_BACKED)
+ return;
+
+ if (self->fd >= 0)
+ close(self->fd);
+
+ if (self->path[0] != '\0')
+ unlink(self->path);
+}
+
+TEST_F(guard_regions, basic)
{
const unsigned long NUM_PAGES = 10;
const unsigned long page_size = self->page_size;
char *ptr;
int i;
- ptr = mmap(NULL, NUM_PAGES * page_size, PROT_READ | PROT_WRITE,
- MAP_PRIVATE | MAP_ANON, -1, 0);
+ ptr = mmap_(self, variant, NULL, NUM_PAGES * page_size,
+ PROT_READ | PROT_WRITE, 0, 0);
ASSERT_NE(ptr, MAP_FAILED);
/* Trivially assert we can touch the first page. */
@@ -231,32 +390,30 @@ TEST_F(guard_pages, basic)
}
/* Assert that operations applied across multiple VMAs work as expected. */
-TEST_F(guard_pages, multi_vma)
+TEST_F(guard_regions, multi_vma)
{
const unsigned long page_size = self->page_size;
char *ptr_region, *ptr, *ptr1, *ptr2, *ptr3;
int i;
/* Reserve a 100 page region over which we can install VMAs. */
- ptr_region = mmap(NULL, 100 * page_size, PROT_NONE,
- MAP_ANON | MAP_PRIVATE, -1, 0);
+ ptr_region = mmap_(self, variant, NULL, 100 * page_size,
+ PROT_NONE, 0, 0);
ASSERT_NE(ptr_region, MAP_FAILED);
/* Place a VMA of 10 pages size at the start of the region. */
- ptr1 = mmap(ptr_region, 10 * page_size, PROT_READ | PROT_WRITE,
- MAP_FIXED | MAP_ANON | MAP_PRIVATE, -1, 0);
+ ptr1 = mmap_(self, variant, ptr_region, 10 * page_size,
+ PROT_READ | PROT_WRITE, MAP_FIXED, 0);
ASSERT_NE(ptr1, MAP_FAILED);
/* Place a VMA of 5 pages size 50 pages into the region. */
- ptr2 = mmap(&ptr_region[50 * page_size], 5 * page_size,
- PROT_READ | PROT_WRITE,
- MAP_FIXED | MAP_ANON | MAP_PRIVATE, -1, 0);
+ ptr2 = mmap_(self, variant, &ptr_region[50 * page_size], 5 * page_size,
+ PROT_READ | PROT_WRITE, MAP_FIXED, 0);
ASSERT_NE(ptr2, MAP_FAILED);
/* Place a VMA of 20 pages size at the end of the region. */
- ptr3 = mmap(&ptr_region[80 * page_size], 20 * page_size,
- PROT_READ | PROT_WRITE,
- MAP_FIXED | MAP_ANON | MAP_PRIVATE, -1, 0);
+ ptr3 = mmap_(self, variant, &ptr_region[80 * page_size], 20 * page_size,
+ PROT_READ | PROT_WRITE, MAP_FIXED, 0);
ASSERT_NE(ptr3, MAP_FAILED);
/* Unmap gaps. */
@@ -326,13 +483,11 @@ TEST_F(guard_pages, multi_vma)
}
/* Now map incompatible VMAs in the gaps. */
- ptr = mmap(&ptr_region[10 * page_size], 40 * page_size,
- PROT_READ | PROT_WRITE | PROT_EXEC,
- MAP_FIXED | MAP_ANON | MAP_PRIVATE, -1, 0);
+ ptr = mmap_(self, variant, &ptr_region[10 * page_size], 40 * page_size,
+ PROT_READ | PROT_WRITE | PROT_EXEC, MAP_FIXED, 0);
ASSERT_NE(ptr, MAP_FAILED);
- ptr = mmap(&ptr_region[55 * page_size], 25 * page_size,
- PROT_READ | PROT_WRITE | PROT_EXEC,
- MAP_FIXED | MAP_ANON | MAP_PRIVATE, -1, 0);
+ ptr = mmap_(self, variant, &ptr_region[55 * page_size], 25 * page_size,
+ PROT_READ | PROT_WRITE | PROT_EXEC, MAP_FIXED, 0);
ASSERT_NE(ptr, MAP_FAILED);
/*
@@ -367,20 +522,16 @@ TEST_F(guard_pages, multi_vma)
* Assert that batched operations performed using process_madvise() work as
* expected.
*/
-TEST_F(guard_pages, process_madvise)
+TEST_F(guard_regions, process_madvise)
{
const unsigned long page_size = self->page_size;
- pid_t pid = getpid();
- int pidfd = pidfd_open(pid, 0);
char *ptr_region, *ptr1, *ptr2, *ptr3;
ssize_t count;
struct iovec vec[6];
- ASSERT_NE(pidfd, -1);
-
/* Reserve region to map over. */
- ptr_region = mmap(NULL, 100 * page_size, PROT_NONE,
- MAP_ANON | MAP_PRIVATE, -1, 0);
+ ptr_region = mmap_(self, variant, NULL, 100 * page_size,
+ PROT_NONE, 0, 0);
ASSERT_NE(ptr_region, MAP_FAILED);
/*
@@ -388,9 +539,8 @@ TEST_F(guard_pages, process_madvise)
* overwrite existing entries and test this code path against
* overwriting existing entries.
*/
- ptr1 = mmap(&ptr_region[page_size], 10 * page_size,
- PROT_READ | PROT_WRITE,
- MAP_FIXED | MAP_ANON | MAP_PRIVATE | MAP_POPULATE, -1, 0);
+ ptr1 = mmap_(self, variant, &ptr_region[page_size], 10 * page_size,
+ PROT_READ | PROT_WRITE, MAP_FIXED | MAP_POPULATE, 0);
ASSERT_NE(ptr1, MAP_FAILED);
/* We want guard markers at start/end of each VMA. */
vec[0].iov_base = ptr1;
@@ -399,9 +549,8 @@ TEST_F(guard_pages, process_madvise)
vec[1].iov_len = page_size;
/* 5 pages offset 50 pages into reserve region. */
- ptr2 = mmap(&ptr_region[50 * page_size], 5 * page_size,
- PROT_READ | PROT_WRITE,
- MAP_FIXED | MAP_ANON | MAP_PRIVATE, -1, 0);
+ ptr2 = mmap_(self, variant, &ptr_region[50 * page_size], 5 * page_size,
+ PROT_READ | PROT_WRITE, MAP_FIXED, 0);
ASSERT_NE(ptr2, MAP_FAILED);
vec[2].iov_base = ptr2;
vec[2].iov_len = page_size;
@@ -409,9 +558,8 @@ TEST_F(guard_pages, process_madvise)
vec[3].iov_len = page_size;
/* 20 pages offset 79 pages into reserve region. */
- ptr3 = mmap(&ptr_region[79 * page_size], 20 * page_size,
- PROT_READ | PROT_WRITE,
- MAP_FIXED | MAP_ANON | MAP_PRIVATE, -1, 0);
+ ptr3 = mmap_(self, variant, &ptr_region[79 * page_size], 20 * page_size,
+ PROT_READ | PROT_WRITE, MAP_FIXED, 0);
ASSERT_NE(ptr3, MAP_FAILED);
vec[4].iov_base = ptr3;
vec[4].iov_len = page_size;
@@ -425,7 +573,7 @@ TEST_F(guard_pages, process_madvise)
ASSERT_EQ(munmap(&ptr_region[99 * page_size], page_size), 0);
/* Now guard in one step. */
- count = sys_process_madvise(pidfd, vec, 6, MADV_GUARD_INSTALL, 0);
+ count = sys_process_madvise(PIDFD_SELF, vec, 6, MADV_GUARD_INSTALL, 0);
/* OK we don't have permission to do this, skip. */
if (count == -1 && errno == EPERM)
@@ -446,7 +594,7 @@ TEST_F(guard_pages, process_madvise)
ASSERT_FALSE(try_read_write_buf(&ptr3[19 * page_size]));
/* Now do the same with unguard... */
- count = sys_process_madvise(pidfd, vec, 6, MADV_GUARD_REMOVE, 0);
+ count = sys_process_madvise(PIDFD_SELF, vec, 6, MADV_GUARD_REMOVE, 0);
/* ...and everything should now succeed. */
@@ -463,17 +611,16 @@ TEST_F(guard_pages, process_madvise)
ASSERT_EQ(munmap(ptr1, 10 * page_size), 0);
ASSERT_EQ(munmap(ptr2, 5 * page_size), 0);
ASSERT_EQ(munmap(ptr3, 20 * page_size), 0);
- close(pidfd);
}
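/*
 * Note: PIDFD_SELF comes from the ../pidfd/pidfd.h header included above; it
 * is a sentinel pidfd meaning "the calling process", which is what allows
 * the pidfd_open()/close() pair to be dropped here. Illustrative call shape
 * (a sketch, values assumed):
 */
struct iovec vec = {
	.iov_base = ptr1,	/* Start of the range to guard. */
	.iov_len = page_size,	/* One page. */
};
/* Returns the number of bytes advised on success, -1 and errno on failure. */
ssize_t count = sys_process_madvise(PIDFD_SELF, &vec, 1, MADV_GUARD_INSTALL, 0);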
/* Assert that unmapping ranges does not leave guard markers behind. */
-TEST_F(guard_pages, munmap)
+TEST_F(guard_regions, munmap)
{
const unsigned long page_size = self->page_size;
char *ptr, *ptr_new1, *ptr_new2;
- ptr = mmap(NULL, 10 * page_size, PROT_READ | PROT_WRITE,
- MAP_ANON | MAP_PRIVATE, -1, 0);
+ ptr = mmap_(self, variant, NULL, 10 * page_size,
+ PROT_READ | PROT_WRITE, 0, 0);
ASSERT_NE(ptr, MAP_FAILED);
/* Guard first and last pages. */
@@ -489,11 +636,11 @@ TEST_F(guard_pages, munmap)
ASSERT_EQ(munmap(&ptr[9 * page_size], page_size), 0);
/* Map over them.*/
- ptr_new1 = mmap(ptr, page_size, PROT_READ | PROT_WRITE,
- MAP_FIXED | MAP_ANON | MAP_PRIVATE, -1, 0);
+ ptr_new1 = mmap_(self, variant, ptr, page_size, PROT_READ | PROT_WRITE,
+ MAP_FIXED, 0);
ASSERT_NE(ptr_new1, MAP_FAILED);
- ptr_new2 = mmap(&ptr[9 * page_size], page_size, PROT_READ | PROT_WRITE,
- MAP_FIXED | MAP_ANON | MAP_PRIVATE, -1, 0);
+ ptr_new2 = mmap_(self, variant, &ptr[9 * page_size], page_size,
+ PROT_READ | PROT_WRITE, MAP_FIXED, 0);
ASSERT_NE(ptr_new2, MAP_FAILED);
/* Assert that they are now not guarded. */
@@ -505,14 +652,14 @@ TEST_F(guard_pages, munmap)
}
/* Assert that mprotect() operations have no bearing on guard markers. */
-TEST_F(guard_pages, mprotect)
+TEST_F(guard_regions, mprotect)
{
const unsigned long page_size = self->page_size;
char *ptr;
int i;
- ptr = mmap(NULL, 10 * page_size, PROT_READ | PROT_WRITE,
- MAP_ANON | MAP_PRIVATE, -1, 0);
+ ptr = mmap_(self, variant, NULL, 10 * page_size,
+ PROT_READ | PROT_WRITE, 0, 0);
ASSERT_NE(ptr, MAP_FAILED);
/* Guard the middle of the range. */
@@ -553,14 +700,14 @@ TEST_F(guard_pages, mprotect)
}
/* Split and merge VMAs and make sure guard pages still behave. */
-TEST_F(guard_pages, split_merge)
+TEST_F(guard_regions, split_merge)
{
const unsigned long page_size = self->page_size;
char *ptr, *ptr_new;
int i;
- ptr = mmap(NULL, 10 * page_size, PROT_READ | PROT_WRITE,
- MAP_ANON | MAP_PRIVATE, -1, 0);
+ ptr = mmap_(self, variant, NULL, 10 * page_size,
+ PROT_READ | PROT_WRITE, 0, 0);
ASSERT_NE(ptr, MAP_FAILED);
/* Guard the whole range. */
@@ -601,14 +748,14 @@ TEST_F(guard_pages, split_merge)
}
/* Now map them again - the unmap will have cleared the guards. */
- ptr_new = mmap(&ptr[2 * page_size], page_size, PROT_READ | PROT_WRITE,
- MAP_FIXED | MAP_ANON | MAP_PRIVATE, -1, 0);
+ ptr_new = mmap_(self, variant, &ptr[2 * page_size], page_size,
+ PROT_READ | PROT_WRITE, MAP_FIXED, 0);
ASSERT_NE(ptr_new, MAP_FAILED);
- ptr_new = mmap(&ptr[5 * page_size], page_size, PROT_READ | PROT_WRITE,
- MAP_FIXED | MAP_ANON | MAP_PRIVATE, -1, 0);
+ ptr_new = mmap_(self, variant, &ptr[5 * page_size], page_size,
+ PROT_READ | PROT_WRITE, MAP_FIXED, 0);
ASSERT_NE(ptr_new, MAP_FAILED);
- ptr_new = mmap(&ptr[8 * page_size], page_size, PROT_READ | PROT_WRITE,
- MAP_FIXED | MAP_ANON | MAP_PRIVATE, -1, 0);
+ ptr_new = mmap_(self, variant, &ptr[8 * page_size], page_size,
+ PROT_READ | PROT_WRITE, MAP_FIXED, 0);
ASSERT_NE(ptr_new, MAP_FAILED);
/* Now make sure guard pages are established. */
@@ -684,14 +831,14 @@ TEST_F(guard_pages, split_merge)
}
/* Assert that MADV_DONTNEED does not remove guard markers. */
-TEST_F(guard_pages, dontneed)
+TEST_F(guard_regions, dontneed)
{
const unsigned long page_size = self->page_size;
char *ptr;
int i;
- ptr = mmap(NULL, 10 * page_size, PROT_READ | PROT_WRITE,
- MAP_ANON | MAP_PRIVATE, -1, 0);
+ ptr = mmap_(self, variant, NULL, 10 * page_size,
+ PROT_READ | PROT_WRITE, 0, 0);
ASSERT_NE(ptr, MAP_FAILED);
/* Back the whole range. */
@@ -721,8 +868,16 @@ TEST_F(guard_pages, dontneed)
ASSERT_FALSE(result);
} else {
ASSERT_TRUE(result);
- /* Make sure we really did get reset to zero page. */
- ASSERT_EQ(*curr, '\0');
+ switch (variant->backing) {
+ case ANON_BACKED:
+ /* If anon, then we get a zero page. */
+ ASSERT_EQ(*curr, '\0');
+ break;
+ default:
+ /* Otherwise, we get the file data. */
+ ASSERT_EQ(*curr, 'y');
+ break;
+ }
}
/* Now write... */
@@ -737,14 +892,14 @@ TEST_F(guard_pages, dontneed)
}
/* Assert that mlock()'ed pages work correctly with guard markers. */
-TEST_F(guard_pages, mlock)
+TEST_F(guard_regions, mlock)
{
const unsigned long page_size = self->page_size;
char *ptr;
int i;
- ptr = mmap(NULL, 10 * page_size, PROT_READ | PROT_WRITE,
- MAP_ANON | MAP_PRIVATE, -1, 0);
+ ptr = mmap_(self, variant, NULL, 10 * page_size,
+ PROT_READ | PROT_WRITE, 0, 0);
ASSERT_NE(ptr, MAP_FAILED);
/* Populate. */
@@ -810,14 +965,14 @@ TEST_F(guard_pages, mlock)
*
* - Moving a mapping alone should retain markers as they are.
*/
-TEST_F(guard_pages, mremap_move)
+TEST_F(guard_regions, mremap_move)
{
const unsigned long page_size = self->page_size;
char *ptr, *ptr_new;
/* Map 5 pages. */
- ptr = mmap(NULL, 5 * page_size, PROT_READ | PROT_WRITE,
- MAP_ANON | MAP_PRIVATE, -1, 0);
+ ptr = mmap_(self, variant, NULL, 5 * page_size,
+ PROT_READ | PROT_WRITE, 0, 0);
ASSERT_NE(ptr, MAP_FAILED);
/* Place guard markers at both ends of the 5 page span. */
@@ -831,8 +986,7 @@ TEST_F(guard_pages, mremap_move)
/* Map a new region we will move this range into. Doing this ensures
* that we have reserved a range to map into.
*/
- ptr_new = mmap(NULL, 5 * page_size, PROT_NONE, MAP_ANON | MAP_PRIVATE,
- -1, 0);
+ ptr_new = mmap_(self, variant, NULL, 5 * page_size, PROT_NONE, 0, 0);
ASSERT_NE(ptr_new, MAP_FAILED);
ASSERT_EQ(mremap(ptr, 5 * page_size, 5 * page_size,
@@ -857,14 +1011,14 @@ TEST_F(guard_pages, mremap_move)
* will have to remove guard pages manually to fix up (they'd have to do the
* same if it were a PROT_NONE mapping).
*/
-TEST_F(guard_pages, mremap_expand)
+TEST_F(guard_regions, mremap_expand)
{
const unsigned long page_size = self->page_size;
char *ptr, *ptr_new;
/* Map 10 pages... */
- ptr = mmap(NULL, 10 * page_size, PROT_READ | PROT_WRITE,
- MAP_ANON | MAP_PRIVATE, -1, 0);
+ ptr = mmap_(self, variant, NULL, 10 * page_size,
+ PROT_READ | PROT_WRITE, 0, 0);
ASSERT_NE(ptr, MAP_FAILED);
/* ...But unmap the last 5 so we can ensure we can expand into them. */
ASSERT_EQ(munmap(&ptr[5 * page_size], 5 * page_size), 0);
@@ -888,8 +1042,7 @@ TEST_F(guard_pages, mremap_expand)
ASSERT_FALSE(try_read_write_buf(&ptr[4 * page_size]));
/* Reserve a region which we can move to and expand into. */
- ptr_new = mmap(NULL, 20 * page_size, PROT_NONE,
- MAP_ANON | MAP_PRIVATE, -1, 0);
+ ptr_new = mmap_(self, variant, NULL, 20 * page_size, PROT_NONE, 0, 0);
ASSERT_NE(ptr_new, MAP_FAILED);
/* Now move and expand into it. */
@@ -920,15 +1073,15 @@ TEST_F(guard_pages, mremap_expand)
* if the user were using a PROT_NONE mapping they'd have to manually fix this
* up also so this is OK.
*/
-TEST_F(guard_pages, mremap_shrink)
+TEST_F(guard_regions, mremap_shrink)
{
const unsigned long page_size = self->page_size;
char *ptr;
int i;
/* Map 5 pages. */
- ptr = mmap(NULL, 5 * page_size, PROT_READ | PROT_WRITE,
- MAP_ANON | MAP_PRIVATE, -1, 0);
+ ptr = mmap_(self, variant, NULL, 5 * page_size,
+ PROT_READ | PROT_WRITE, 0, 0);
ASSERT_NE(ptr, MAP_FAILED);
/* Place guard markers at both ends of the 5 page span. */
@@ -984,7 +1137,7 @@ TEST_F(guard_pages, mremap_shrink)
* Assert that forking a process with VMAs that do not have VM_WIPEONFORK set
* retain guard pages.
*/
-TEST_F(guard_pages, fork)
+TEST_F(guard_regions, fork)
{
const unsigned long page_size = self->page_size;
char *ptr;
@@ -992,8 +1145,8 @@ TEST_F(guard_pages, fork)
int i;
/* Map 10 pages. */
- ptr = mmap(NULL, 10 * page_size, PROT_READ | PROT_WRITE,
- MAP_ANON | MAP_PRIVATE, -1, 0);
+ ptr = mmap_(self, variant, NULL, 10 * page_size,
+ PROT_READ | PROT_WRITE, 0, 0);
ASSERT_NE(ptr, MAP_FAILED);
/* Establish guard pages in the first 5 pages. */
@@ -1039,16 +1192,19 @@ TEST_F(guard_pages, fork)
* Assert expected behaviour after we fork populated ranges of anonymous memory
* and then guard and unguard the range.
*/
-TEST_F(guard_pages, fork_cow)
+TEST_F(guard_regions, fork_cow)
{
const unsigned long page_size = self->page_size;
char *ptr;
pid_t pid;
int i;
+ if (variant->backing != ANON_BACKED)
+ SKIP(return, "CoW only supported on anon mappings");
+
/* Map 10 pages. */
- ptr = mmap(NULL, 10 * page_size, PROT_READ | PROT_WRITE,
- MAP_ANON | MAP_PRIVATE, -1, 0);
+ ptr = mmap_(self, variant, NULL, 10 * page_size,
+ PROT_READ | PROT_WRITE, 0, 0);
ASSERT_NE(ptr, MAP_FAILED);
/* Populate range. */
@@ -1110,16 +1266,19 @@ TEST_F(guard_pages, fork_cow)
* Assert that forking a process with VMAs that do have VM_WIPEONFORK set
* behave as expected.
*/
-TEST_F(guard_pages, fork_wipeonfork)
+TEST_F(guard_regions, fork_wipeonfork)
{
const unsigned long page_size = self->page_size;
char *ptr;
pid_t pid;
int i;
+ if (variant->backing != ANON_BACKED)
+ SKIP(return, "Wipe on fork only supported on anon mappings");
+
/* Map 10 pages. */
- ptr = mmap(NULL, 10 * page_size, PROT_READ | PROT_WRITE,
- MAP_ANON | MAP_PRIVATE, -1, 0);
+ ptr = mmap_(self, variant, NULL, 10 * page_size,
+ PROT_READ | PROT_WRITE, 0, 0);
ASSERT_NE(ptr, MAP_FAILED);
/* Mark wipe on fork. */
@@ -1160,15 +1319,18 @@ TEST_F(guard_pages, fork_wipeonfork)
}
/* Ensure that MADV_FREE retains guard entries as expected. */
-TEST_F(guard_pages, lazyfree)
+TEST_F(guard_regions, lazyfree)
{
const unsigned long page_size = self->page_size;
char *ptr;
int i;
+ if (variant->backing != ANON_BACKED)
+ SKIP(return, "MADV_FREE only supported on anon mappings");
+
/* Map 10 pages. */
- ptr = mmap(NULL, 10 * page_size, PROT_READ | PROT_WRITE,
- MAP_ANON | MAP_PRIVATE, -1, 0);
+ ptr = mmap_(self, variant, NULL, 10 * page_size,
+ PROT_READ | PROT_WRITE, 0, 0);
ASSERT_NE(ptr, MAP_FAILED);
/* Guard range. */
@@ -1196,14 +1358,14 @@ TEST_F(guard_pages, lazyfree)
}
/* Ensure that MADV_POPULATE_READ, MADV_POPULATE_WRITE behave as expected. */
-TEST_F(guard_pages, populate)
+TEST_F(guard_regions, populate)
{
const unsigned long page_size = self->page_size;
char *ptr;
/* Map 10 pages. */
- ptr = mmap(NULL, 10 * page_size, PROT_READ | PROT_WRITE,
- MAP_ANON | MAP_PRIVATE, -1, 0);
+ ptr = mmap_(self, variant, NULL, 10 * page_size,
+ PROT_READ | PROT_WRITE, 0, 0);
ASSERT_NE(ptr, MAP_FAILED);
/* Guard range. */
@@ -1222,15 +1384,15 @@ TEST_F(guard_pages, populate)
}
/* Ensure that MADV_COLD, MADV_PAGEOUT do not remove guard markers. */
-TEST_F(guard_pages, cold_pageout)
+TEST_F(guard_regions, cold_pageout)
{
const unsigned long page_size = self->page_size;
char *ptr;
int i;
/* Map 10 pages. */
- ptr = mmap(NULL, 10 * page_size, PROT_READ | PROT_WRITE,
- MAP_ANON | MAP_PRIVATE, -1, 0);
+ ptr = mmap_(self, variant, NULL, 10 * page_size,
+ PROT_READ | PROT_WRITE, 0, 0);
ASSERT_NE(ptr, MAP_FAILED);
/* Guard range. */
@@ -1268,7 +1430,7 @@ TEST_F(guard_pages, cold_pageout)
}
/* Ensure that guard pages do not break userfaultd. */
-TEST_F(guard_pages, uffd)
+TEST_F(guard_regions, uffd)
{
const unsigned long page_size = self->page_size;
int uffd;
@@ -1281,6 +1443,9 @@ TEST_F(guard_pages, uffd)
struct uffdio_register reg;
struct uffdio_range range;
+ if (!is_anon_backed(variant))
+ SKIP(return, "uffd only works on anon backing");
+
/* Set up uffd. */
uffd = userfaultfd(0);
if (uffd == -1 && errno == EPERM)
@@ -1290,8 +1455,8 @@ TEST_F(guard_pages, uffd)
ASSERT_EQ(ioctl(uffd, UFFDIO_API, &api), 0);
/* Map 10 pages. */
- ptr = mmap(NULL, 10 * page_size, PROT_READ | PROT_WRITE,
- MAP_ANON | MAP_PRIVATE, -1, 0);
+ ptr = mmap_(self, variant, NULL, 10 * page_size,
+ PROT_READ | PROT_WRITE, 0, 0);
ASSERT_NE(ptr, MAP_FAILED);
/* Register the range with uffd. */
@@ -1317,4 +1482,593 @@ TEST_F(guard_pages, uffd)
ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
}
+/*
+ * Mark a region within a file-backed mapping using MADV_SEQUENTIAL so we
+ * aggressively read-ahead, then install guard regions and assert that it
+ * behaves correctly.
+ *
+ * We page out using MADV_PAGEOUT before checking guard regions so we drop page
+ * cache folios, meaning we maximise the possibility of some broken readahead.
+ */
+TEST_F(guard_regions, madvise_sequential)
+{
+ char *ptr;
+ int i;
+ const unsigned long page_size = self->page_size;
+
+ if (variant->backing == ANON_BACKED)
+ SKIP(return, "MADV_SEQUENTIAL meaningful only for file-backed");
+
+ ptr = mmap_(self, variant, NULL, 10 * page_size,
+ PROT_READ | PROT_WRITE, 0, 0);
+ ASSERT_NE(ptr, MAP_FAILED);
+
+ /* Establish a pattern of data in the file. */
+ set_pattern(ptr, 10, page_size);
+ ASSERT_TRUE(check_pattern(ptr, 10, page_size));
+
+ /* Mark it as being accessed sequentially. */
+ ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_SEQUENTIAL), 0);
+
+ /* Mark every other page a guard page. */
+ for (i = 0; i < 10; i += 2) {
+ char *ptr2 = &ptr[i * page_size];
+
+ ASSERT_EQ(madvise(ptr2, page_size, MADV_GUARD_INSTALL), 0);
+ }
+
+ /* Now page it out. */
+ ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_PAGEOUT), 0);
+
+ /* Now make sure pages are as expected. */
+ for (i = 0; i < 10; i++) {
+ char *chrp = &ptr[i * page_size];
+
+ if (i % 2 == 0) {
+ bool result = try_read_write_buf(chrp);
+
+ ASSERT_FALSE(result);
+ } else {
+ ASSERT_EQ(*chrp, 'a' + i);
+ }
+ }
+
+ /* Now remove guard pages. */
+ ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_REMOVE), 0);
+
+ /* Now make sure all data is as expected. */
+ if (!check_pattern(ptr, 10, page_size))
+ ASSERT_TRUE(false);
+
+ ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
+}
+
+/*
+ * Check that file-backed mappings implement guard regions with MAP_PRIVATE
+ * correctly.
+ */
+TEST_F(guard_regions, map_private)
+{
+ const unsigned long page_size = self->page_size;
+ char *ptr_shared, *ptr_private;
+ int i;
+
+ if (variant->backing == ANON_BACKED)
+ SKIP(return, "MAP_PRIVATE test specific to file-backed");
+
+ ptr_shared = mmap_(self, variant, NULL, 10 * page_size, PROT_READ | PROT_WRITE, 0, 0);
+ ASSERT_NE(ptr_shared, MAP_FAILED);
+
+ /* Manually mmap(), do not use mmap_() wrapper so we can force MAP_PRIVATE. */
+ ptr_private = mmap(NULL, 10 * page_size, PROT_READ | PROT_WRITE, MAP_PRIVATE, self->fd, 0);
+ ASSERT_NE(ptr_private, MAP_FAILED);
+
+ /* Set pattern in shared mapping. */
+ set_pattern(ptr_shared, 10, page_size);
+
+ /* Install guard regions in every other page in the shared mapping. */
+ for (i = 0; i < 10; i += 2) {
+ char *ptr = &ptr_shared[i * page_size];
+
+ ASSERT_EQ(madvise(ptr, page_size, MADV_GUARD_INSTALL), 0);
+ }
+
+ for (i = 0; i < 10; i++) {
+ /* Every even shared page should be guarded. */
+ ASSERT_EQ(try_read_buf(&ptr_shared[i * page_size]), i % 2 != 0);
+ /* Private mappings should always be readable. */
+ ASSERT_TRUE(try_read_buf(&ptr_private[i * page_size]));
+ }
+
+ /* Install guard regions in every other page in the private mapping. */
+ for (i = 0; i < 10; i += 2) {
+ char *ptr = &ptr_private[i * page_size];
+
+ ASSERT_EQ(madvise(ptr, page_size, MADV_GUARD_INSTALL), 0);
+ }
+
+ for (i = 0; i < 10; i++) {
+ /* Every even shared page should be guarded. */
+ ASSERT_EQ(try_read_buf(&ptr_shared[i * page_size]), i % 2 != 0);
+ /* Every odd private page should be guarded. */
+ ASSERT_EQ(try_read_buf(&ptr_private[i * page_size]), i % 2 != 0);
+ }
+
+ /* Remove guard regions from shared mapping. */
+ ASSERT_EQ(madvise(ptr_shared, 10 * page_size, MADV_GUARD_REMOVE), 0);
+
+ for (i = 0; i < 10; i++) {
+ /* Shared mappings should always be readable. */
+ ASSERT_TRUE(try_read_buf(&ptr_shared[i * page_size]));
+ /* Every even private page should be guarded. */
+ ASSERT_EQ(try_read_buf(&ptr_private[i * page_size]), i % 2 != 0);
+ }
+
+ /* Remove guard regions from private mapping. */
+ ASSERT_EQ(madvise(ptr_private, 10 * page_size, MADV_GUARD_REMOVE), 0);
+
+ for (i = 0; i < 10; i++) {
+ /* Shared mappings should always be readable. */
+ ASSERT_TRUE(try_read_buf(&ptr_shared[i * page_size]));
+ /* Private mappings should always be readable. */
+ ASSERT_TRUE(try_read_buf(&ptr_private[i * page_size]));
+ }
+
+ /* Ensure patterns are intact. */
+ ASSERT_TRUE(check_pattern(ptr_shared, 10, page_size));
+ ASSERT_TRUE(check_pattern(ptr_private, 10, page_size));
+
+ /* Now write out every other page to MAP_PRIVATE. */
+ for (i = 0; i < 10; i += 2) {
+ char *ptr = &ptr_private[i * page_size];
+
+ memset(ptr, 'a' + i, page_size);
+ }
+
+ /*
+ * At this point the mapping is:
+ *
+ * 0123456789
+ * SPSPSPSPSP
+ *
+ * Where S = shared, P = private mappings.
+ */
+
+ /* Now mark the beginning of the mapping guarded. */
+ ASSERT_EQ(madvise(ptr_private, 5 * page_size, MADV_GUARD_INSTALL), 0);
+
+ /*
+ * This renders the mapping:
+ *
+ * 0123456789
+ * xxxxxPSPSP
+ */
+
+ for (i = 0; i < 10; i++) {
+ char *ptr = &ptr_private[i * page_size];
+
+ /* Ensure guard regions as expected. */
+ ASSERT_EQ(try_read_buf(ptr), i >= 5);
+ /* The shared mapping should always succeed. */
+ ASSERT_TRUE(try_read_buf(&ptr_shared[i * page_size]));
+ }
+
+ /* Remove the guard regions altogether. */
+ ASSERT_EQ(madvise(ptr_private, 10 * page_size, MADV_GUARD_REMOVE), 0);
+
+ /*
+ *
+ * We now expect the mapping to be:
+ *
+ * 0123456789
+ * SSSSSPSPSP
+ *
+ * As we removed guard regions, the private pages from the first 5 will
+ * have been zapped, so on fault will reestablish the shared mapping.
+ */
+
+ for (i = 0; i < 10; i++) {
+ char *ptr = &ptr_private[i * page_size];
+
+ /*
+ * Assert that shared mappings in the MAP_PRIVATE mapping match
+ * the shared mapping.
+ */
+ if (i < 5 || i % 2 == 0) {
+ char *ptr_s = &ptr_shared[i * page_size];
+
+ ASSERT_EQ(memcmp(ptr, ptr_s, page_size), 0);
+ continue;
+ }
+
+ /* Everything else is a private mapping. */
+ ASSERT_TRUE(is_buf_eq(ptr, page_size, 'a' + i));
+ }
+
+ ASSERT_EQ(munmap(ptr_shared, 10 * page_size), 0);
+ ASSERT_EQ(munmap(ptr_private, 10 * page_size), 0);
+}
+
+/* Test that guard regions established over a read-only mapping function correctly. */
+TEST_F(guard_regions, readonly_file)
+{
+ const unsigned long page_size = self->page_size;
+ char *ptr;
+ int i;
+
+ if (variant->backing == ANON_BACKED)
+ SKIP(return, "Read-only test specific to file-backed");
+
+ /* Map shared so we can populate with pattern, populate it, unmap. */
+ ptr = mmap_(self, variant, NULL, 10 * page_size,
+ PROT_READ | PROT_WRITE, 0, 0);
+ ASSERT_NE(ptr, MAP_FAILED);
+ set_pattern(ptr, 10, page_size);
+ ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
+ /* Close the fd so we can re-open read-only. */
+ ASSERT_EQ(close(self->fd), 0);
+
+ /* Re-open read-only. */
+ self->fd = open(self->path, O_RDONLY);
+ ASSERT_NE(self->fd, -1);
+ /* Re-map read-only. */
+ ptr = mmap_(self, variant, NULL, 10 * page_size, PROT_READ, 0, 0);
+ ASSERT_NE(ptr, MAP_FAILED);
+
+ /* Mark every other page guarded. */
+ for (i = 0; i < 10; i += 2) {
+ char *ptr_pg = &ptr[i * page_size];
+
+ ASSERT_EQ(madvise(ptr_pg, page_size, MADV_GUARD_INSTALL), 0);
+ }
+
+ /* Assert that the guard regions are in place.*/
+ for (i = 0; i < 10; i++) {
+ char *ptr_pg = &ptr[i * page_size];
+
+ ASSERT_EQ(try_read_buf(ptr_pg), i % 2 != 0);
+ }
+
+ /* Remove guard regions. */
+ ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_REMOVE), 0);
+
+ /* Ensure the data is as expected. */
+ ASSERT_TRUE(check_pattern(ptr, 10, page_size));
+
+ ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
+}
+
+TEST_F(guard_regions, fault_around)
+{
+ const unsigned long page_size = self->page_size;
+ char *ptr;
+ int i;
+
+ if (variant->backing == ANON_BACKED)
+ SKIP(return, "Fault-around test specific to file-backed");
+
+ ptr = mmap_(self, variant, NULL, 10 * page_size,
+ PROT_READ | PROT_WRITE, 0, 0);
+ ASSERT_NE(ptr, MAP_FAILED);
+
+ /* Establish a pattern in the backing file. */
+ set_pattern(ptr, 10, page_size);
+
+ /*
+ * Now drop it from the page cache so we get major faults when next we
+ * map it.
+ */
+ ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_PAGEOUT), 0);
+
+ /* Unmap and remap 'to be sure'. */
+ ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
+ ptr = mmap_(self, variant, NULL, 10 * page_size,
+ PROT_READ | PROT_WRITE, 0, 0);
+ ASSERT_NE(ptr, MAP_FAILED);
+
+ /* Now make every even page guarded. */
+ for (i = 0; i < 10; i += 2) {
+ char *ptr_p = &ptr[i * page_size];
+
+ ASSERT_EQ(madvise(ptr_p, page_size, MADV_GUARD_INSTALL), 0);
+ }
+
+ /* Now fault in every odd page. This should trigger fault-around. */
+ for (i = 1; i < 10; i += 2) {
+ char *ptr_p = &ptr[i * page_size];
+
+ ASSERT_TRUE(try_read_buf(ptr_p));
+ }
+
+ /* Finally, ensure that guard regions are intact as expected. */
+ for (i = 0; i < 10; i++) {
+ char *ptr_p = &ptr[i * page_size];
+
+ ASSERT_EQ(try_read_buf(ptr_p), i % 2 != 0);
+ }
+
+ ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
+}
+
+TEST_F(guard_regions, truncation)
+{
+ const unsigned long page_size = self->page_size;
+ char *ptr;
+ int i;
+
+ if (variant->backing == ANON_BACKED)
+ SKIP(return, "Truncation test specific to file-backed");
+
+ ptr = mmap_(self, variant, NULL, 10 * page_size,
+ PROT_READ | PROT_WRITE, 0, 0);
+ ASSERT_NE(ptr, MAP_FAILED);
+
+ /*
+ * Establish a pattern in the backing file, just so there is data
+ * there.
+ */
+ set_pattern(ptr, 10, page_size);
+
+ /* Now make every even page guarded. */
+ for (i = 0; i < 10; i += 2) {
+ char *ptr_p = &ptr[i * page_size];
+
+ ASSERT_EQ(madvise(ptr_p, page_size, MADV_GUARD_INSTALL), 0);
+ }
+
+ /* Now assert things are as expected. */
+ for (i = 0; i < 10; i++) {
+ char *ptr_p = &ptr[i * page_size];
+
+ ASSERT_EQ(try_read_write_buf(ptr_p), i % 2 != 0);
+ }
+
+ /* Now truncate to actually used size (initialised to 100). */
+ ASSERT_EQ(ftruncate(self->fd, 10 * page_size), 0);
+
+ /* Here the guard regions will remain intact. */
+ for (i = 0; i < 10; i++) {
+ char *ptr_p = &ptr[i * page_size];
+
+ ASSERT_EQ(try_read_write_buf(ptr_p), i % 2 != 0);
+ }
+
+ /* Now truncate to half the size, then truncate again to the full size. */
+ ASSERT_EQ(ftruncate(self->fd, 5 * page_size), 0);
+ ASSERT_EQ(ftruncate(self->fd, 10 * page_size), 0);
+
+ /* Again, guard pages will remain intact. */
+ for (i = 0; i < 10; i++) {
+ char *ptr_p = &ptr[i * page_size];
+
+ ASSERT_EQ(try_read_write_buf(ptr_p), i % 2 != 0);
+ }
+
+ ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
+}
+
+TEST_F(guard_regions, hole_punch)
+{
+ const unsigned long page_size = self->page_size;
+ char *ptr;
+ int i;
+
+ if (variant->backing == ANON_BACKED)
+ SKIP(return, "Truncation test specific to file-backed");
+
+ /* Establish pattern in mapping. */
+ ptr = mmap_(self, variant, NULL, 10 * page_size,
+ PROT_READ | PROT_WRITE, 0, 0);
+ ASSERT_NE(ptr, MAP_FAILED);
+ set_pattern(ptr, 10, page_size);
+
+ /* Install a guard region in the middle of the mapping. */
+ ASSERT_EQ(madvise(&ptr[3 * page_size], 4 * page_size,
+ MADV_GUARD_INSTALL), 0);
+
+ /*
+ * The buffer will now be:
+ *
+ * 0123456789
+ * ***xxxx***
+ *
+ * Where * is data and x is the guard region.
+ */
+
+ /* Ensure established. */
+ for (i = 0; i < 10; i++) {
+ char *ptr_p = &ptr[i * page_size];
+
+ ASSERT_EQ(try_read_buf(ptr_p), i < 3 || i >= 7);
+ }
+
+ /* Now hole punch the guarded region. */
+ ASSERT_EQ(madvise(&ptr[3 * page_size], 4 * page_size,
+ MADV_REMOVE), 0);
+
+ /* Ensure guard regions remain. */
+ for (i = 0; i < 10; i++) {
+ char *ptr_p = &ptr[i * page_size];
+
+ ASSERT_EQ(try_read_buf(ptr_p), i < 3 || i >= 7);
+ }
+
+ /* Now remove guard region throughout. */
+ ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_REMOVE), 0);
+
+ /* Check that the pattern exists in non-hole punched region. */
+ ASSERT_TRUE(check_pattern(ptr, 3, page_size));
+ /* Check that hole punched region is zeroed. */
+ ASSERT_TRUE(is_buf_eq(&ptr[3 * page_size], 4 * page_size, '\0'));
+ /* Check that the pattern exists in the remainder of the file. */
+ ASSERT_TRUE(check_pattern_offset(ptr, 3, page_size, 7));
+
+ ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
+}
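/*
 * Aside: MADV_REMOVE frees the range and its backing store, i.e. for a
 * shared file mapping it behaves like punching a hole in the file itself. A
 * roughly equivalent direct-on-fd form (a sketch, assuming the filesystem
 * supports hole punching):
 */
if (fallocate(self->fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
	      3 * page_size, 4 * page_size))
	ksft_exit_fail_perror("fallocate");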
+
+/*
+ * Ensure that a memfd works correctly with guard regions, that we can write
+ * seal it then open the mapping read-only and still establish guard regions
+ * within, remove those guard regions and have everything work correctly.
+ */
+TEST_F(guard_regions, memfd_write_seal)
+{
+ const unsigned long page_size = self->page_size;
+ char *ptr;
+ int i;
+
+ if (variant->backing != SHMEM_BACKED)
+ SKIP(return, "memfd write seal test specific to shmem");
+
+ /* OK, we need a memfd, so close existing one. */
+ ASSERT_EQ(close(self->fd), 0);
+
+ /* Create and truncate memfd. */
+ self->fd = memfd_create("guard_regions_memfd_seals_test",
+ MFD_ALLOW_SEALING);
+ ASSERT_NE(self->fd, -1);
+ ASSERT_EQ(ftruncate(self->fd, 10 * page_size), 0);
+
+ /* Map, set pattern, unmap. */
+ ptr = mmap_(self, variant, NULL, 10 * page_size, PROT_READ | PROT_WRITE, 0, 0);
+ ASSERT_NE(ptr, MAP_FAILED);
+ set_pattern(ptr, 10, page_size);
+ ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
+
+ /* Write-seal the memfd. */
+ ASSERT_EQ(fcntl(self->fd, F_ADD_SEALS, F_SEAL_WRITE), 0);
+
+ /* Now map the memfd readonly. */
+ ptr = mmap_(self, variant, NULL, 10 * page_size, PROT_READ, 0, 0);
+ ASSERT_NE(ptr, MAP_FAILED);
+
+ /* Ensure pattern is as expected. */
+ ASSERT_TRUE(check_pattern(ptr, 10, page_size));
+
+ /* Now make every even page guarded. */
+ for (i = 0; i < 10; i += 2) {
+ char *ptr_p = &ptr[i * page_size];
+
+ ASSERT_EQ(madvise(ptr_p, page_size, MADV_GUARD_INSTALL), 0);
+ }
+
+ /* Now assert things are as expected. */
+ for (i = 0; i < 10; i++) {
+ char *ptr_p = &ptr[i * page_size];
+
+ ASSERT_EQ(try_read_buf(ptr_p), i % 2 != 0);
+ }
+
+ /* Now remove guard regions. */
+ ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_REMOVE), 0);
+
+ /* Ensure pattern is as expected. */
+ ASSERT_TRUE(check_pattern(ptr, 10, page_size));
+
+ /* Ensure write seal intact. */
+ for (i = 0; i < 10; i++) {
+ char *ptr_p = &ptr[i * page_size];
+
+ ASSERT_FALSE(try_write_buf(ptr_p));
+ }
+
+ ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
+}
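/*
 * Note: once F_SEAL_WRITE is set, the kernel also refuses new writable
 * shared mappings of the memfd, which is why the test maps PROT_READ above.
 * Sketch of the expected failure mode (per memfd_create(2), assumed here):
 */
char *p = mmap(NULL, 10 * page_size, PROT_READ | PROT_WRITE,
	       MAP_SHARED, self->fd, 0);
ASSERT_EQ(p, MAP_FAILED);	/* Fails with EPERM. */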
+
+
+/*
+ * Since we are now permitted to establish guard regions in read-only anonymous
+ * mappings, for the sake of thoroughness, though it probably has no practical
+ * use, test that guard regions function with a mapping to the anonymous zero
+ * page.
+ */
+TEST_F(guard_regions, anon_zeropage)
+{
+ const unsigned long page_size = self->page_size;
+ char *ptr;
+ int i;
+
+ if (!is_anon_backed(variant))
+ SKIP(return, "anon zero page test specific to anon/shmem");
+
+ /* Obtain a read-only i.e. anon zero page mapping. */
+ ptr = mmap_(self, variant, NULL, 10 * page_size, PROT_READ, 0, 0);
+ ASSERT_NE(ptr, MAP_FAILED);
+
+ /* Now make every even page guarded. */
+ for (i = 0; i < 10; i += 2) {
+ char *ptr_p = &ptr[i * page_size];
+
+ ASSERT_EQ(madvise(ptr_p, page_size, MADV_GUARD_INSTALL), 0);
+ }
+
+ /* Now assert things are as expected. */
+ for (i = 0; i < 10; i++) {
+ char *ptr_p = &ptr[i * page_size];
+
+ ASSERT_EQ(try_read_buf(ptr_p), i % 2 != 0);
+ }
+
+ /* Now remove all guard regions. */
+ ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_REMOVE), 0);
+
+ /* Now assert things are as expected. */
+ for (i = 0; i < 10; i++) {
+ char *ptr_p = &ptr[i * page_size];
+
+ ASSERT_TRUE(try_read_buf(ptr_p));
+ }
+
+ /* Ensure zero page...*/
+ ASSERT_TRUE(is_buf_eq(ptr, 10 * page_size, '\0'));
+
+ ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
+}
+
+/*
+ * Assert that /proc/$pid/pagemap correctly identifies guard region ranges.
+ */
+TEST_F(guard_regions, pagemap)
+{
+ const unsigned long page_size = self->page_size;
+ int proc_fd;
+ char *ptr;
+ int i;
+
+ proc_fd = open("/proc/self/pagemap", O_RDONLY);
+ ASSERT_NE(proc_fd, -1);
+
+ ptr = mmap_(self, variant, NULL, 10 * page_size,
+ PROT_READ | PROT_WRITE, 0, 0);
+ ASSERT_NE(ptr, MAP_FAILED);
+
+ /* Read from pagemap, and assert no guard regions are detected. */
+ for (i = 0; i < 10; i++) {
+ char *ptr_p = &ptr[i * page_size];
+ unsigned long entry = pagemap_get_entry(proc_fd, ptr_p);
+ unsigned long masked = entry & PM_GUARD_REGION;
+
+ ASSERT_EQ(masked, 0);
+ }
+
+ /* Install a guard region in every other page. */
+ for (i = 0; i < 10; i += 2) {
+ char *ptr_p = &ptr[i * page_size];
+
+ ASSERT_EQ(madvise(ptr_p, page_size, MADV_GUARD_INSTALL), 0);
+ }
+
+ /* Re-read from pagemap, and assert guard regions are detected. */
+ for (i = 0; i < 10; i++) {
+ char *ptr_p = &ptr[i * page_size];
+ unsigned long entry = pagemap_get_entry(proc_fd, ptr_p);
+ unsigned long masked = entry & PM_GUARD_REGION;
+
+ ASSERT_EQ(masked, i % 2 == 0 ? PM_GUARD_REGION : 0);
+ }
+
+ ASSERT_EQ(close(proc_fd), 0);
+ ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
+}
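/*
 * For reference: /proc/$pid/pagemap is an array of one 64-bit entry per
 * virtual page, so pagemap_get_entry() (provided by vm_util.h) can be
 * sketched roughly as follows (an assumed shape, not the real helper):
 */
static uint64_t pagemap_entry_sketch(int pagemap_fd, void *vaddr)
{
	uint64_t entry;
	size_t offset = ((uintptr_t)vaddr / getpagesize()) * sizeof(entry);

	if (pread(pagemap_fd, &entry, sizeof(entry), offset) != sizeof(entry))
		ksft_exit_fail_perror("pread");
	return entry;	/* Mask with PM_GUARD_REGION etc. to test bits. */
}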
+
TEST_HARNESS_MAIN
diff --git a/tools/testing/selftests/mm/gup_longterm.c b/tools/testing/selftests/mm/gup_longterm.c
index 9423ad439a61..21595b20bbc3 100644
--- a/tools/testing/selftests/mm/gup_longterm.c
+++ b/tools/testing/selftests/mm/gup_longterm.c
@@ -96,13 +96,17 @@ static void do_test(int fd, size_t size, enum test_type type, bool shared)
int ret;
if (ftruncate(fd, size)) {
- ksft_test_result_fail("ftruncate() failed\n");
+ if (errno == ENOENT) {
+ skip_test_dodgy_fs("ftruncate()");
+ } else {
+ ksft_test_result_fail("ftruncate() failed (%s)\n", strerror(errno));
+ }
return;
}
if (fallocate(fd, 0, 0, size)) {
if (size == pagesize)
- ksft_test_result_fail("fallocate() failed\n");
+ ksft_test_result_fail("fallocate() failed (%s)\n", strerror(errno));
else
ksft_test_result_skip("need more free huge pages\n");
return;
@@ -112,7 +116,7 @@ static void do_test(int fd, size_t size, enum test_type type, bool shared)
shared ? MAP_SHARED : MAP_PRIVATE, fd, 0);
if (mem == MAP_FAILED) {
if (size == pagesize || shared)
- ksft_test_result_fail("mmap() failed\n");
+ ksft_test_result_fail("mmap() failed (%s)\n", strerror(errno));
else
ksft_test_result_skip("need more free huge pages\n");
return;
@@ -130,7 +134,7 @@ static void do_test(int fd, size_t size, enum test_type type, bool shared)
*/
ret = mprotect(mem, size, PROT_READ);
if (ret) {
- ksft_test_result_fail("mprotect() failed\n");
+ ksft_test_result_fail("mprotect() failed (%s)\n", strerror(errno));
goto munmap;
}
/* FALLTHROUGH */
@@ -165,18 +169,20 @@ static void do_test(int fd, size_t size, enum test_type type, bool shared)
args.flags |= rw ? PIN_LONGTERM_TEST_FLAG_USE_WRITE : 0;
ret = ioctl(gup_fd, PIN_LONGTERM_TEST_START, &args);
if (ret && errno == EINVAL) {
- ksft_test_result_skip("PIN_LONGTERM_TEST_START failed\n");
+ ksft_test_result_skip("PIN_LONGTERM_TEST_START failed (EINVAL)n");
break;
} else if (ret && errno == EFAULT) {
ksft_test_result(!should_work, "Should have failed\n");
break;
} else if (ret) {
- ksft_test_result_fail("PIN_LONGTERM_TEST_START failed\n");
+ ksft_test_result_fail("PIN_LONGTERM_TEST_START failed (%s)\n",
+ strerror(errno));
break;
}
if (ioctl(gup_fd, PIN_LONGTERM_TEST_STOP))
- ksft_print_msg("[INFO] PIN_LONGTERM_TEST_STOP failed\n");
+ ksft_print_msg("[INFO] PIN_LONGTERM_TEST_STOP failed (%s)\n",
+ strerror(errno));
/*
* TODO: if the kernel ever supports long-term R/W pinning on
@@ -202,7 +208,8 @@ static void do_test(int fd, size_t size, enum test_type type, bool shared)
/* Skip on errors, as we might just lack kernel support. */
ret = io_uring_queue_init(1, &ring, 0);
if (ret < 0) {
- ksft_test_result_skip("io_uring_queue_init() failed\n");
+ ksft_test_result_skip("io_uring_queue_init() failed (%s)\n",
+ strerror(-ret));
break;
}
/*
@@ -215,13 +222,15 @@ static void do_test(int fd, size_t size, enum test_type type, bool shared)
/* Only new kernels return EFAULT. */
if (ret && (errno == ENOSPC || errno == EOPNOTSUPP ||
errno == EFAULT)) {
- ksft_test_result(!should_work, "Should have failed\n");
+ ksft_test_result(!should_work, "Should have failed (%s)\n",
+ strerror(errno));
} else if (ret) {
/*
* We might just lack support or have insufficient
* MEMLOCK limits.
*/
- ksft_test_result_skip("io_uring_register_buffers() failed\n");
+ ksft_test_result_skip("io_uring_register_buffers() failed (%s)\n",
+ strerror(-ret));
} else {
ksft_test_result(should_work, "Should have worked\n");
io_uring_unregister_buffers(&ring);
@@ -249,7 +258,7 @@ static void run_with_memfd(test_fn fn, const char *desc)
fd = memfd_create("test", 0);
if (fd < 0) {
- ksft_test_result_fail("memfd_create() failed\n");
+ ksft_test_result_fail("memfd_create() failed (%s)\n", strerror(errno));
return;
}
@@ -266,13 +275,13 @@ static void run_with_tmpfile(test_fn fn, const char *desc)
file = tmpfile();
if (!file) {
- ksft_test_result_fail("tmpfile() failed\n");
+ ksft_test_result_fail("tmpfile() failed (%s)\n", strerror(errno));
return;
}
fd = fileno(file);
if (fd < 0) {
- ksft_test_result_fail("fileno() failed\n");
+ ksft_test_result_fail("fileno() failed (%s)\n", strerror(errno));
goto close;
}
@@ -290,12 +299,12 @@ static void run_with_local_tmpfile(test_fn fn, const char *desc)
fd = mkstemp(filename);
if (fd < 0) {
- ksft_test_result_fail("mkstemp() failed\n");
+ ksft_test_result_fail("mkstemp() failed (%s)\n", strerror(errno));
return;
}
if (unlink(filename)) {
- ksft_test_result_fail("unlink() failed\n");
+ ksft_test_result_fail("unlink() failed (%s)\n", strerror(errno));
goto close;
}
@@ -317,7 +326,7 @@ static void run_with_memfd_hugetlb(test_fn fn, const char *desc,
fd = memfd_create("test", flags);
if (fd < 0) {
- ksft_test_result_skip("memfd_create() failed\n");
+ ksft_test_result_skip("memfd_create() failed (%s)\n", strerror(errno));
return;
}
diff --git a/tools/testing/selftests/mm/hugepage-mremap.c b/tools/testing/selftests/mm/hugepage-mremap.c
index ada9156cc497..c463d1c09c9b 100644
--- a/tools/testing/selftests/mm/hugepage-mremap.c
+++ b/tools/testing/selftests/mm/hugepage-mremap.c
@@ -15,7 +15,7 @@
#define _GNU_SOURCE
#include <stdlib.h>
#include <stdio.h>
-#include <asm-generic/unistd.h>
+#include <unistd.h>
#include <sys/mman.h>
#include <errno.h>
#include <fcntl.h> /* Definition of O_* constants */
diff --git a/tools/testing/selftests/mm/ksm_functional_tests.c b/tools/testing/selftests/mm/ksm_functional_tests.c
index 66b4e111b5a2..b61803e36d1c 100644
--- a/tools/testing/selftests/mm/ksm_functional_tests.c
+++ b/tools/testing/selftests/mm/ksm_functional_tests.c
@@ -11,7 +11,7 @@
#include <string.h>
#include <stdbool.h>
#include <stdint.h>
-#include <asm-generic/unistd.h>
+#include <unistd.h>
#include <errno.h>
#include <fcntl.h>
#include <sys/mman.h>
@@ -369,6 +369,7 @@ unmap:
munmap(map, size);
}
+#ifdef __NR_userfaultfd
static void test_unmerge_uffd_wp(void)
{
struct uffdio_writeprotect uffd_writeprotect;
@@ -429,6 +430,7 @@ close_uffd:
unmap:
munmap(map, size);
}
+#endif
/* Verify that KSM can be enabled / queried with prctl. */
static void test_prctl(void)
@@ -684,7 +686,9 @@ int main(int argc, char **argv)
exit(test_child_ksm());
}
+#ifdef __NR_userfaultfd
tests++;
+#endif
ksft_print_header();
ksft_set_plan(tests);
@@ -696,7 +700,9 @@ int main(int argc, char **argv)
test_unmerge();
test_unmerge_zero_pages();
test_unmerge_discarded();
+#ifdef __NR_userfaultfd
test_unmerge_uffd_wp();
+#endif
test_prot_none();
diff --git a/tools/testing/selftests/mm/map_populate.c b/tools/testing/selftests/mm/map_populate.c
index 5c8a53869b1b..9df2636c829b 100644
--- a/tools/testing/selftests/mm/map_populate.c
+++ b/tools/testing/selftests/mm/map_populate.c
@@ -18,6 +18,8 @@
#include <unistd.h>
#include "../kselftest.h"
+#include "vm_util.h"
+
#define MMAP_SZ 4096
#define BUG_ON(condition, description) \
@@ -87,6 +89,9 @@ int main(int argc, char **argv)
BUG_ON(!ftmp, "tmpfile()");
ret = ftruncate(fileno(ftmp), MMAP_SZ);
+ if (ret < 0 && errno == ENOENT) {
+ skip_test_dodgy_fs("ftruncate()");
+ }
BUG_ON(ret, "ftruncate()");
smap = mmap(0, MMAP_SZ, PROT_READ | PROT_WRITE,
diff --git a/tools/testing/selftests/mm/memfd_secret.c b/tools/testing/selftests/mm/memfd_secret.c
index 74c911aa3aea..9a0597310a76 100644
--- a/tools/testing/selftests/mm/memfd_secret.c
+++ b/tools/testing/selftests/mm/memfd_secret.c
@@ -17,7 +17,7 @@
#include <stdlib.h>
#include <string.h>
-#include <asm-generic/unistd.h>
+#include <unistd.h>
#include <errno.h>
#include <stdio.h>
#include <fcntl.h>
@@ -28,6 +28,8 @@
#define pass(fmt, ...) ksft_test_result_pass(fmt, ##__VA_ARGS__)
#define skip(fmt, ...) ksft_test_result_skip(fmt, ##__VA_ARGS__)
+#ifdef __NR_memfd_secret
+
#define PATTERN 0x55
static const int prot = PROT_READ | PROT_WRITE;
@@ -332,3 +334,13 @@ int main(int argc, char *argv[])
ksft_finished();
}
+
+#else /* __NR_memfd_secret */
+
+int main(int argc, char *argv[])
+{
+ printf("skip: skipping memfd_secret test (missing __NR_memfd_secret)\n");
+ return KSFT_SKIP;
+}
+
+#endif /* __NR_memfd_secret */
diff --git a/tools/testing/selftests/mm/mkdirty.c b/tools/testing/selftests/mm/mkdirty.c
index af2fce496912..09feeb453646 100644
--- a/tools/testing/selftests/mm/mkdirty.c
+++ b/tools/testing/selftests/mm/mkdirty.c
@@ -9,7 +9,7 @@
*/
#include <fcntl.h>
#include <signal.h>
-#include <asm-generic/unistd.h>
+#include <unistd.h>
#include <string.h>
#include <errno.h>
#include <stdlib.h>
@@ -265,6 +265,7 @@ munmap:
munmap(mmap_mem, mmap_size);
}
+#ifdef __NR_userfaultfd
static void test_uffdio_copy(void)
{
struct uffdio_register uffdio_register;
@@ -322,6 +323,7 @@ munmap:
munmap(dst, pagesize);
free(src);
}
+#endif /* __NR_userfaultfd */
int main(void)
{
@@ -334,7 +336,9 @@ int main(void)
thpsize / 1024);
tests += 3;
}
+#ifdef __NR_userfaultfd
tests += 1;
+#endif /* __NR_userfaultfd */
ksft_print_header();
ksft_set_plan(tests);
@@ -364,7 +368,9 @@ int main(void)
if (thpsize)
test_pte_mapped_thp();
/* Placing a fresh page via userfaultfd may set the PTE dirty. */
+#ifdef __NR_userfaultfd
test_uffdio_copy();
+#endif /* __NR_userfaultfd */
err = ksft_get_fail_cnt();
if (err)
diff --git a/tools/testing/selftests/mm/mlock-random-test.c b/tools/testing/selftests/mm/mlock-random-test.c
index 1cd80b0f76c3..b8d7e966f44c 100644
--- a/tools/testing/selftests/mm/mlock-random-test.c
+++ b/tools/testing/selftests/mm/mlock-random-test.c
@@ -161,9 +161,9 @@ static void test_mlock_within_limit(char *p, int alloc_size)
MLOCK_ONFAULT);
if (ret)
- ksft_exit_fail_msg("%s() failure at |%p(%d)| mlock:|%p(%d)|\n",
+ ksft_exit_fail_msg("%s() failure (%s) at |%p(%d)| mlock:|%p(%d)|\n",
is_mlock ? "mlock" : "mlock2",
- p, alloc_size,
+ strerror(errno), p, alloc_size,
p + start_offset, lock_size);
}
diff --git a/tools/testing/selftests/mm/mlock2.h b/tools/testing/selftests/mm/mlock2.h
index 1e5731bab499..81e77fa41901 100644
--- a/tools/testing/selftests/mm/mlock2.h
+++ b/tools/testing/selftests/mm/mlock2.h
@@ -3,11 +3,16 @@
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
-#include <asm-generic/unistd.h>
static int mlock2_(void *start, size_t len, int flags)
{
- return syscall(__NR_mlock2, start, len, flags);
+ int ret = syscall(__NR_mlock2, start, len, flags);
+
+ if (ret) {
+ errno = ret;
+ return -1;
+ }
+ return 0;
}
static FILE *seek_to_smaps_entry(unsigned long addr)
diff --git a/tools/testing/selftests/mm/mseal_test.c b/tools/testing/selftests/mm/mseal_test.c
index ad17005521a8..005f29c86484 100644
--- a/tools/testing/selftests/mm/mseal_test.c
+++ b/tools/testing/selftests/mm/mseal_test.c
@@ -218,7 +218,7 @@ bool seal_support(void)
bool pkey_supported(void)
{
#if defined(__i386__) || defined(__x86_64__) /* arch */
- int pkey = sys_pkey_alloc(0, 0);
+ int pkey = sys_pkey_alloc(0, PKEY_UNRESTRICTED);
if (pkey > 0)
return true;
@@ -1671,7 +1671,7 @@ static void test_seal_discard_ro_anon_on_pkey(bool seal)
setup_single_address_rw(size, &ptr);
FAIL_TEST_IF_FALSE(ptr != (void *)-1);
- pkey = sys_pkey_alloc(0, 0);
+ pkey = sys_pkey_alloc(0, PKEY_UNRESTRICTED);
FAIL_TEST_IF_FALSE(pkey > 0);
ret = sys_mprotect_pkey((void *)ptr, size, PROT_READ | PROT_WRITE, pkey);
@@ -1683,7 +1683,7 @@ static void test_seal_discard_ro_anon_on_pkey(bool seal)
}
/* sealing doesn't take effect if PKRU allow write. */
- set_pkey(pkey, 0);
+ set_pkey(pkey, PKEY_UNRESTRICTED);
ret = sys_madvise(ptr, size, MADV_DONTNEED);
FAIL_TEST_IF_FALSE(!ret);
diff --git a/tools/testing/selftests/mm/pkey-helpers.h b/tools/testing/selftests/mm/pkey-helpers.h
index f080e97b39be..ea404f80e6cb 100644
--- a/tools/testing/selftests/mm/pkey-helpers.h
+++ b/tools/testing/selftests/mm/pkey-helpers.h
@@ -13,6 +13,7 @@
#include <ucontext.h>
#include <sys/mman.h>
+#include <linux/mman.h>
#include <linux/types.h>
#include "../kselftest.h"
@@ -193,7 +194,7 @@ static inline u32 *siginfo_get_pkey_ptr(siginfo_t *si)
static inline int kernel_has_pkeys(void)
{
/* try allocating a key and see if it succeeds */
- int ret = sys_pkey_alloc(0, 0);
+ int ret = sys_pkey_alloc(0, PKEY_UNRESTRICTED);
if (ret <= 0) {
return 0;
}
diff --git a/tools/testing/selftests/mm/pkey_sighandler_tests.c b/tools/testing/selftests/mm/pkey_sighandler_tests.c
index 1ac8c8809880..b5e076a564c9 100644
--- a/tools/testing/selftests/mm/pkey_sighandler_tests.c
+++ b/tools/testing/selftests/mm/pkey_sighandler_tests.c
@@ -311,7 +311,7 @@ static void test_sigsegv_handler_with_different_pkey_for_stack(void)
__write_pkey_reg(pkey_reg);
/* Protect the new stack with MPK 1 */
- pkey = sys_pkey_alloc(0, 0);
+ pkey = sys_pkey_alloc(0, PKEY_UNRESTRICTED);
sys_mprotect_pkey(stack, STACK_SIZE, PROT_READ | PROT_WRITE, pkey);
/* Set up alternate signal stack that will use the default MPK */
@@ -484,7 +484,7 @@ static void test_pkru_sigreturn(void)
__write_pkey_reg(pkey_reg);
/* Protect the stack with MPK 2 */
- pkey = sys_pkey_alloc(0, 0);
+ pkey = sys_pkey_alloc(0, PKEY_UNRESTRICTED);
sys_mprotect_pkey(stack, STACK_SIZE, PROT_READ | PROT_WRITE, pkey);
/* Set up alternate signal stack that will use the default MPK */
diff --git a/tools/testing/selftests/mm/protection_keys.c b/tools/testing/selftests/mm/protection_keys.c
index a4683f2476f2..23ebec367015 100644
--- a/tools/testing/selftests/mm/protection_keys.c
+++ b/tools/testing/selftests/mm/protection_keys.c
@@ -42,7 +42,7 @@
#include <sys/wait.h>
#include <sys/stat.h>
#include <fcntl.h>
-#include <asm-generic/unistd.h>
+#include <unistd.h>
#include <sys/ptrace.h>
#include <setjmp.h>
@@ -463,7 +463,7 @@ static pid_t fork_lazy_child(void)
static int alloc_pkey(void)
{
int ret;
- unsigned long init_val = 0x0;
+ unsigned long init_val = PKEY_UNRESTRICTED;
dprintf1("%s()::%d, pkey_reg: 0x%016llx shadow: %016llx\n",
__func__, __LINE__, __read_pkey_reg(), shadow_pkey_reg);
diff --git a/tools/testing/selftests/mm/run_vmtests.sh b/tools/testing/selftests/mm/run_vmtests.sh
index 333c468c2699..9aff33b10999 100755
--- a/tools/testing/selftests/mm/run_vmtests.sh
+++ b/tools/testing/selftests/mm/run_vmtests.sh
@@ -187,9 +187,10 @@ if [ -n "$freepgs" ] && [ -n "$hpgsize_KB" ]; then
printf "Not enough huge pages available (%d < %d)\n" \
"$freepgs" "$needpgs"
fi
+ HAVE_HUGEPAGES=1
else
echo "no hugetlbfs support in kernel?"
- exit 1
+ HAVE_HUGEPAGES=0
fi
# filter 64bit architectures
@@ -218,13 +219,20 @@ pretty_name() {
# Usage: run_test [test binary] [arbitrary test arguments...]
run_test() {
if test_selected ${CATEGORY}; then
+ local skip=0
+
# On memory constrainted systems some tests can fail to allocate hugepages.
# perform some cleanup before the test for a higher success rate.
- if [ ${CATEGORY} == "thp" ] | [ ${CATEGORY} == "hugetlb" ]; then
- echo 3 > /proc/sys/vm/drop_caches
- sleep 2
- echo 1 > /proc/sys/vm/compact_memory
- sleep 2
+ if [ ${CATEGORY} == "thp" -o ${CATEGORY} == "hugetlb" ]; then
+ if [ "${HAVE_HUGEPAGES}" = "1" ]; then
+ echo 3 > /proc/sys/vm/drop_caches
+ sleep 2
+ echo 1 > /proc/sys/vm/compact_memory
+ sleep 2
+ else
+ echo "hugepages not supported" | tap_prefix
+ skip=1
+ fi
fi
local test=$(pretty_name "$*")
@@ -232,8 +240,12 @@ run_test() {
local sep=$(echo -n "$title" | tr "[:graph:][:space:]" -)
printf "%s\n%s\n%s\n" "$sep" "$title" "$sep" | tap_prefix
- ("$@" 2>&1) | tap_prefix
- local ret=${PIPESTATUS[0]}
+ if [ "${skip}" != "1" ]; then
+ ("$@" 2>&1) | tap_prefix
+ local ret=${PIPESTATUS[0]}
+ else
+ local ret=$ksft_skip
+ fi
count_total=$(( count_total + 1 ))
if [ $ret -eq 0 ]; then
count_pass=$(( count_pass + 1 ))
@@ -271,13 +283,15 @@ CATEGORY="hugetlb" run_test ./hugepage-vmemmap
CATEGORY="hugetlb" run_test ./hugetlb-madvise
CATEGORY="hugetlb" run_test ./hugetlb_dio
-nr_hugepages_tmp=$(cat /proc/sys/vm/nr_hugepages)
-# For this test, we need one and just one huge page
-echo 1 > /proc/sys/vm/nr_hugepages
-CATEGORY="hugetlb" run_test ./hugetlb_fault_after_madv
-CATEGORY="hugetlb" run_test ./hugetlb_madv_vs_map
-# Restore the previous number of huge pages, since further tests rely on it
-echo "$nr_hugepages_tmp" > /proc/sys/vm/nr_hugepages
+if [ "${HAVE_HUGEPAGES}" = "1" ]; then
+ nr_hugepages_tmp=$(cat /proc/sys/vm/nr_hugepages)
+ # For this test, we need exactly one huge page
+ echo 1 > /proc/sys/vm/nr_hugepages
+ CATEGORY="hugetlb" run_test ./hugetlb_fault_after_madv
+ CATEGORY="hugetlb" run_test ./hugetlb_madv_vs_map
+ # Restore the previous number of huge pages, since further tests rely on it
+ echo "$nr_hugepages_tmp" > /proc/sys/vm/nr_hugepages
+fi
if test_selected "hugetlb"; then
echo "NOTE: These hugetlb tests provide minimal coverage. Use" | tap_prefix
@@ -304,19 +318,42 @@ uffd_stress_bin=./uffd-stress
CATEGORY="userfaultfd" run_test ${uffd_stress_bin} anon 20 16
# Hugetlb tests require source and destination huge pages. Pass in half
# the size of the free pages we have, which is used for *each*.
-half_ufd_size_MB=$((freepgs / 2))
+# uffd-stress expects a region expressed in MiB, so we adjust
+# half_ufd_size_MB accordingly.
+half_ufd_size_MB=$(((freepgs * hpgsize_KB) / 1024 / 2))
CATEGORY="userfaultfd" run_test ${uffd_stress_bin} hugetlb "$half_ufd_size_MB" 32
CATEGORY="userfaultfd" run_test ${uffd_stress_bin} hugetlb-private "$half_ufd_size_MB" 32
CATEGORY="userfaultfd" run_test ${uffd_stress_bin} shmem 20 16
CATEGORY="userfaultfd" run_test ${uffd_stress_bin} shmem-private 20 16
-CATEGORY="userfaultfd" run_test ./uffd-wp-mremap
+# uffd-wp-mremap requires at least one page of each size.
+have_all_size_hugepgs=true
+declare -A nr_size_hugepgs
+for f in /sys/kernel/mm/hugepages/**/nr_hugepages; do
+ old=$(cat $f)
+ nr_size_hugepgs["$f"]="$old"
+ if [ "$old" == 0 ]; then
+ echo 1 > "$f"
+ fi
+ if [ $(cat "$f") == 0 ]; then
+ have_all_size_hugepgs=false
+ break
+ fi
+done
+if $have_all_size_hugepgs; then
+ CATEGORY="userfaultfd" run_test ./uffd-wp-mremap
+else
+ echo "# SKIP ./uffd-wp-mremap"
+fi
#cleanup
+for f in "${!nr_size_hugepgs[@]}"; do
+ echo "${nr_size_hugepgs["$f"]}" > "$f"
+done
echo "$nr_hugepgs" > /proc/sys/vm/nr_hugepages
CATEGORY="compaction" run_test ./compaction_test
-if command -v sudo &> /dev/null;
+if command -v sudo &> /dev/null && sudo -u nobody ls ./on-fault-limit >/dev/null;
then
CATEGORY="mlock" run_test sudo -u nobody ./on-fault-limit
else
@@ -379,19 +416,21 @@ CATEGORY="mremap" run_test ./mremap_dontunmap
CATEGORY="hmm" run_test bash ./test_hmm.sh smoke
# MADV_GUARD_INSTALL and MADV_GUARD_REMOVE tests
-CATEGORY="madv_guard" run_test ./guard-pages
+CATEGORY="madv_guard" run_test ./guard-regions
# MADV_POPULATE_READ and MADV_POPULATE_WRITE tests
CATEGORY="madv_populate" run_test ./madv_populate
if [ -x ./memfd_secret ]
then
-(echo 0 | sudo tee /proc/sys/kernel/yama/ptrace_scope 2>&1) | tap_prefix
+(echo 0 > /proc/sys/kernel/yama/ptrace_scope 2>&1) | tap_prefix
CATEGORY="memfd_secret" run_test ./memfd_secret
fi
# KSM KSM_MERGE_TIME_HUGE_PAGES test with size of 100
-CATEGORY="ksm" run_test ./ksm_tests -H -s 100
+if [ "${HAVE_HUGEPAGES}" = "1" ]; then
+ CATEGORY="ksm" run_test ./ksm_tests -H -s 100
+fi
# KSM KSM_MERGE_TIME test with size of 100
CATEGORY="ksm" run_test ./ksm_tests -P -s 100
# KSM MADV_MERGEABLE test with 10 identical pages
@@ -440,15 +479,17 @@ CATEGORY="thp" run_test ./transhuge-stress -d 20
# Try to create XFS if not provided
if [ -z "${SPLIT_HUGE_PAGE_TEST_XFS_PATH}" ]; then
- if test_selected "thp"; then
- if grep xfs /proc/filesystems &>/dev/null; then
- XFS_IMG=$(mktemp /tmp/xfs_img_XXXXXX)
- SPLIT_HUGE_PAGE_TEST_XFS_PATH=$(mktemp -d /tmp/xfs_dir_XXXXXX)
- truncate -s 314572800 ${XFS_IMG}
- mkfs.xfs -q ${XFS_IMG}
- mount -o loop ${XFS_IMG} ${SPLIT_HUGE_PAGE_TEST_XFS_PATH}
- MOUNTED_XFS=1
- fi
+ if [ "${HAVE_HUGEPAGES}" = "1" ]; then
+ if test_selected "thp"; then
+ if grep xfs /proc/filesystems &>/dev/null; then
+ XFS_IMG=$(mktemp /tmp/xfs_img_XXXXXX)
+ SPLIT_HUGE_PAGE_TEST_XFS_PATH=$(mktemp -d /tmp/xfs_dir_XXXXXX)
+ truncate -s 314572800 ${XFS_IMG}
+ mkfs.xfs -q ${XFS_IMG}
+ mount -o loop ${XFS_IMG} ${SPLIT_HUGE_PAGE_TEST_XFS_PATH}
+ MOUNTED_XFS=1
+ fi
+ fi
fi
fi
diff --git a/tools/testing/selftests/mm/split_huge_page_test.c b/tools/testing/selftests/mm/split_huge_page_test.c
index 3f353f3d070f..aa7400ed0e99 100644
--- a/tools/testing/selftests/mm/split_huge_page_test.c
+++ b/tools/testing/selftests/mm/split_huge_page_test.c
@@ -5,6 +5,7 @@
*/
#define _GNU_SOURCE
+#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>
@@ -14,6 +15,7 @@
#include <fcntl.h>
#include <sys/mman.h>
#include <sys/mount.h>
+#include <sys/param.h>
#include <malloc.h>
#include <stdbool.h>
#include <time.h>
@@ -261,18 +263,32 @@ void split_pte_mapped_thp(void)
close(kpageflags_fd);
}
-void split_file_backed_thp(void)
+void split_file_backed_thp(int order)
{
int status;
int fd;
- ssize_t num_written;
char tmpfs_template[] = "/tmp/thp_split_XXXXXX";
const char *tmpfs_loc = mkdtemp(tmpfs_template);
char testfile[INPUT_MAX];
+ ssize_t num_written, num_read;
+ char *file_buf1, *file_buf2;
uint64_t pgoff_start = 0, pgoff_end = 1024;
+ int i;
ksft_print_msg("Please enable pr_debug in split_huge_pages_in_file() for more info.\n");
+ file_buf1 = (char *)malloc(pmd_pagesize);
+ file_buf2 = (char *)malloc(pmd_pagesize);
+
+ if (!file_buf1 || !file_buf2) {
+ ksft_print_msg("cannot allocate file buffers\n");
+ goto out;
+ }
+
+ for (i = 0; i < pmd_pagesize; i++)
+ file_buf1[i] = (char)i;
+ memset(file_buf2, 0, pmd_pagesize);
+
status = mount("tmpfs", tmpfs_loc, "tmpfs", 0, "huge=always,size=4m");
if (status)
@@ -281,26 +297,45 @@ void split_file_backed_thp(void)
status = snprintf(testfile, INPUT_MAX, "%s/thp_file", tmpfs_loc);
if (status >= INPUT_MAX) {
ksft_exit_fail_msg("Fail to create file-backed THP split testing file\n");
+ goto cleanup;
}
- fd = open(testfile, O_CREAT|O_WRONLY, 0664);
+ fd = open(testfile, O_CREAT|O_RDWR, 0664);
if (fd == -1) {
ksft_perror("Cannot open testing file");
goto cleanup;
}
- /* write something to the file, so a file-backed THP can be allocated */
- num_written = write(fd, tmpfs_loc, strlen(tmpfs_loc) + 1);
- close(fd);
+ /* write pmd size data to the file, so a file-backed THP can be allocated */
+ num_written = write(fd, file_buf1, pmd_pagesize);
- if (num_written < 1) {
- ksft_perror("Fail to write data to testing file");
- goto cleanup;
+ if (num_written == -1 || num_written != pmd_pagesize) {
+ ksft_perror("Failed to write data to testing file");
+ goto close_file;
}
/* split the file-backed THP */
- write_debugfs(PATH_FMT, testfile, pgoff_start, pgoff_end, 0);
+ write_debugfs(PATH_FMT, testfile, pgoff_start, pgoff_end, order);
+
+ /* check file content after split */
+ status = lseek(fd, 0, SEEK_SET);
+ if (status == -1) {
+ ksft_perror("Cannot lseek file");
+ goto close_file;
+ }
+
+ num_read = read(fd, file_buf2, num_written);
+ if (num_read == -1 || num_read != num_written) {
+ ksft_perror("Cannot read file content back");
+ goto close_file;
+ }
+
+ if (memcmp(file_buf1, file_buf2, pmd_pagesize) != 0) {
+ ksft_print_msg("File content changed\n");
+ goto close_file;
+ }
+ close(fd);
status = unlink(testfile);
if (status) {
ksft_perror("Cannot remove testing file");
@@ -318,12 +353,15 @@ void split_file_backed_thp(void)
ksft_exit_fail_msg("cannot remove tmp dir: %s\n", strerror(errno));
ksft_print_msg("Please check dmesg for more information\n");
- ksft_test_result_pass("File-backed THP split test done\n");
+ ksft_test_result_pass("File-backed THP split to order %d test done\n", order);
return;
+close_file:
+ close(fd);
cleanup:
umount(tmpfs_loc);
rmdir(tmpfs_loc);
+out:
ksft_exit_fail_msg("Error occurred\n");
}
@@ -361,6 +399,7 @@ int create_pagecache_thp_and_fd(const char *testfile, size_t fd_size, int *fd,
{
size_t i;
int dummy = 0;
+ unsigned char buf[1024];
srand(time(NULL));
@@ -368,11 +407,12 @@ int create_pagecache_thp_and_fd(const char *testfile, size_t fd_size, int *fd,
if (*fd == -1)
ksft_exit_fail_msg("Failed to create a file at %s\n", testfile);
- for (i = 0; i < fd_size; i++) {
- unsigned char byte = (unsigned char)i;
+ assert(fd_size % sizeof(buf) == 0);
+ for (i = 0; i < sizeof(buf); i++)
+ buf[i] = (unsigned char)i;
+ for (i = 0; i < fd_size; i += sizeof(buf))
+ write(*fd, buf, sizeof(buf));
- write(*fd, &byte, sizeof(byte));
- }
close(*fd);
sync();
*fd = open("/proc/sys/vm/drop_caches", O_WRONLY);
@@ -420,7 +460,8 @@ err_out_unlink:
return -1;
}
-void split_thp_in_pagecache_to_order(size_t fd_size, int order, const char *fs_loc)
+void split_thp_in_pagecache_to_order_at(size_t fd_size, const char *fs_loc,
+ int order, int offset)
{
int fd;
char *addr;
@@ -438,7 +479,12 @@ void split_thp_in_pagecache_to_order(size_t fd_size, int order, const char *fs_l
return;
err = 0;
- write_debugfs(PID_FMT, getpid(), (uint64_t)addr, (uint64_t)addr + fd_size, order);
+ if (offset == -1)
+ write_debugfs(PID_FMT, getpid(), (uint64_t)addr,
+ (uint64_t)addr + fd_size, order);
+ else
+ write_debugfs(PID_FMT, getpid(), (uint64_t)addr,
+ (uint64_t)addr + fd_size, order, offset);
for (i = 0; i < fd_size; i++)
if (*(addr + i) != (char)i) {
@@ -457,9 +503,15 @@ out:
munmap(addr, fd_size);
close(fd);
unlink(testfile);
- if (err)
- ksft_exit_fail_msg("Split PMD-mapped pagecache folio to order %d failed\n", order);
- ksft_test_result_pass("Split PMD-mapped pagecache folio to order %d passed\n", order);
+ if (offset == -1) {
+ if (err)
+ ksft_exit_fail_msg("Split PMD-mapped pagecache folio to order %d failed\n", order);
+ ksft_test_result_pass("Split PMD-mapped pagecache folio to order %d passed\n", order);
+ } else {
+ if (err)
+ ksft_exit_fail_msg("Split PMD-mapped pagecache folio to order %d at in-folio offset %d failed\n", order, offset);
+ ksft_test_result_pass("Split PMD-mapped pagecache folio to order %d at in-folio offset %d passed\n", order, offset);
+ }
}
int main(int argc, char **argv)
@@ -470,6 +522,7 @@ int main(int argc, char **argv)
char fs_loc_template[] = "/tmp/thp_fs_XXXXXX";
const char *fs_loc;
bool created_tmp;
+ int offset;
ksft_print_header();
@@ -481,7 +534,7 @@ int main(int argc, char **argv)
if (argc > 1)
optional_xfs_path = argv[1];
- ksft_set_plan(1+8+2+9);
+ ksft_set_plan(1+8+1+9+9+8*4+2);
pagesize = getpagesize();
pageshift = ffs(pagesize) - 1;
@@ -498,12 +551,19 @@ int main(int argc, char **argv)
split_pmd_thp_to_order(i);
split_pte_mapped_thp();
- split_file_backed_thp();
+ for (i = 0; i < 9; i++)
+ split_file_backed_thp(i);
created_tmp = prepare_thp_fs(optional_xfs_path, fs_loc_template,
&fs_loc);
for (i = 8; i >= 0; i--)
- split_thp_in_pagecache_to_order(fd_size, i, fs_loc);
+ split_thp_in_pagecache_to_order_at(fd_size, fs_loc, i, -1);
+
+ for (i = 0; i < 9; i++)
+ for (offset = 0;
+ offset < pmd_pagesize / pagesize;
+ offset += MAX(pmd_pagesize / pagesize / 4, 1 << i))
+ split_thp_in_pagecache_to_order_at(fd_size, fs_loc, i, offset);
cleanup_thp_fs(fs_loc, created_tmp);
ksft_finished();
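
The final loop above sweeps in-folio split offsets with a stride of MAX(pmd_pagesize / pagesize / 4, 1 << i) pages. A standalone illustration of which offsets each order visits, assuming the common 4KiB-page/2MiB-PMD geometry (512 base pages per PMD folio):

    #include <stdio.h>
    #include <sys/param.h>  /* MAX() */

    int main(void)
    {
            const int pages_per_pmd = 512;  /* assumed: 2MiB PMD, 4KiB pages */

            for (int order = 0; order < 9; order++) {
                    printf("order %d:", order);
                    for (int off = 0; off < pages_per_pmd;
                         off += MAX(pages_per_pmd / 4, 1 << order))
                            printf(" %d", off);
                    printf("\n");
            }
            return 0;
    }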
diff --git a/tools/testing/selftests/mm/thuge-gen.c b/tools/testing/selftests/mm/thuge-gen.c
index e4370b79b62f..cd5174d735be 100644
--- a/tools/testing/selftests/mm/thuge-gen.c
+++ b/tools/testing/selftests/mm/thuge-gen.c
@@ -127,7 +127,7 @@ void test_mmap(unsigned long size, unsigned flags)
show(size);
ksft_test_result(size == getpagesize() || (before - after) == NUM_PAGES,
- "%s mmap\n", __func__);
+ "%s mmap %lu\n", __func__, size);
if (munmap(map, size * NUM_PAGES))
ksft_exit_fail_msg("%s: unmap %s\n", __func__, strerror(errno));
@@ -165,7 +165,7 @@ void test_shmget(unsigned long size, unsigned flags)
show(size);
ksft_test_result(size == getpagesize() || (before - after) == NUM_PAGES,
- "%s: mmap\n", __func__);
+ "%s: mmap %lu\n", __func__, size);
if (shmdt(map))
ksft_exit_fail_msg("%s: shmdt: %s\n", __func__, strerror(errno));
}
diff --git a/tools/testing/selftests/mm/uffd-common.c b/tools/testing/selftests/mm/uffd-common.c
index 717539eddf98..a37088a23ffe 100644
--- a/tools/testing/selftests/mm/uffd-common.c
+++ b/tools/testing/selftests/mm/uffd-common.c
@@ -10,7 +10,7 @@
#define BASE_PMD_ADDR ((void *)(1UL << 30))
volatile bool test_uffdio_copy_eexist = true;
-unsigned long nr_cpus, nr_pages, nr_pages_per_cpu, page_size;
+unsigned long nr_parallel, nr_pages, nr_pages_per_cpu, page_size;
char *area_src, *area_src_alias, *area_dst, *area_dst_alias, *area_remap;
int uffd = -1, uffd_flags, finished, *pipefd, test_type;
bool map_shared;
@@ -269,7 +269,7 @@ void uffd_test_ctx_clear(void)
size_t i;
if (pipefd) {
- for (i = 0; i < nr_cpus * 2; ++i) {
+ for (i = 0; i < nr_parallel * 2; ++i) {
if (close(pipefd[i]))
err("close pipefd");
}
@@ -323,7 +323,7 @@ int uffd_test_ctx_init(uint64_t features, const char **errmsg)
ret = userfaultfd_open(&features);
if (ret) {
if (errmsg)
- *errmsg = "possible lack of priviledge";
+ *errmsg = "possible lack of privilege";
return ret;
}
@@ -348,7 +348,7 @@ int uffd_test_ctx_init(uint64_t features, const char **errmsg)
/*
* After initialization of area_src, we must explicitly release pages
* for area_dst to make sure it's fully empty. Otherwise we could have
- * some area_dst pages be errornously initialized with zero pages,
+ * some area_dst pages be erroneously initialized with zero pages,
* hence we could hit memory corruption later in the test.
*
* One example is when THP is globally enabled, above allocate_area()
@@ -365,10 +365,10 @@ int uffd_test_ctx_init(uint64_t features, const char **errmsg)
*/
uffd_test_ops->release_pages(area_dst);
- pipefd = malloc(sizeof(int) * nr_cpus * 2);
+ pipefd = malloc(sizeof(int) * nr_parallel * 2);
if (!pipefd)
err("pipefd");
- for (cpu = 0; cpu < nr_cpus; cpu++)
+ for (cpu = 0; cpu < nr_parallel; cpu++)
if (pipe2(&pipefd[cpu * 2], O_CLOEXEC | O_NONBLOCK))
err("pipe");
@@ -673,7 +673,11 @@ int uffd_open_dev(unsigned int flags)
int uffd_open_sys(unsigned int flags)
{
+#ifdef __NR_userfaultfd
return syscall(__NR_userfaultfd, flags);
+#else
+ return -1;
+#endif
}
int uffd_open(unsigned int flags)
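
With the #ifdef above, uffd_open_sys() degrades gracefully when the libc headers predate __NR_userfaultfd. A condensed sketch of the overall open strategy this file implements, combining the guarded syscall with the /dev/userfaultfd fallback (uffd_open_any() is a made-up name; USERFAULTFD_IOC_NEW is the real ioctl):

    #include <fcntl.h>
    #include <sys/ioctl.h>
    #include <sys/syscall.h>
    #include <unistd.h>
    #include <linux/userfaultfd.h>

    static int uffd_open_any(unsigned int flags)
    {
            int dev_fd, uffd = -1;

    #ifdef __NR_userfaultfd
            uffd = syscall(__NR_userfaultfd, flags);
            if (uffd >= 0)
                    return uffd;
    #endif
            /* Fall back to the device node exposed by newer kernels. */
            dev_fd = open("/dev/userfaultfd", O_RDWR | O_CLOEXEC);
            if (dev_fd < 0)
                    return -1;
            uffd = ioctl(dev_fd, USERFAULTFD_IOC_NEW, flags);
            close(dev_fd);
            return uffd;
    }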
diff --git a/tools/testing/selftests/mm/uffd-common.h b/tools/testing/selftests/mm/uffd-common.h
index a70ae10b5f62..7700cbfa3975 100644
--- a/tools/testing/selftests/mm/uffd-common.h
+++ b/tools/testing/selftests/mm/uffd-common.h
@@ -98,7 +98,7 @@ struct uffd_test_case_ops {
};
typedef struct uffd_test_case_ops uffd_test_case_ops_t;
-extern unsigned long nr_cpus, nr_pages, nr_pages_per_cpu, page_size;
+extern unsigned long nr_parallel, nr_pages, nr_pages_per_cpu, page_size;
extern char *area_src, *area_src_alias, *area_dst, *area_dst_alias, *area_remap;
extern int uffd, uffd_flags, finished, *pipefd, test_type;
extern bool map_shared;
diff --git a/tools/testing/selftests/mm/uffd-stress.c b/tools/testing/selftests/mm/uffd-stress.c
index a4b83280998a..40af7f67c407 100644
--- a/tools/testing/selftests/mm/uffd-stress.c
+++ b/tools/testing/selftests/mm/uffd-stress.c
@@ -33,10 +33,11 @@
* pthread_mutex_lock will also verify the atomicity of the memory
* transfer (UFFDIO_COPY).
*/
-#include <asm-generic/unistd.h>
+
#include "uffd-common.h"
uint64_t features;
+#ifdef __NR_userfaultfd
#define BOUNCE_RANDOM (1<<0)
#define BOUNCE_RACINGFAULTS (1<<1)
@@ -179,12 +180,12 @@ static void *background_thread(void *arg)
static int stress(struct uffd_args *args)
{
unsigned long cpu;
- pthread_t locking_threads[nr_cpus];
- pthread_t uffd_threads[nr_cpus];
- pthread_t background_threads[nr_cpus];
+ pthread_t locking_threads[nr_parallel];
+ pthread_t uffd_threads[nr_parallel];
+ pthread_t background_threads[nr_parallel];
finished = 0;
- for (cpu = 0; cpu < nr_cpus; cpu++) {
+ for (cpu = 0; cpu < nr_parallel; cpu++) {
if (pthread_create(&locking_threads[cpu], &attr,
locking_thread, (void *)cpu))
return 1;
@@ -202,7 +203,7 @@ static int stress(struct uffd_args *args)
background_thread, (void *)cpu))
return 1;
}
- for (cpu = 0; cpu < nr_cpus; cpu++)
+ for (cpu = 0; cpu < nr_parallel; cpu++)
if (pthread_join(background_threads[cpu], NULL))
return 1;
@@ -218,11 +219,11 @@ static int stress(struct uffd_args *args)
uffd_test_ops->release_pages(area_src);
finished = 1;
- for (cpu = 0; cpu < nr_cpus; cpu++)
+ for (cpu = 0; cpu < nr_parallel; cpu++)
if (pthread_join(locking_threads[cpu], NULL))
return 1;
- for (cpu = 0; cpu < nr_cpus; cpu++) {
+ for (cpu = 0; cpu < nr_parallel; cpu++) {
char c;
if (bounces & BOUNCE_POLL) {
if (write(pipefd[cpu*2+1], &c, 1) != 1)
@@ -245,11 +246,11 @@ static int userfaultfd_stress(void)
{
void *area;
unsigned long nr;
- struct uffd_args args[nr_cpus];
+ struct uffd_args args[nr_parallel];
uint64_t mem_size = nr_pages * page_size;
int flags = 0;
- memset(args, 0, sizeof(struct uffd_args) * nr_cpus);
+ memset(args, 0, sizeof(struct uffd_args) * nr_parallel);
if (features & UFFD_FEATURE_WP_UNPOPULATED && test_type == TEST_ANON)
flags = UFFD_FEATURE_WP_UNPOPULATED;
@@ -324,7 +325,7 @@ static int userfaultfd_stress(void)
*/
uffd_test_ops->release_pages(area_dst);
- uffd_stats_reset(args, nr_cpus);
+ uffd_stats_reset(args, nr_parallel);
/* bounce pass */
if (stress(args)) {
@@ -358,7 +359,7 @@ static int userfaultfd_stress(void)
swap(area_src_alias, area_dst_alias);
- uffd_stats_report(args, nr_cpus);
+ uffd_stats_report(args, nr_parallel);
}
uffd_test_ctx_clear();
@@ -411,8 +412,8 @@ static void parse_test_type_arg(const char *raw_type)
* feature.
*/
- if (uffd_get_features(&features))
- err("failed to get available features");
+ if (uffd_get_features(&features) && errno == ENOENT)
+ ksft_exit_skip("failed to get available features (%d)\n", errno);
test_uffdio_wp = test_uffdio_wp &&
(features & UFFD_FEATURE_PAGEFAULT_FLAG_WP);
@@ -434,6 +435,7 @@ static void sigalrm(int sig)
int main(int argc, char **argv)
{
+ unsigned long nr_cpus;
size_t bytes;
if (argc < 4)
@@ -453,10 +455,19 @@ int main(int argc, char **argv)
}
nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);
+ if (nr_cpus > 32) {
+ /* Don't let calculation below go to zero. */
+ ksft_print_msg("_SC_NPROCESSORS_ONLN (%lu) too large, capping nr_threads to 32\n",
+ nr_cpus);
+ nr_parallel = 32;
+ } else {
+ nr_parallel = nr_cpus;
+ }
- nr_pages_per_cpu = bytes / page_size / nr_cpus;
+ nr_pages_per_cpu = bytes / page_size / nr_parallel;
if (!nr_pages_per_cpu) {
- _err("invalid MiB");
+ _err("pages_per_cpu = 0, cannot test (%lu / %lu / %lu)",
+ bytes, page_size, nr_parallel);
usage();
}
@@ -465,9 +476,21 @@ int main(int argc, char **argv)
_err("invalid bounces");
usage();
}
- nr_pages = nr_pages_per_cpu * nr_cpus;
+ nr_pages = nr_pages_per_cpu * nr_parallel;
printf("nr_pages: %lu, nr_pages_per_cpu: %lu\n",
nr_pages, nr_pages_per_cpu);
return userfaultfd_stress();
}
+
+#else /* __NR_userfaultfd */
+
+#warning "missing __NR_userfaultfd definition"
+
+int main(void)
+{
+ printf("skip: Skipping userfaultfd test (missing __NR_userfaultfd)\n");
+ return KSFT_SKIP;
+}
+
+#endif /* __NR_userfaultfd */
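
The cap exists because nr_pages_per_cpu is integer division; on very wide machines the hugetlb variants could otherwise round down to zero pages per thread. A quick check of the arithmetic, with a hypothetical 128MiB region of 2MiB huge pages on a 192-CPU box:

    #include <stdio.h>

    int main(void)
    {
            unsigned long bytes = 128UL << 20;      /* assumed test region */
            unsigned long page_size = 2UL << 20;    /* 2MiB huge pages */
            unsigned long nr_cpus = 192;            /* hypothetical machine */
            unsigned long nr_parallel = nr_cpus > 32 ? 32 : nr_cpus;

            printf("uncapped: %lu pages per thread\n", bytes / page_size / nr_cpus);     /* 0 */
            printf("capped:   %lu pages per thread\n", bytes / page_size / nr_parallel); /* 2 */
            return 0;
    }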
diff --git a/tools/testing/selftests/mm/uffd-unit-tests.c b/tools/testing/selftests/mm/uffd-unit-tests.c
index 9ff71fa1f9bf..e8fd9011c2a3 100644
--- a/tools/testing/selftests/mm/uffd-unit-tests.c
+++ b/tools/testing/selftests/mm/uffd-unit-tests.c
@@ -5,11 +5,12 @@
* Copyright (C) 2015-2023 Red Hat, Inc.
*/
-#include <asm-generic/unistd.h>
#include "uffd-common.h"
#include "../../../../mm/gup_test.h"
+#ifdef __NR_userfaultfd
+
/* The unit test doesn't need a large or random size, make it 32MB for now */
#define UFFD_TEST_MEM_SIZE (32UL << 20)
@@ -25,6 +26,8 @@
#define ALIGN_UP(x, align_to) \
((__typeof__(x))((((unsigned long)(x)) + ((align_to)-1)) & ~((align_to)-1)))
+#define MAX(a, b) (((a) > (b)) ? (a) : (b))
+
struct mem_type {
const char *name;
unsigned int mem_flag;
@@ -195,9 +198,10 @@ uffd_setup_environment(uffd_test_args_t *args, uffd_test_case_t *test,
else
page_size = psize();
- nr_pages = UFFD_TEST_MEM_SIZE / page_size;
+ /* Ensure we have at least 2 pages */
+ nr_pages = MAX(UFFD_TEST_MEM_SIZE, page_size * 2) / page_size;
/* TODO: remove this global var.. it's so ugly */
- nr_cpus = 1;
+ nr_parallel = 1;
/* Initialize test arguments */
args->mem_type = mem_type;
@@ -1558,3 +1562,14 @@ int main(int argc, char *argv[])
return ksft_get_fail_cnt() ? KSFT_FAIL : KSFT_PASS;
}
+#else /* __NR_userfaultfd */
+
+#warning "missing __NR_userfaultfd definition"
+
+int main(void)
+{
+ printf("Skipping %s (missing __NR_userfaultfd)\n", __file__);
+ return KSFT_SKIP;
+}
+
+#endif /* __NR_userfaultfd */
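
The MAX() clamp a few hunks up addresses the same failure shape: when page_size exceeds UFFD_TEST_MEM_SIZE / 2 (hugetlb runs with very large huge pages), the old division yielded fewer than the two pages the tests require. Checking the arithmetic, with 512MiB taken as an illustrative huge page size:

    #include <stdio.h>

    #define UFFD_TEST_MEM_SIZE (32UL << 20)
    #define MAX(a, b) (((a) > (b)) ? (a) : (b))

    int main(void)
    {
            unsigned long page_size = 512UL << 20;  /* assumed huge page size */

            printf("old: %lu pages\n", UFFD_TEST_MEM_SIZE / page_size);  /* 0 */
            printf("new: %lu pages\n",
                   MAX(UFFD_TEST_MEM_SIZE, page_size * 2) / page_size);  /* 2 */
            return 0;
    }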
diff --git a/tools/testing/selftests/mm/uffd-wp-mremap.c b/tools/testing/selftests/mm/uffd-wp-mremap.c
index 2c4f984bd73c..c2ba7d46c7b4 100644
--- a/tools/testing/selftests/mm/uffd-wp-mremap.c
+++ b/tools/testing/selftests/mm/uffd-wp-mremap.c
@@ -182,7 +182,10 @@ static void test_one_folio(size_t size, bool private, bool swapout, bool hugetlb
/* Register range for uffd-wp. */
if (userfaultfd_open(&features)) {
- ksft_test_result_fail("userfaultfd_open() failed\n");
+ if (errno == ENOENT)
+ ksft_test_result_skip("userfaultfd not available\n");
+ else
+ ksft_test_result_fail("userfaultfd_open() failed\n");
goto out;
}
if (uffd_register(uffd, mem, size, false, true, false)) {
diff --git a/tools/testing/selftests/mm/va_high_addr_switch.sh b/tools/testing/selftests/mm/va_high_addr_switch.sh
index 2c725773cd79..1f92e8caceac 100755
--- a/tools/testing/selftests/mm/va_high_addr_switch.sh
+++ b/tools/testing/selftests/mm/va_high_addr_switch.sh
@@ -41,6 +41,31 @@ check_supported_x86_64()
fi
}
+check_supported_ppc64()
+{
+ local config="/proc/config.gz"
+ [[ -f "${config}" ]] || config="/boot/config-$(uname -r)"
+ [[ -f "${config}" ]] || fail "Cannot find kernel config in /proc or /boot"
+
+ local pg_table_levels=$(gzip -dcfq "${config}" | grep PGTABLE_LEVELS | cut -d'=' -f 2)
+ if [[ "${pg_table_levels}" -lt 5 ]]; then
+ echo "$0: PGTABLE_LEVELS=${pg_table_levels}, must be >= 5 to run this test"
+ exit $ksft_skip
+ fi
+
+ local mmu_support=$(grep -m1 "mmu" /proc/cpuinfo | awk '{print $3}')
+ if [[ "$mmu_support" != "radix" ]]; then
+ echo "$0: System does not use Radix MMU, required for 5-level paging"
+ exit $ksft_skip
+ fi
+
+ local hugepages_total=$(awk '/HugePages_Total/ {print $2}' /proc/meminfo)
+ if [[ "${hugepages_total}" -eq 0 ]]; then
+ echo "$0: HugePages are not enabled, required for some tests"
+ exit $ksft_skip
+ fi
+}
+
check_test_requirements()
{
# The test supports x86_64 and powerpc64. We currently have no useful
@@ -50,6 +75,9 @@ check_test_requirements()
"x86_64")
check_supported_x86_64
;;
+ "ppc64le"|"ppc64")
+ check_supported_ppc64
+ ;;
*)
return 0
;;
diff --git a/tools/testing/selftests/mm/vm_util.h b/tools/testing/selftests/mm/vm_util.h
index b60ac68a9dc8..6effafdc4d8a 100644
--- a/tools/testing/selftests/mm/vm_util.h
+++ b/tools/testing/selftests/mm/vm_util.h
@@ -5,11 +5,13 @@
#include <err.h>
#include <strings.h> /* ffsl() */
#include <unistd.h> /* _SC_PAGESIZE */
+#include "../kselftest.h"
#define BIT_ULL(nr) (1ULL << (nr))
#define PM_SOFT_DIRTY BIT_ULL(55)
#define PM_MMAP_EXCLUSIVE BIT_ULL(56)
#define PM_UFFD_WP BIT_ULL(57)
+#define PM_GUARD_REGION BIT_ULL(58)
#define PM_FILE BIT_ULL(61)
#define PM_SWAP BIT_ULL(62)
#define PM_PRESENT BIT_ULL(63)
@@ -31,6 +33,23 @@ static inline unsigned int pshift(void)
return __page_shift;
}
+/*
+ * Plan 9 FS has bugs (at least on QEMU) where certain operations fail with
+ * ENOENT on unlinked files. See
+ * https://gitlab.com/qemu-project/qemu/-/issues/103 for some info about such
+ * bugs. There are rumours of NFS implementations with similar bugs.
+ *
+ * Ideally, tests should just detect filesystems known to have such issues and
+ * bail early. But 9pfs has the additional "feature" that it causes fstatfs to
+ * pass through the f_type field from the host filesystem. To avoid having to
+ * scrape /proc/mounts or resort to some other hackery, tests can call this
+ * function when it seems such a bug might have been encountered.
+ */
+static inline void skip_test_dodgy_fs(const char *op_name)
+{
+ ksft_test_result_skip("%s failed with ENOENT. Filesystem might be buggy (9pfs?)\n", op_name);
+}
+
uint64_t pagemap_get_entry(int fd, char *start);
bool pagemap_is_softdirty(int fd, char *start);
bool pagemap_is_swapped(int fd, char *start);
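
A sketch of the intended call site for skip_test_dodgy_fs(), assuming a test that has already open()ed and unlink()ed its backing file (fill_file(), fd and size are hypothetical stand-ins for the surrounding test body):

    #define _GNU_SOURCE
    #include <errno.h>
    #include <fcntl.h>
    #include <string.h>
    #include "vm_util.h"    /* skip_test_dodgy_fs() and kselftest helpers */

    static void fill_file(int fd, size_t size)
    {
            if (fallocate(fd, 0, 0, size)) {
                    if (errno == ENOENT) {
                            /* Likely the 9pfs bug described above: skip, don't fail. */
                            skip_test_dodgy_fs("fallocate()");
                            return;
                    }
                    ksft_exit_fail_msg("fallocate(): %s\n", strerror(errno));
            }
    }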