author     Linus Torvalds <torvalds@linux-foundation.org>  2024-09-22 11:19:35 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2024-09-22 11:19:35 -0700
commit     de5cb0dcb74c294ec527eddfe5094acfdb21ff21 (patch)
tree       f669adb523b265f9b979c136df8a8334ea74ec62
parent     af9c191ac2a0c857f59d75b6812fef078ab1cefe (diff)
parent     533ab223aa1a036cfe5d6747fa3be92069f80988 (diff)
Merge branch 'address-masking'
Merge user access fast validation using address masking.

This allows architectures to optionally use a data dependent address
masking model instead of a conditional branch for validating user
accesses. That avoids the Spectre-v1 speculation barriers.

Right now only x86-64 takes advantage of this, and not all architectures
will be able to do it. It requires a guard region between the user and
kernel address spaces (so that you can't overflow from one to the other),
and an easy way to generate a guaranteed-to-fault address for invalid
user pointers.

Also note that this currently assumes that there is no difference between
user read and write accesses. If extended to architectures like powerpc,
we'll also need to separate out the user read-vs-write cases.

* address-masking:
  x86: make the masked_user_access_begin() macro use its argument only once
  x86: do the user address masking outside the user access area
  x86: support user address masking instead of non-speculative conditional
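To make the idea concrete, here is a stand-alone user-space sketch of the
sign-extension masking (an illustration only, not the kernel code; the
mask_user_address() name is borrowed from the patch below and the sample
addresses are made up). It relies on the x86-64 split where user pointers
have bit 63 clear and kernel pointers have it set:

    /* Arithmetically shifting the address right by 63 yields 0 for a
     * valid user pointer and all-ones for a kernel pointer; OR-ing that
     * back in leaves user pointers untouched and turns anything else
     * into an all-ones address that is guaranteed to fault.
     */
    #include <inttypes.h>
    #include <stdio.h>

    static uint64_t mask_user_address(uint64_t addr)
    {
            return addr | (uint64_t)((int64_t)addr >> 63);
    }

    int main(void)
    {
            uint64_t user   = 0x00007fffdeadbeefULL;   /* typical user address   */
            uint64_t kernel = 0xffff888012345678ULL;   /* typical kernel address */

            printf("user   -> %#018" PRIx64 "\n", mask_user_address(user));
            printf("kernel -> %#018" PRIx64 "\n", mask_user_address(kernel));
            return 0;
    }

Running it prints the user address unchanged and all-ones for the kernel
address, which is the data-dependent replacement for the conditional check.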
Diffstat:
 -rw-r--r--  arch/x86/include/asm/uaccess_64.h  | 11
 -rw-r--r--  fs/select.c                        |  4
 -rw-r--r--  include/linux/uaccess.h            |  7
 -rw-r--r--  lib/strncpy_from_user.c            |  9
 -rw-r--r--  lib/strnlen_user.c                 |  9
 5 files changed, 39 insertions(+), 1 deletion(-)
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
index 04789f45ab2b..afce8ee5d7b7 100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -54,6 +54,17 @@ static inline unsigned long __untagged_addr_remote(struct mm_struct *mm,
#define valid_user_address(x) ((__force long)(x) >= 0)
/*
+ * Masking the user address is an alternative to a conditional
+ * user_access_begin that can avoid the fencing. This only works
+ * for dense accesses starting at the address.
+ */
+#define mask_user_address(x) ((typeof(x))((long)(x)|((long)(x)>>63)))
+#define masked_user_access_begin(x) ({ \
+ __auto_type __masked_ptr = (x); \
+ __masked_ptr = mask_user_address(__masked_ptr); \
+ __uaccess_begin(); __masked_ptr; })
+
+/*
* User pointers can have tag bits on x86-64. This scheme tolerates
* arbitrary values in those bits rather than masking them off.
*
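The statement-expression form of masked_user_access_begin() above evaluates
its argument exactly once, which is what the "use its argument only once"
commit in this branch addresses. A user-space analog (a sketch assuming a
64-bit build and GNU C extensions; __uaccess_begin() is kernel-only, so it
is replaced here with an invented fake_uaccess_begin() no-op):

    /* The __auto_type temporary means an argument with side effects,
     * like p++ below, is evaluated a single time before masking.
     */
    #include <stdio.h>

    #define fake_uaccess_begin()  do { } while (0)

    #define mask_user_address(x) ((typeof(x))((long)(x) | ((long)(x) >> 63)))
    #define masked_user_access_begin(x) ({                          \
            __auto_type __masked_ptr = (x);                         \
            __masked_ptr = mask_user_address(__masked_ptr);         \
            fake_uaccess_begin(); __masked_ptr; })

    int main(void)
    {
            char buf[2] = "a";
            char *p = buf;

            char *masked = masked_user_access_begin(p++);  /* p++ runs once */
            printf("argument evaluated %td time(s), masked == buf: %d\n",
                   p - buf, masked == buf);
            return 0;
    }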
diff --git a/fs/select.c b/fs/select.c
index cae82e9e0dcc..437034ed85c6 100644
--- a/fs/select.c
+++ b/fs/select.c
@@ -777,7 +777,9 @@ static inline int get_sigset_argpack(struct sigset_argpack *to,
{
// the path is hot enough for overhead of copy_from_user() to matter
if (from) {
- if (!user_read_access_begin(from, sizeof(*from)))
+ if (can_do_masked_user_access())
+ from = masked_user_access_begin(from);
+ else if (!user_read_access_begin(from, sizeof(*from)))
return -EFAULT;
unsafe_get_user(to->p, &from->p, Efault);
unsafe_get_user(to->size, &from->size, Efault);
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index d8e4105a2f21..39c7cf82b0c2 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -33,6 +33,13 @@
})
#endif
+#ifdef masked_user_access_begin
+ #define can_do_masked_user_access() 1
+#else
+ #define can_do_masked_user_access() 0
+ #define masked_user_access_begin(src) NULL
+#endif
+
/*
* Architectures should provide two primitives (raw_copy_{to,from}_user())
* and get rid of their private instances of copy_{to,from}_user() and
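The generic definitions above let call sites use the same shape everywhere:
on architectures that do not provide masked_user_access_begin(),
can_do_masked_user_access() is the constant 0, the compiler discards the
masked branch, and the NULL stub only has to keep that dead branch
compiling. A user-space sketch of the pattern (fast_begin(),
HAVE_FAST_PATH, and read_value() are invented names for illustration):

    /* When the "arch" does not define fast_begin(), HAVE_FAST_PATH is
     * the constant 0: the if-branch below is dead code the compiler
     * drops, but it still has to type-check, which is what the NULL
     * stub is for.
     */
    #include <stddef.h>
    #include <stdio.h>

    #ifdef fast_begin
    # define HAVE_FAST_PATH 1
    #else
    # define HAVE_FAST_PATH 0
    # define fast_begin(p)  NULL            /* stub, never executed */
    #endif

    static int read_value(const int *src)
    {
            if (HAVE_FAST_PATH) {
                    src = fast_begin(src);  /* fast path on capable arches */
                    return *src;
            }
            /* slow path: an explicit range check would precede the access */
            return *src;
    }

    int main(void)
    {
            int v = 42;
            printf("%d\n", read_value(&v));   /* prints 42 via the slow path */
            return 0;
    }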
diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c
index 6432b8c3e431..989a12a67872 100644
--- a/lib/strncpy_from_user.c
+++ b/lib/strncpy_from_user.c
@@ -120,6 +120,15 @@ long strncpy_from_user(char *dst, const char __user *src, long count)
if (unlikely(count <= 0))
return 0;
+ if (can_do_masked_user_access()) {
+ long retval;
+
+ src = masked_user_access_begin(src);
+ retval = do_strncpy_from_user(dst, src, count, count);
+ user_read_access_end();
+ return retval;
+ }
+
max_addr = TASK_SIZE_MAX;
src_addr = (unsigned long)untagged_addr(src);
if (likely(src_addr < max_addr)) {
diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c
index feeb935a2299..6e489f9e90f1 100644
--- a/lib/strnlen_user.c
+++ b/lib/strnlen_user.c
@@ -96,6 +96,15 @@ long strnlen_user(const char __user *str, long count)
if (unlikely(count <= 0))
return 0;
+ if (can_do_masked_user_access()) {
+ long retval;
+
+ str = masked_user_access_begin(str);
+ retval = do_strnlen_user(str, count, count);
+ user_read_access_end();
+ return retval;
+ }
+
max_addr = TASK_SIZE_MAX;
src_addr = (unsigned long)untagged_addr(str);
if (likely(src_addr < max_addr)) {