author		Kees Cook <keescook@chromium.org>	2019-09-25 16:47:39 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2019-09-25 17:51:40 -0700
commit		9dd819a15162f8f82a6001b090caa38c18297b39 (patch)
tree		3dbec1875f99adf45fbf236dcfa53efd1497e8cb /include/linux/uaccess.h
parent		d5372c39132958679c480d0295dd328c741c7a41 (diff)
uaccess: add missing __must_check attributes
The usercopy implementation comments describe that callers of the
copy_*_user() family of functions must always have their return values
checked. This can be enforced at compile time with __must_check, so add
it where needed.

Link: http://lkml.kernel.org/r/201908251609.ADAD5CAAC1@keescook
Signed-off-by: Kees Cook <keescook@chromium.org>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Dan Carpenter <dan.carpenter@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
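To illustrate the effect on callers, here is a minimal sketch (not part
of the patch; the example_ioctl() handler and struct example_args are
hypothetical). With __must_check on the copy_*_user() family, silently
dropping the return value draws a compile-time warning, so callers must
test how many bytes were left uncopied:

#include <linux/errno.h>
#include <linux/uaccess.h>

/* Hypothetical argument block, for illustration only. */
struct example_args {
	int flags;
};

static long example_ioctl(void __user *argp)
{
	struct example_args args;

	/*
	 * Ignoring the result, e.g.
	 *
	 *	copy_from_user(&args, argp, sizeof(args));
	 *
	 * now triggers a -Wunused-result warning. The return value is
	 * the number of bytes NOT copied, so nonzero means a fault:
	 */
	if (copy_from_user(&args, argp, sizeof(args)))
		return -EFAULT;

	return 0;
}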
Diffstat (limited to 'include/linux/uaccess.h')
-rw-r--r--	include/linux/uaccess.h	| 21
1 file changed, 11 insertions(+), 10 deletions(-)
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index 34a038563d97..70bbdc38dc37 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -55,7 +55,7 @@
* as usual) and both source and destination can trigger faults.
*/
-static __always_inline unsigned long
+static __always_inline __must_check unsigned long
__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
{
kasan_check_write(to, n);
@@ -63,7 +63,7 @@ __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
return raw_copy_from_user(to, from, n);
}
-static __always_inline unsigned long
+static __always_inline __must_check unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
might_fault();
@@ -85,7 +85,7 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
* The caller should also make sure he pins the user space address
* so that we don't result in page fault and sleep.
*/
-static __always_inline unsigned long
+static __always_inline __must_check unsigned long
__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
{
kasan_check_read(from, n);
@@ -93,7 +93,7 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
return raw_copy_to_user(to, from, n);
}
-static __always_inline unsigned long
+static __always_inline __must_check unsigned long
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
might_fault();
@@ -103,7 +103,7 @@ __copy_to_user(void __user *to, const void *from, unsigned long n)
}
#ifdef INLINE_COPY_FROM_USER
-static inline unsigned long
+static inline __must_check unsigned long
_copy_from_user(void *to, const void __user *from, unsigned long n)
{
unsigned long res = n;
@@ -117,12 +117,12 @@ _copy_from_user(void *to, const void __user *from, unsigned long n)
return res;
}
#else
-extern unsigned long
+extern __must_check unsigned long
_copy_from_user(void *, const void __user *, unsigned long);
#endif
#ifdef INLINE_COPY_TO_USER
-static inline unsigned long
+static inline __must_check unsigned long
_copy_to_user(void __user *to, const void *from, unsigned long n)
{
might_fault();
@@ -133,7 +133,7 @@ _copy_to_user(void __user *to, const void *from, unsigned long n)
return n;
}
#else
-extern unsigned long
+extern __must_check unsigned long
_copy_to_user(void __user *, const void *, unsigned long);
#endif
@@ -222,8 +222,9 @@ static inline bool pagefault_disabled(void)
#ifndef ARCH_HAS_NOCACHE_UACCESS
-static inline unsigned long __copy_from_user_inatomic_nocache(void *to,
- const void __user *from, unsigned long n)
+static inline __must_check unsigned long
+__copy_from_user_inatomic_nocache(void *to, const void __user *from,
+ unsigned long n)
{
return __copy_from_user_inatomic(to, from, n);
}
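For reference, the mechanism behind __must_check can be demonstrated in
a small standalone userspace sketch (the fake_copy_from_user() stub is
hypothetical; in the kernel, __must_check expands to the same GCC/Clang
attribute shown below):

/* Build with: gcc -Wall demo.c */
#define __must_check __attribute__((__warn_unused_result__))

static __must_check unsigned long
fake_copy_from_user(void *to, const void *from, unsigned long n)
{
	(void)to;
	(void)from;
	return n;	/* pretend every byte faulted */
}

int main(void)
{
	char dst[8];
	const char src[8] = "example";

	/* Warns: ignoring return value of 'fake_copy_from_user' */
	fake_copy_from_user(dst, src, sizeof(dst));

	/* Checked usage compiles cleanly: */
	if (fake_copy_from_user(dst, src, sizeof(dst)))
		return 1;
	return 0;
}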