author    Hiroshi Shimamoto <h-shimamoto@ct.jp.nec.com>    2009-01-30 18:16:46 -0800
committer H. Peter Anvin <hpa@linux.intel.com>    2009-02-04 17:28:21 -0800
commit    18114f61359ac05e3aa797d53d63f40db41f798d (patch)
tree      7eccf7f9377329b9f0b91ac7b72a7a6344f7c4f1 /arch/x86/include/asm/uaccess.h
parent    019a1369667c3978f9644982ebe6d261dd2bbc40 (diff)
x86: uaccess: use errret as error value in __put_user_size()
Impact: cleanup

In the __put_user_size() macro, errret is used as the error value, but when size is 8, errret is not passed down to __put_user_asm_u64(). This behavior is inconsistent.

Signed-off-by: Hiroshi Shimamoto <h-shimamoto@ct.jp.nec.com>
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
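The change is purely about consistency: every size case of __put_user_size() should hand the caller-supplied errret down to its helper, whereas the 8-byte case previously hard-coded -EFAULT. As a rough user-space illustration of that pattern (a hypothetical sketch with made-up names such as put_sized() and put_bytes(), not the kernel macros themselves), a size-dispatch macro that threads a caller-chosen error code through every branch, including the 8-byte one, looks like this:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for the per-size helpers: fail with the caller's errret. */
static int put_bytes(const void *src, void *dst, size_t n, int errret)
{
	if (!dst)
		return errret;
	memcpy(dst, src, n);
	return 0;
}

/*
 * Hypothetical analogue of __put_user_size(): the caller-supplied error
 * value (errret) reaches every size case, including the 8-byte one.
 */
#define put_sized(x, dst, size, retval, errret)				\
do {									\
	switch (size) {							\
	case 1: (retval) = put_bytes(&(x), (dst), 1, (errret)); break;	\
	case 2: (retval) = put_bytes(&(x), (dst), 2, (errret)); break;	\
	case 4: (retval) = put_bytes(&(x), (dst), 4, (errret)); break;	\
	case 8: (retval) = put_bytes(&(x), (dst), 8, (errret)); break;	\
	default: (retval) = -EINVAL; break;				\
	}								\
} while (0)

int main(void)
{
	uint64_t v = 42, out = 0;
	int ret;

	put_sized(v, &out, sizeof(v), ret, -EFAULT);
	printf("store ok:   ret=%d out=%llu\n", ret, (unsigned long long)out);

	put_sized(v, (void *)0, sizeof(v), ret, -EFAULT);
	printf("store fail: ret=%d\n", ret);
	return 0;
}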
Diffstat (limited to 'arch/x86/include/asm/uaccess.h')
-rw-r--r--  arch/x86/include/asm/uaccess.h  11
1 file changed, 6 insertions(+), 5 deletions(-)
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index b9a24155f7af..b685ece89d5c 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -186,7 +186,7 @@ extern int __get_user_bad(void);
#ifdef CONFIG_X86_32
-#define __put_user_asm_u64(x, addr, err) \
+#define __put_user_asm_u64(x, addr, err, errret) \
asm volatile("1: movl %%eax,0(%2)\n" \
"2: movl %%edx,4(%2)\n" \
"3:\n" \
@@ -197,7 +197,7 @@ extern int __get_user_bad(void);
_ASM_EXTABLE(1b, 4b) \
_ASM_EXTABLE(2b, 4b) \
: "=r" (err) \
- : "A" (x), "r" (addr), "i" (-EFAULT), "0" (err))
+ : "A" (x), "r" (addr), "i" (errret), "0" (err))
#define __put_user_asm_ex_u64(x, addr) \
asm volatile("1: movl %%eax,0(%1)\n" \
@@ -211,8 +211,8 @@ extern int __get_user_bad(void);
asm volatile("call __put_user_8" : "=a" (__ret_pu) \
: "A" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
#else
-#define __put_user_asm_u64(x, ptr, retval) \
- __put_user_asm(x, ptr, retval, "q", "", "Zr", -EFAULT)
+#define __put_user_asm_u64(x, ptr, retval, errret) \
+ __put_user_asm(x, ptr, retval, "q", "", "Zr", errret)
#define __put_user_asm_ex_u64(x, addr) \
__put_user_asm_ex(x, addr, "q", "", "Zr")
#define __put_user_x8(x, ptr, __ret_pu) __put_user_x(8, x, ptr, __ret_pu)
@@ -289,7 +289,8 @@ do { \
__put_user_asm(x, ptr, retval, "l", "k", "ir", errret); \
break; \
case 8: \
- __put_user_asm_u64((__typeof__(*ptr))(x), ptr, retval); \
+ __put_user_asm_u64((__typeof__(*ptr))(x), ptr, retval, \
+ errret); \
break; \
default: \
__put_user_bad(); \