From: "Andi Kleen" Move put_user out of line. Generates smaller code. Signed-off-by: Andi Kleen Signed-off-by: Andrew Morton --- 25-akpm/arch/x86_64/lib/putuser.S | 89 +++++++++++++++++++++++++++++++++++ 25-akpm/include/asm-x86_64/uaccess.h | 16 +++--- 2 files changed, 99 insertions(+), 6 deletions(-) diff -puN arch/x86_64/lib/putuser.S~x86_64-move-put_user-out-of-line arch/x86_64/lib/putuser.S --- 25/arch/x86_64/lib/putuser.S~x86_64-move-put_user-out-of-line Wed Mar 23 15:38:39 2005 +++ 25-akpm/arch/x86_64/lib/putuser.S Wed Mar 23 15:38:39 2005 @@ -0,0 +1,89 @@ +/* + * __put_user functions. + * + * (C) Copyright 1998 Linus Torvalds + * (C) Copyright 2005 Andi Kleen + * + * These functions have a non-standard call interface + * to make them more efficient, especially as they + * return an error value in addition to the "real" + * return value. + */ + +/* + * __put_user_X + * + * Inputs: %rcx contains the address + * %rdx contains new value + * + * Outputs: %rax is error code (0 or -EFAULT) + * + * %r8 is destroyed. + * + * These functions should not modify any other registers, + * as they get called from within inline assembly. + */ + +#include +#include +#include +#include +#include + + .text + .p2align 4 +.globl __put_user_1 +__put_user_1: + GET_THREAD_INFO(%r8) + cmpq threadinfo_addr_limit(%r8),%rcx + jae bad_put_user +1: movb %dl,(%rcx) + xorl %eax,%eax + ret + + .p2align 4 +.globl __put_user_2 +__put_user_2: + GET_THREAD_INFO(%r8) + addq $1,%rcx + jc bad_put_user + cmpq threadinfo_addr_limit(%r8),%rcx + jae bad_put_user +2: movw %dx,-1(%rcx) + xorl %eax,%eax + ret + + .p2align 4 +.globl __put_user_4 +__put_user_4: + GET_THREAD_INFO(%r8) + addq $3,%rcx + jc bad_put_user + cmpq threadinfo_addr_limit(%r8),%rcx + jae bad_put_user +3: movl %edx,-3(%rcx) + xorl %eax,%eax + ret + + .p2align 4 +.globl __put_user_8 +__put_user_8: + GET_THREAD_INFO(%r8) + addq $7,%rcx + jc bad_put_user + cmpq threadinfo_addr_limit(%r8),%rcx + jae bad_put_user +4: movq %rdx,-7(%rcx) + xorl %eax,%eax + ret + +bad_put_user: + movq $(-EFAULT),%rax + ret + +.section __ex_table,"a" + .quad 1b,bad_put_user + .quad 2b,bad_put_user + .quad 3b,bad_put_user + .quad 4b,bad_put_user +.previous diff -puN include/asm-x86_64/uaccess.h~x86_64-move-put_user-out-of-line include/asm-x86_64/uaccess.h --- 25/include/asm-x86_64/uaccess.h~x86_64-move-put_user-out-of-line Wed Mar 23 15:38:39 2005 +++ 25-akpm/include/asm-x86_64/uaccess.h Wed Mar 23 15:38:39 2005 @@ -122,14 +122,13 @@ extern void __put_user_1(void); extern void __put_user_2(void); extern void __put_user_4(void); extern void __put_user_8(void); - extern void __put_user_bad(void); #define __put_user_x(size,ret,x,ptr) \ __asm__ __volatile__("call __put_user_" #size \ :"=a" (ret) \ - :"0" (ptr),"d" (x) \ - :"rbx") + :"c" (ptr),"d" (x) \ + :"r8") #define put_user(x,ptr) \ __put_user_check((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr))) @@ -152,10 +151,15 @@ extern void __put_user_bad(void); #define __put_user_check(x,ptr,size) \ ({ \ - int __pu_err = -EFAULT; \ + int __pu_err; \ __typeof__(*(ptr)) __user *__pu_addr = (ptr); \ - if (likely(access_ok(VERIFY_WRITE,__pu_addr,size))) \ - __put_user_size((x),__pu_addr,(size),__pu_err); \ + switch (size) { \ + case 1: __put_user_x(1,__pu_err,x,__pu_addr); break; \ + case 2: __put_user_x(2,__pu_err,x,__pu_addr); break; \ + case 4: __put_user_x(4,__pu_err,x,__pu_addr); break; \ + case 8: __put_user_x(8,__pu_err,x,__pu_addr); break; \ + default: __put_user_bad(); \ + } \ __pu_err; \ }) _