/* Copyright 2002 Andi Kleen, SuSE Labs.
 * Subject to the GNU Public License v2.
 *
 * Functions to copy from and to user space.
 */

#include <linux/linkage.h>
#include <asm/dwarf2.h>

#define FIX_ALIGNMENT 1

#include <asm/current.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/cpufeature.h>

        .macro ALTERNATIVE_JUMP feature,orig,alt
0:
        .byte 0xe9              /* 32bit jump */
        .long \orig-1f          /* by default jump to orig */
1:
        .section .altinstr_replacement,"ax"
2:      .byte 0xe9              /* near jump with 32bit immediate */
        .long \alt-1b           /* offset */  /* or alternatively to alt */
        .previous
        .section .altinstructions,"a"
        .align 8
        .quad 0b
        .quad 2b
        .byte \feature          /* when feature is set */
        .byte 5
        .byte 5
        .previous
        .endm
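
/*
 * Once the alternatives are applied at boot, each ALTERNATIVE_JUMP site
 * behaves roughly like the following C dispatch.  This is a sketch only:
 * the prototype is illustrative (the %ecx zero-flag argument is omitted),
 * and the feature test really happens once at patch time, not per call:
 *
 *	unsigned long copy_dispatch(void *dst, const void *src, unsigned len)
 *	{
 *		if (boot_cpu_has(X86_FEATURE_REP_GOOD))
 *			return copy_user_generic_string(dst, src, len);
 *		return copy_user_generic_unrolled(dst, src, len);
 *	}
 */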

/* Standard copy_to_user with segment limit checking */
ENTRY(copy_to_user)
        CFI_STARTPROC
        GET_THREAD_INFO(%rax)
        movq %rdi,%rcx
        addq %rdx,%rcx
        jc bad_to_user
        cmpq TI_addr_limit(%rax),%rcx
        jae bad_to_user
        xorl %eax,%eax          /* clear zero flag */
        ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
        CFI_ENDPROC
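
/*
 * The check above is the usual address-limit test; in C it corresponds
 * roughly to the following (a sketch -- the macro and field names are
 * assumptions from memory, not copied from the headers):
 *
 *	unsigned long end = (unsigned long)dst + len;
 *
 *	if (end < (unsigned long)dst)				-- jc bad_to_user
 *		goto bad_to_user;
 *	if (end >= current_thread_info()->addr_limit.seg)	-- jae bad_to_user
 *		goto bad_to_user;
 */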

ENTRY(copy_user_generic)
        CFI_STARTPROC
        movl $1,%ecx            /* set zero flag */
        ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
        CFI_ENDPROC

ENTRY(__copy_from_user_inatomic)
        CFI_STARTPROC
        xorl %ecx,%ecx          /* clear zero flag */
        ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
        CFI_ENDPROC
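
/*
 * Note the %ecx "zero flag" convention used by these thin entry points:
 * copy_user_generic and copy_from_user (below) pass 1, so the generic copy
 * zero-pads the uncopied tail of the destination on a fault; the other
 * entry points clear the flag and leave the destination alone.  On the C
 * side callers see something along the lines of (illustrative prototype,
 * not copied from the headers):
 *
 *	unsigned long copy_user_generic(void *to, const void *from, unsigned len);
 *
 * returning the number of bytes that could not be copied (0 on success).
 */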

/* Standard copy_from_user with segment limit checking */
ENTRY(copy_from_user)
        CFI_STARTPROC
        GET_THREAD_INFO(%rax)
        movq %rsi,%rcx
        addq %rdx,%rcx
        jc bad_from_user
        cmpq TI_addr_limit(%rax),%rcx
        jae bad_from_user
        movl $1,%ecx            /* set zero flag */
        ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
        CFI_ENDPROC
ENDPROC(copy_from_user)

        .section .fixup,"ax"
        /* must zero dest */
bad_from_user:
        CFI_STARTPROC
        movl %edx,%ecx
        xorl %eax,%eax
        rep
        stosb
bad_to_user:
        movl %edx,%eax
        ret
        CFI_ENDPROC
END(bad_from_user)
        .previous
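
/*
 * Summarizing the fixup above (a C-level sketch, illustrative only):
 * a failed limit check in copy_from_user lands in bad_from_user, which
 * zeroes the whole destination buffer before reporting failure, while
 * copy_to_user just reports failure:
 *
 *	bad_from_user:	memset(to, 0, len);  return len;
 *	bad_to_user:	return len;
 */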

/*
 * copy_user_generic_unrolled - memory copy with exception handling.
 * This version is for CPUs like the P4 that don't have efficient
 * microcode for rep movsq.
 *
 * Input:
 * rdi destination
 * rsi source
 * rdx count
 * ecx zero flag -- if true, zero the destination on error
 *
 * Output:
 * eax uncopied bytes or 0 if successful.
 */
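
/*
 * A C-level sketch of the strategy used below (illustrative only; the
 * helper name and types are not kernel APIs, and the fault handling that
 * the real code adds on top is omitted):
 *
 *	unsigned long unrolled_copy(char *dst, const char *src, unsigned long len)
 *	{
 *		while (((unsigned long)dst & 7) && len) {	-- FIX_ALIGNMENT
 *			*dst++ = *src++; len--;
 *		}
 *		while (len >= 64) {				-- .Lloop: 8 quadwords
 *			__builtin_memcpy(dst, src, 64);
 *			dst += 64; src += 64; len -= 64;
 *		}
 *		while (len >= 8) {				-- .Lloop_8
 *			__builtin_memcpy(dst, src, 8);
 *			dst += 8; src += 8; len -= 8;
 *		}
 *		while (len) {					-- .Lloop_1
 *			*dst++ = *src++; len--;
 *		}
 *		return 0;
 *	}
 */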

ENTRY(copy_user_generic_unrolled)
        CFI_STARTPROC
        pushq %rbx
        CFI_ADJUST_CFA_OFFSET 8
        CFI_REL_OFFSET rbx, 0
        pushq %rcx
        CFI_ADJUST_CFA_OFFSET 8
        CFI_REL_OFFSET rcx, 0
        xorl %eax,%eax          /* zero for the exception handler */

#ifdef FIX_ALIGNMENT
        /* check for bad alignment of destination */
        movl %edi,%ecx
        andl $7,%ecx
        jnz .Lbad_alignment
.Lafter_bad_alignment:
#endif

        movq %rdx,%rcx

        movl $64,%ebx
        shrq $6,%rdx
        decq %rdx
        js .Lhandle_tail

        .p2align 4
.Lloop:
.Ls1:   movq (%rsi),%r11
.Ls2:   movq 1*8(%rsi),%r8
.Ls3:   movq 2*8(%rsi),%r9
.Ls4:   movq 3*8(%rsi),%r10
.Ld1:   movq %r11,(%rdi)
.Ld2:   movq %r8,1*8(%rdi)
.Ld3:   movq %r9,2*8(%rdi)
.Ld4:   movq %r10,3*8(%rdi)

.Ls5:   movq 4*8(%rsi),%r11
.Ls6:   movq 5*8(%rsi),%r8
.Ls7:   movq 6*8(%rsi),%r9
.Ls8:   movq 7*8(%rsi),%r10
.Ld5:   movq %r11,4*8(%rdi)
.Ld6:   movq %r8,5*8(%rdi)
.Ld7:   movq %r9,6*8(%rdi)
.Ld8:   movq %r10,7*8(%rdi)

        decq %rdx

        leaq 64(%rsi),%rsi
        leaq 64(%rdi),%rdi

        jns .Lloop

        .p2align 4
.Lhandle_tail:
        movl %ecx,%edx
        andl $63,%ecx
        shrl $3,%ecx
        jz .Lhandle_7
        movl $8,%ebx
        .p2align 4
.Lloop_8:
.Ls9:   movq (%rsi),%r8
.Ld9:   movq %r8,(%rdi)
        decl %ecx
        leaq 8(%rdi),%rdi
        leaq 8(%rsi),%rsi
        jnz .Lloop_8

.Lhandle_7:
        movl %edx,%ecx
        andl $7,%ecx
        jz .Lende
        .p2align 4
.Lloop_1:
.Ls10:  movb (%rsi),%bl
.Ld10:  movb %bl,(%rdi)
        incq %rdi
        incq %rsi
        decl %ecx
        jnz .Lloop_1

        CFI_REMEMBER_STATE
.Lende:
        popq %rcx
        CFI_ADJUST_CFA_OFFSET -8
        CFI_RESTORE rcx
        popq %rbx
        CFI_ADJUST_CFA_OFFSET -8
        CFI_RESTORE rbx
        ret
        CFI_RESTORE_STATE

#ifdef FIX_ALIGNMENT
        /* align destination */
        .p2align 4
.Lbad_alignment:
        movl $8,%r9d
        subl %ecx,%r9d
        movl %r9d,%ecx
        cmpq %r9,%rdx
        jz .Lhandle_7
        js .Lhandle_7
.Lalign_1:
.Ls11:  movb (%rsi),%bl
.Ld11:  movb %bl,(%rdi)
        incq %rsi
        incq %rdi
        decl %ecx
        jnz .Lalign_1
        subq %r9,%rdx
        jmp .Lafter_bad_alignment
#endif
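
/*
 * Each __ex_table entry below pairs a potentially faulting instruction
 * with its fixup target.  Conceptually (a sketch of the layout the .quad
 * pairs emit -- two absolute addresses per entry):
 *
 *	struct exception_table_entry {
 *		unsigned long insn;	-- address of the faulting instruction
 *		unsigned long fixup;	-- where execution resumes on a fault
 *	};
 */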

        /* table sorted by exception address */
        .section __ex_table,"a"
        .align 8
        .quad .Ls1,.Ls1e        /* Ls1-Ls4 have copied zero bytes */
        .quad .Ls2,.Ls1e
        .quad .Ls3,.Ls1e
        .quad .Ls4,.Ls1e
        .quad .Ld1,.Ls1e        /* Ld1-Ld4 have copied 0-24 bytes */
        .quad .Ld2,.Ls2e
        .quad .Ld3,.Ls3e
        .quad .Ld4,.Ls4e
        .quad .Ls5,.Ls5e        /* Ls5-Ls8 have copied 32 bytes */
        .quad .Ls6,.Ls5e
        .quad .Ls7,.Ls5e
        .quad .Ls8,.Ls5e
        .quad .Ld5,.Ls5e        /* Ld5-Ld8 have copied 32-56 bytes */
        .quad .Ld6,.Ls6e
        .quad .Ld7,.Ls7e
        .quad .Ld8,.Ls8e
        .quad .Ls9,.Le_quad
        .quad .Ld9,.Le_quad
        .quad .Ls10,.Le_byte
        .quad .Ld10,.Le_byte
#ifdef FIX_ALIGNMENT
        .quad .Ls11,.Lzero_rest
        .quad .Ld11,.Lzero_rest
#endif
        .quad .Le5,.Le_zero
        .previous

        /* eax: zero, ebx: 64 */
.Ls1e:  addl $8,%eax            /* eax is bytes left uncopied within the loop (Ls1e: 64 .. Ls8e: 8) */
.Ls2e:  addl $8,%eax
.Ls3e:  addl $8,%eax
.Ls4e:  addl $8,%eax
.Ls5e:  addl $8,%eax
.Ls6e:  addl $8,%eax
.Ls7e:  addl $8,%eax
.Ls8e:  addl $8,%eax
        addq %rbx,%rdi          /* +64 */
        subq %rax,%rdi          /* correct destination with computed offset */

        shlq $6,%rdx            /* loop counter * 64 (stride length) */
        addq %rax,%rdx          /* add offset to loopcnt */
        andl $63,%ecx           /* remaining bytes */
        addq %rcx,%rdx          /* add them */
        jmp .Lzero_rest
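
/*
 * Worked example of the accounting above: if .Ls5 faults, the first 32
 * bytes of the current 64-byte block have already been stored and the
 * fixup enters at .Ls5e, so the four addl's leave %eax = 32 (bytes of
 * this block left uncopied).  %rdi is then advanced by 64-32 = 32 to the
 * first byte that was not written, and %rdx ends up as
 * (not-yet-started 64-byte blocks)*64 + 32 + the sub-64-byte tail,
 * i.e. exactly the number of bytes not copied.
 */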

        /* exception on quad word loop in tail handling */
        /* ecx: loopcnt/8, edx: length, rdi: correct */
.Le_quad:
        shll $3,%ecx
        andl $7,%edx
        addl %ecx,%edx
        /* edx: bytes to zero, rdi: dest, eax: zero */
.Lzero_rest:
        cmpl $0,(%rsp)
        jz .Le_zero
        movq %rdx,%rcx
.Le_byte:
        xorl %eax,%eax
.Le5:   rep
        stosb
        /* when there is another exception while zeroing the rest just return */
.Le_zero:
        movq %rdx,%rax
        jmp .Lende
        CFI_ENDPROC
ENDPROC(copy_user_generic_unrolled)

/* Some CPUs run faster using the string copy instructions.
   This is also a lot simpler. Use them when possible.
   Patch in jmps to this code instead of copying it fully
   to avoid unwanted aliasing in the exception tables. */

/* rdi destination
 * rsi source
 * rdx count
 * ecx zero flag
 *
 * Output:
 * eax uncopied bytes or 0 if successful.
 *
 * Only 4GB of copy is supported. This shouldn't be a problem
 * because the kernel normally only writes from/to page sized chunks
 * even if user space passed a longer buffer.
 * And more would be dangerous because both Intel and AMD have
 * errata with rep movsq > 4GB. If someone feels the need to lift
 * this limit, please take those errata into account.
 */
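
/*
 * The fast path below is just "rep movsq" for the quadwords followed by
 * "rep movsb" for the remainder.  As a C sketch with inline asm
 * (illustrative only, and without the fault handling the real code needs):
 *
 *	unsigned long quads = len >> 3, rest = len & 7;
 *
 *	asm volatile("rep movsq"
 *		     : "+D" (dst), "+S" (src), "+c" (quads) : : "memory");
 *	asm volatile("rep movsb"
 *		     : "+D" (dst), "+S" (src), "+c" (rest)  : : "memory");
 */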

ENTRY(copy_user_generic_string)
        CFI_STARTPROC
        movl %ecx,%r8d          /* save zero flag */
        movl %edx,%ecx
        shrl $3,%ecx
        andl $7,%edx
        jz 10f
1:      rep
        movsq
        movl %edx,%ecx
2:      rep
        movsb
9:      movl %ecx,%eax
        ret

        /* multiple of 8 byte */
10:     rep
        movsq
        xor %eax,%eax
        ret

        /* exception handling */
3:      lea (%rdx,%rcx,8),%rax  /* exception on quad loop */
        jmp 6f
5:      movl %ecx,%eax          /* exception on byte loop */
        /* eax: left over bytes */
6:      testl %r8d,%r8d         /* zero flag set? */
        jz 7f
        movl %eax,%ecx          /* initialize x86 loop counter */
        push %rax
        xorl %eax,%eax
8:      rep
        stosb                   /* zero the rest */
11:     pop %rax
7:      ret
        CFI_ENDPROC
END(copy_user_generic_string)

        .section __ex_table,"a"
        .quad 1b,3b
        .quad 2b,5b
        .quad 8b,11b
        .quad 10b,3b
        .previous