/*
 * Copyright 2008 Vitaly Mayatskikh <vmayatsk@redhat.com>
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Subject to the GNU Public License v2.
 *
 * Functions to copy from and to user space.
 */

#include <linux/linkage.h>
#include <asm/current.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/cpufeatures.h>
#include <asm/alternative-asm.h>
#include <asm/asm.h>
#include <asm/smap.h>
#include <asm/export.h>

/* Standard copy_to_user with segment limit checking */
ENTRY(_copy_to_user)
mov PER_CPU_VAR(current_task), %rax
movq %rdi,%rcx
addq %rdx,%rcx
jc bad_to_user
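/* The end address may equal the limit: fail only if buf + size is strictly above TASK_addr_limit (off-by-one fix for the last user page) */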
cmpq TASK_addr_limit(%rax),%rcx
ja bad_to_user
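/* Pick the fastest copy variant for this CPU; alternatives patching selects the string or ERMS version when X86_FEATURE_REP_GOOD / X86_FEATURE_ERMS is set */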
ALTERNATIVE_2 "jmp copy_user_generic_unrolled", \
"jmp copy_user_generic_string", \
X86_FEATURE_REP_GOOD, \
"jmp copy_user_enhanced_fast_string", \
X86_FEATURE_ERMS
ENDPROC(_copy_to_user)
EXPORT_SYMBOL(_copy_to_user)

/* Standard copy_from_user with segment limit checking */
ENTRY(_copy_from_user)
mov PER_CPU_VAR(current_task), %rax
movq %rsi,%rcx
addq %rdx,%rcx
jc bad_from_user
cmpq TASK_addr_limit(%rax),%rcx
ja bad_from_user
ALTERNATIVE_2 "jmp copy_user_generic_unrolled", \
"jmp copy_user_generic_string", \
X86_FEATURE_REP_GOOD, \
"jmp copy_user_enhanced_fast_string", \
X86_FEATURE_ERMS
ENDPROC(_copy_from_user)
EXPORT_SYMBOL(_copy_from_user)

.section .fixup,"ax"
/* must zero dest */
ENTRY(bad_from_user)
bad_from_user:
movl %edx,%ecx
xorl %eax,%eax
rep
stosb
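/* Destination range check failed in _copy_to_user: nothing to zero, return the full count as uncopied */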
bad_to_user:
movl %edx,%eax
ret
ENDPROC(bad_from_user)
.previous

/*
 * copy_user_generic_unrolled - memory copy with exception handling.
 * This version is for CPUs like P4 that don't have efficient
 * microcode for rep movsq.
 *
 * Input:
 * rdi destination
 * rsi source
 * rdx count
 *
 * Output:
 * eax uncopied bytes or 0 if successful.
 */
ENTRY(copy_user_generic_unrolled)
ASM_STAC
cmpl $8,%edx
jb 20f /* less than 8 bytes, go to byte copy loop */
ALIGN_DESTINATION
movl %edx,%ecx
andl $63,%edx
shrl $6,%ecx
jz 17f
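/* Main unrolled loop: copy 64 bytes per iteration (8 quadword loads followed by 8 stores) */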
1: movq (%rsi),%r8
2: movq 1*8(%rsi),%r9
3: movq 2*8(%rsi),%r10
4: movq 3*8(%rsi),%r11
5: movq %r8,(%rdi)
6: movq %r9,1*8(%rdi)
7: movq %r10,2*8(%rdi)
8: movq %r11,3*8(%rdi)
9: movq 4*8(%rsi),%r8
10: movq 5*8(%rsi),%r9
11: movq 6*8(%rsi),%r10
12: movq 7*8(%rsi),%r11
13: movq %r8,4*8(%rdi)
14: movq %r9,5*8(%rdi)
15: movq %r10,6*8(%rdi)
16: movq %r11,7*8(%rdi)
leaq 64(%rsi),%rsi
leaq 64(%rdi),%rdi
decl %ecx
jnz 1b
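/* Copy the remaining whole quadwords, 8 bytes per iteration */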
17: movl %edx,%ecx
andl $7,%edx
shrl $3,%ecx
jz 20f
18: movq (%rsi),%r8
19: movq %r8,(%rdi)
leaq 8(%rsi),%rsi
leaq 8(%rdi),%rdi
decl %ecx
jnz 18b
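/* Copy any remaining bytes (0-7) one at a time */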
20: andl %edx,%edx
jz 23f
movl %edx,%ecx
21: movb (%rsi),%al
22: movb %al,(%rdi)
incq %rsi
incq %rdi
decl %ecx
jnz 21b
23: xor %eax,%eax
ASM_CLAC
ret

.section .fixup,"ax"
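/* Fixups for the loops above: compute the number of bytes left in %edx, then let copy_user_handle_tail deal with the tail */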
30: shll $6,%ecx
addl %ecx,%edx
jmp 60f
40: leal (%rdx,%rcx,8),%edx
jmp 60f
50: movl %ecx,%edx
60: jmp copy_user_handle_tail /* ecx is zerorest also */
.previous

_ASM_EXTABLE(1b,30b)
_ASM_EXTABLE(2b,30b)
_ASM_EXTABLE(3b,30b)
_ASM_EXTABLE(4b,30b)
_ASM_EXTABLE(5b,30b)
_ASM_EXTABLE(6b,30b)
_ASM_EXTABLE(7b,30b)
_ASM_EXTABLE(8b,30b)
_ASM_EXTABLE(9b,30b)
_ASM_EXTABLE(10b,30b)
_ASM_EXTABLE(11b,30b)
_ASM_EXTABLE(12b,30b)
_ASM_EXTABLE(13b,30b)
_ASM_EXTABLE(14b,30b)
_ASM_EXTABLE(15b,30b)
_ASM_EXTABLE(16b,30b)
_ASM_EXTABLE(18b,40b)
_ASM_EXTABLE(19b,40b)
_ASM_EXTABLE(21b,50b)
_ASM_EXTABLE(22b,50b)
ENDPROC(copy_user_generic_unrolled)
EXPORT_SYMBOL(copy_user_generic_unrolled)

/* Some CPUs run faster using the string copy instructions.
 * This is also a lot simpler. Use them when possible.
 *
 * Only 4GB of copy is supported. This shouldn't be a problem
 * because the kernel normally only copies from/to page-sized chunks
 * even if user space passed a longer buffer.
 * Copying more than 4GB would also be dangerous, because both Intel
 * and AMD have errata with rep movsq > 4GB; keep those in mind
 * before lifting this limit.
 *
 * Input:
 * rdi destination
 * rsi source
 * rdx count
 *
 * Output:
 * eax uncopied bytes or 0 if successful.
 */
ENTRY(copy_user_generic_string)
ASM_STAC
cmpl $8,%edx
jb 2f /* less than 8 bytes, go to byte copy loop */
ALIGN_DESTINATION
movl %edx,%ecx
shrl $3,%ecx
andl $7,%edx
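/* Copy whole quadwords with rep movsq, then the 0-7 byte tail with rep movsb */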
1: rep
movsq
2: movl %edx,%ecx
3: rep
movsb
xorl %eax,%eax
ASM_CLAC
ret

.section .fixup,"ax"
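/* 11: fault during rep movsq, 8*%rcx + %rdx bytes remain; 12: fault during rep movsb, %ecx bytes remain */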
11: leal (%rdx,%rcx,8),%ecx
12: movl %ecx,%edx /* ecx is zerorest also */
jmp copy_user_handle_tail
.previous

_ASM_EXTABLE(1b,11b)
_ASM_EXTABLE(3b,12b)
ENDPROC(copy_user_generic_string)
EXPORT_SYMBOL(copy_user_generic_string)

/*
 * Some CPUs support the enhanced REP MOVSB/STOSB (ERMS) instructions.
 * Using enhanced REP MOVSB/STOSB is recommended whenever the feature
 * is enabled.
 *
 * Input:
 * rdi destination
 * rsi source
 * rdx count
 *
 * Output:
 * eax uncopied bytes or 0 if successful.
 */
ENTRY(copy_user_enhanced_fast_string)
ASM_STAC
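/* With ERMS, a single rep movsb copies the whole buffer efficiently */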
movl %edx,%ecx
1: rep
movsb
xorl %eax,%eax
ASM_CLAC
ret

.section .fixup,"ax"
12: movl %ecx,%edx /* ecx is zerorest also */
jmp copy_user_handle_tail
.previous

_ASM_EXTABLE(1b,12b)
ENDPROC(copy_user_enhanced_fast_string)
EXPORT_SYMBOL(copy_user_enhanced_fast_string)

/*
 * __copy_user_nocache - Uncached memory copy with exception handling
 * This will force the destination out of the cache for better performance.
 *
 * Note: Cached memory copy is used when destination or size is not
 * naturally aligned. That is:
 * - Require 8-byte alignment when size is 8 bytes or larger.
 * - Require 4-byte alignment when size is 4 bytes.
 */
ENTRY(__copy_user_nocache)
ASM_STAC

/* If size is less than 8 bytes, go to 4-byte copy */
cmpl $8,%edx
jb .L_4b_nocache_copy_entry

/* If destination is not 8-byte aligned, "cache" copy to align it */
ALIGN_DESTINATION

/* Set 4x8-byte copy count and remainder */
movl %edx,%ecx
andl $63,%edx
shrl $6,%ecx
jz .L_8b_nocache_copy_entry /* jump if count is 0 */

/* Perform 4x8-byte nocache loop-copy */
.L_4x8b_nocache_copy_loop:
1: movq (%rsi),%r8
2: movq 1*8(%rsi),%r9
3: movq 2*8(%rsi),%r10
4: movq 3*8(%rsi),%r11
5: movnti %r8,(%rdi)
6: movnti %r9,1*8(%rdi)
7: movnti %r10,2*8(%rdi)
8: movnti %r11,3*8(%rdi)
9: movq 4*8(%rsi),%r8
10: movq 5*8(%rsi),%r9
11: movq 6*8(%rsi),%r10
12: movq 7*8(%rsi),%r11
13: movnti %r8,4*8(%rdi)
14: movnti %r9,5*8(%rdi)
15: movnti %r10,6*8(%rdi)
16: movnti %r11,7*8(%rdi)
leaq 64(%rsi),%rsi
leaq 64(%rdi),%rdi
decl %ecx
jnz .L_4x8b_nocache_copy_loop

/* Set 8-byte copy count and remainder */
.L_8b_nocache_copy_entry:
movl %edx,%ecx
andl $7,%edx
shrl $3,%ecx
jz .L_4b_nocache_copy_entry /* jump if count is 0 */

/* Perform 8-byte nocache loop-copy */
.L_8b_nocache_copy_loop:
20: movq (%rsi),%r8
21: movnti %r8,(%rdi)
leaq 8(%rsi),%rsi
leaq 8(%rdi),%rdi
decl %ecx
jnz .L_8b_nocache_copy_loop

/* If no bytes are left, we're done */
.L_4b_nocache_copy_entry:
andl %edx,%edx
jz .L_finish_copy

/* If destination is not 4-byte aligned, go to byte copy: */
movl %edi,%ecx
andl $3,%ecx
jnz .L_1b_cache_copy_entry

/* Set 4-byte copy count (1 or 0) and remainder */
movl %edx,%ecx
andl $3,%edx
shrl $2,%ecx
jz .L_1b_cache_copy_entry /* jump if count is 0 */

/* Perform 4-byte nocache copy: */
30: movl (%rsi),%r8d
31: movnti %r8d,(%rdi)
leaq 4(%rsi),%rsi
leaq 4(%rdi),%rdi

/* If no bytes left, we're done: */
andl %edx,%edx
jz .L_finish_copy

/* Perform byte "cache" loop-copy for the remainder */
.L_1b_cache_copy_entry:
movl %edx,%ecx
.L_1b_cache_copy_loop:
40: movb (%rsi),%al
41: movb %al,(%rdi)
incq %rsi
incq %rdi
decl %ecx
jnz .L_1b_cache_copy_loop

/* Finished copying; fence the prior stores */
.L_finish_copy:
xorl %eax,%eax
ASM_CLAC
sfence
ret

.section .fixup,"ax"
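/* Fixups: compute how many bytes were not copied, fence the pending non-temporal stores, and fall back to copy_user_handle_tail */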
.L_fixup_4x8b_copy:
shll $6,%ecx
addl %ecx,%edx
jmp .L_fixup_handle_tail
.L_fixup_8b_copy:
lea (%rdx,%rcx,8),%rdx
jmp .L_fixup_handle_tail
.L_fixup_4b_copy:
lea (%rdx,%rcx,4),%rdx
jmp .L_fixup_handle_tail
.L_fixup_1b_copy:
movl %ecx,%edx
.L_fixup_handle_tail:
sfence
jmp copy_user_handle_tail
.previous

_ASM_EXTABLE(1b,.L_fixup_4x8b_copy)
_ASM_EXTABLE(2b,.L_fixup_4x8b_copy)
_ASM_EXTABLE(3b,.L_fixup_4x8b_copy)
_ASM_EXTABLE(4b,.L_fixup_4x8b_copy)
_ASM_EXTABLE(5b,.L_fixup_4x8b_copy)
_ASM_EXTABLE(6b,.L_fixup_4x8b_copy)
_ASM_EXTABLE(7b,.L_fixup_4x8b_copy)
_ASM_EXTABLE(8b,.L_fixup_4x8b_copy)
_ASM_EXTABLE(9b,.L_fixup_4x8b_copy)
_ASM_EXTABLE(10b,.L_fixup_4x8b_copy)
_ASM_EXTABLE(11b,.L_fixup_4x8b_copy)
_ASM_EXTABLE(12b,.L_fixup_4x8b_copy)
_ASM_EXTABLE(13b,.L_fixup_4x8b_copy)
_ASM_EXTABLE(14b,.L_fixup_4x8b_copy)
_ASM_EXTABLE(15b,.L_fixup_4x8b_copy)
_ASM_EXTABLE(16b,.L_fixup_4x8b_copy)
_ASM_EXTABLE(20b,.L_fixup_8b_copy)
_ASM_EXTABLE(21b,.L_fixup_8b_copy)
_ASM_EXTABLE(30b,.L_fixup_4b_copy)
_ASM_EXTABLE(31b,.L_fixup_4b_copy)
_ASM_EXTABLE(40b,.L_fixup_1b_copy)
_ASM_EXTABLE(41b,.L_fixup_1b_copy)
ENDPROC(__copy_user_nocache)
EXPORT_SYMBOL(__copy_user_nocache)