x86/fpu: Clean up the parameter definitions of copy_xstate_to_*()
Remove pointless 'const' of non-pointer input parameter. Remove unnecessary parentheses that show uncertainty about arithmetic operator precedence. Clarify copy_xstate_to_user() description. No change in functionality. Cc: Andrew Morton <akpm@linux-foundation.org> Cc: Andy Lutomirski <luto@amacapital.net> Cc: Andy Lutomirski <luto@kernel.org> Cc: Borislav Petkov <bp@alien8.de> Cc: Dave Hansen <dave.hansen@linux.intel.com> Cc: Eric Biggers <ebiggers3@gmail.com> Cc: Fenghua Yu <fenghua.yu@intel.com> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Oleg Nesterov <oleg@redhat.com> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Rik van Riel <riel@redhat.com> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: Yu-cheng Yu <yu-cheng.yu@intel.com> Link: http://lkml.kernel.org/r/20170923130016.21448-7-mingo@kernel.org Signed-off-by: Ingo Molnar <mingo@kernel.org>
This commit is contained in:
parent
d7eda6c99c
commit
becb2bb72f
|
@ -927,13 +927,13 @@ int arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
|
|||
static inline int
|
||||
__copy_xstate_to_kernel(void *kbuf,
|
||||
const void *data,
|
||||
unsigned int pos, unsigned int count, const int start_pos, const int end_pos)
|
||||
unsigned int pos, unsigned int count, int start_pos, int end_pos)
|
||||
{
|
||||
if ((count == 0) || (pos < start_pos))
|
||||
return 0;
|
||||
|
||||
if (end_pos < 0 || pos < end_pos) {
|
||||
unsigned int copy = (end_pos < 0 ? count : min(count, end_pos - pos));
|
||||
unsigned int copy = end_pos < 0 ? count : min(count, end_pos - pos);
|
||||
|
||||
memcpy(kbuf + pos, data, copy);
|
||||
}
|
||||
|
@ -1010,13 +1010,13 @@ int copy_xstate_to_kernel(void *kbuf, struct xregs_state *xsave, unsigned int po
|
|||
}
|
||||
|
||||
static inline int
|
||||
__copy_xstate_to_user(void __user *ubuf, const void *data, unsigned int pos, unsigned int count, const int start_pos, const int end_pos)
|
||||
__copy_xstate_to_user(void __user *ubuf, const void *data, unsigned int pos, unsigned int count, int start_pos, int end_pos)
|
||||
{
|
||||
if ((count == 0) || (pos < start_pos))
|
||||
return 0;
|
||||
|
||||
if (end_pos < 0 || pos < end_pos) {
|
||||
unsigned int copy = (end_pos < 0 ? count : min(count, end_pos - pos));
|
||||
unsigned int copy = end_pos < 0 ? count : min(count, end_pos - pos);
|
||||
|
||||
if (__copy_to_user(ubuf + pos, data, copy))
|
||||
return -EFAULT;
|
||||
|
@ -1026,7 +1026,7 @@ __copy_xstate_to_user(void __user *ubuf, const void *data, unsigned int pos, uns
|
|||
|
||||
/*
|
||||
* Convert from kernel XSAVES compacted format to standard format and copy
|
||||
* to a ptrace buffer. It supports partial copy but pos always starts from
|
||||
* to a user-space buffer. It supports partial copy but pos always starts from
|
||||
* zero. This is called from xstateregs_get() and there we check the CPU
|
||||
* has XSAVES.
|
||||
*/
|
||||
|
|
Loading…
Reference in New Issue