x86/fpu: Clean up parameter order in the copy_xstate_to_*() APIs

Parameter ordering is weird:

  int copy_xstate_to_kernel(unsigned int pos, unsigned int count, void *kbuf, struct xregs_state *xsave);
  int copy_xstate_to_user(unsigned int pos, unsigned int count, void __user *ubuf, struct xregs_state *xsave);

'pos' and 'count', which are attributes of the destination buffer, are listed before the destination
buffer itself ...

List them after the primary arguments instead.

This makes the code more similar to regular memcpy() variant APIs.
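
For comparison, a sketch of the resulting convention side by side with the standard
memcpy() prototype (illustrative only, not part of this patch):

  void *memcpy(void *dest, const void *src, size_t n);

  int copy_xstate_to_kernel(void *kbuf, struct xregs_state *xsave, unsigned int pos, unsigned int count);
  int copy_xstate_to_user(void __user *ubuf, struct xregs_state *xsave, unsigned int pos, unsigned int count);

In both cases the destination buffer comes first, the source second and the
position/size attributes last.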

No change in functionality.

Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Eric Biggers <ebiggers3@gmail.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Yu-cheng Yu <yu-cheng.yu@intel.com>
Link: http://lkml.kernel.org/r/20170923130016.21448-6-mingo@kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
3 changed files with 16 additions and 17 deletions

arch/x86/include/asm/fpu/xstate.h

@@ -48,8 +48,8 @@ void fpu__xstate_clear_all_cpu_caps(void);
 void *get_xsave_addr(struct xregs_state *xsave, int xstate);
 const void *get_xsave_field_ptr(int xstate_field);
 int using_compacted_format(void);
-int copy_xstate_to_kernel(unsigned int pos, unsigned int count, void *kbuf, struct xregs_state *xsave);
-int copy_xstate_to_user(unsigned int pos, unsigned int count, void __user *ubuf, struct xregs_state *xsave);
+int copy_xstate_to_kernel(void *kbuf, struct xregs_state *xsave, unsigned int pos, unsigned int count);
+int copy_xstate_to_user(void __user *ubuf, struct xregs_state *xsave, unsigned int pos, unsigned int count);
 int copy_user_to_xstate(const void *kbuf, const void __user *ubuf,
			 struct xregs_state *xsave);
 #endif

arch/x86/kernel/fpu/regset.c

@@ -93,9 +93,9 @@ int xstateregs_get(struct task_struct *target, const struct user_regset *regset,
 	if (using_compacted_format()) {
 		if (kbuf)
-			ret = copy_xstate_to_kernel(pos, count, kbuf, xsave);
+			ret = copy_xstate_to_kernel(kbuf, xsave, pos, count);
 		else
-			ret = copy_xstate_to_user(pos, count, ubuf, xsave);
+			ret = copy_xstate_to_user(ubuf, xsave, pos, count);
 	} else {
 		fpstate_sanitize_xstate(fpu);
 		/*
arch/x86/kernel/fpu/xstate.c

@@ -925,10 +925,9 @@ int arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
  * the source data pointer or increment pos, count, kbuf, and ubuf.
  */
 static inline int
-__copy_xstate_to_kernel(unsigned int pos, unsigned int count,
-			void *kbuf,
-			const void *data, const int start_pos,
-			const int end_pos)
+__copy_xstate_to_kernel(void *kbuf,
+			const void *data,
+			unsigned int pos, unsigned int count, const int start_pos, const int end_pos)
 {
 	if ((count == 0) || (pos < start_pos))
 		return 0;
@@ -948,7 +947,7 @@ __copy_xstate_to_kernel(unsigned int pos, unsigned int count,
  * It supports partial copy but pos always starts from zero. This is called
  * from xstateregs_get() and there we check the CPU has XSAVES.
  */
-int copy_xstate_to_kernel(unsigned int pos, unsigned int count, void *kbuf, struct xregs_state *xsave)
+int copy_xstate_to_kernel(void *kbuf, struct xregs_state *xsave, unsigned int pos, unsigned int count)
 {
 	unsigned int offset, size;
 	int ret, i;
@@ -973,7 +972,7 @@ int copy_xstate_to_kernel(unsigned int pos, unsigned int count, void *kbuf, stru
 	offset = offsetof(struct xregs_state, header);
 	size = sizeof(header);
-	ret = __copy_xstate_to_kernel(offset, size, kbuf, &header, 0, count);
+	ret = __copy_xstate_to_kernel(kbuf, &header, offset, size, 0, count);
 	if (ret)
 		return ret;
@@ -987,7 +986,7 @@ int copy_xstate_to_kernel(unsigned int pos, unsigned int count, void *kbuf, stru
 		offset = xstate_offsets[i];
 		size = xstate_sizes[i];
-		ret = __copy_xstate_to_kernel(offset, size, kbuf, src, 0, count);
+		ret = __copy_xstate_to_kernel(kbuf, src, offset, size, 0, count);
 		if (ret)
 			return ret;
@@ -1003,7 +1002,7 @@ int copy_xstate_to_kernel(unsigned int pos, unsigned int count, void *kbuf, stru
 	offset = offsetof(struct fxregs_state, sw_reserved);
 	size = sizeof(xstate_fx_sw_bytes);
-	ret = __copy_xstate_to_kernel(offset, size, kbuf, xstate_fx_sw_bytes, 0, count);
+	ret = __copy_xstate_to_kernel(kbuf, xstate_fx_sw_bytes, offset, size, 0, count);
 	if (ret)
 		return ret;
@@ -1011,7 +1010,7 @@ int copy_xstate_to_kernel(unsigned int pos, unsigned int count, void *kbuf, stru
 }
 static inline int
-__copy_xstate_to_user(unsigned int pos, unsigned int count, void __user *ubuf, const void *data, const int start_pos, const int end_pos)
+__copy_xstate_to_user(void __user *ubuf, const void *data, unsigned int pos, unsigned int count, const int start_pos, const int end_pos)
 {
 	if ((count == 0) || (pos < start_pos))
 		return 0;
@@ -1031,7 +1030,7 @@ __copy_xstate_to_user(unsigned int pos, unsigned int count, void __user *ubuf, c
  * zero. This is called from xstateregs_get() and there we check the CPU
  * has XSAVES.
  */
-int copy_xstate_to_user(unsigned int pos, unsigned int count, void __user *ubuf, struct xregs_state *xsave)
+int copy_xstate_to_user(void __user *ubuf, struct xregs_state *xsave, unsigned int pos, unsigned int count)
 {
 	unsigned int offset, size;
 	int ret, i;
@@ -1056,7 +1055,7 @@ int copy_xstate_to_user(unsigned int pos, unsigned int count, void __user *ubuf,
 	offset = offsetof(struct xregs_state, header);
 	size = sizeof(header);
-	ret = __copy_xstate_to_user(offset, size, ubuf, &header, 0, count);
+	ret = __copy_xstate_to_user(ubuf, &header, offset, size, 0, count);
 	if (ret)
 		return ret;
@@ -1070,7 +1069,7 @@ int copy_xstate_to_user(unsigned int pos, unsigned int count, void __user *ubuf,
 		offset = xstate_offsets[i];
 		size = xstate_sizes[i];
-		ret = __copy_xstate_to_user(offset, size, ubuf, src, 0, count);
+		ret = __copy_xstate_to_user(ubuf, src, offset, size, 0, count);
 		if (ret)
 			return ret;
@@ -1086,7 +1085,7 @@ int copy_xstate_to_user(unsigned int pos, unsigned int count, void __user *ubuf,
 	offset = offsetof(struct fxregs_state, sw_reserved);
 	size = sizeof(xstate_fx_sw_bytes);
-	ret = __copy_xstate_to_user(offset, size, ubuf, xstate_fx_sw_bytes, 0, count);
+	ret = __copy_xstate_to_user(ubuf, xstate_fx_sw_bytes, offset, size, 0, count);
 	if (ret)
 		return ret;