[Xen-devel] Ping: [PATCH] x86: streamline copying to/from user memory
>>> On 06.12.16 at 12:25, <JBeulich@xxxxxxxx> wrote:
> Their size parameters being "unsigned", there's no point in them
> returning "unsigned long", nor in any of their (assembly) arithmetic
> involving 64-bit operations on anything other than addresses.
>
> Take the opportunity and fold __do_clear_user() into its single user
> (using qword stores instead of dword ones), name all asm() operands,
> and reduce the number of (redundant) operands.
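
(The naming change is purely mechanical, but worth spelling out:
positional %0/%1 references become %[name] ones, and inputs tied to
outputs via matching-digit constraints ("0", "1") collapse into single
"+" read-write operands. A minimal standalone sketch of the two styles,
with made-up function names, assuming GCC/Clang extended asm:)

    #include <stdint.h>

    /* Positional: operand references have to be counted by hand, and
     * every read-write value needs a matching-digit input duplicate. */
    static inline uint64_t inc_positional(uint64_t v)
    {
        asm ( "add $1, %0" : "=r" (v) : "0" (v) );
        return v;
    }

    /* Named + read-write constraint: one operand, referenced by name. */
    static inline uint64_t inc_named(uint64_t v)
    {
        asm ( "add $1, %[val]" : [val] "+r" (v) );
        return v;
    }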
>
> Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
Ping?
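
Since the counts are plain 32-bit now, the rewritten asm below uses
32-bit register operands throughout and falls back to the %k / %q
operand modifiers wherever a specific register width is still needed.
A standalone sketch of those modifiers (made-up function name; not part
of the patch):

    #include <stdint.h>

    static inline uint64_t width_sketch(uint64_t x)
    {
        uint64_t r;

        /* %k picks the 32-bit register name (e.g. %eax), %q the 64-bit
         * one (%rax); a 32-bit mov zero-extends into the full register. */
        asm ( "mov %k[in], %k[out]\n\t"
              "add %q[in], %q[out]"
              : [out] "=&r" (r)
              : [in] "r" (x) );

        return r;
    }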
> --- a/xen/arch/x86/usercopy.c
> +++ b/xen/arch/x86/usercopy.c
> @@ -10,87 +10,88 @@
> #include <xen/sched.h>
> #include <asm/uaccess.h>
>
> -unsigned long __copy_to_user_ll(void __user *to, const void *from, unsigned n)
> +unsigned __copy_to_user_ll(void __user *to, const void *from, unsigned n)
> {
> - unsigned long __d0, __d1, __d2, __n = n;
> + unsigned dummy;
>
> stac();
> asm volatile (
> - " cmp $"STR(2*BYTES_PER_LONG-1)",%0\n"
> + " cmp $"STR(2*BYTES_PER_LONG-1)", %[cnt]\n"
> " jbe 1f\n"
> - " mov %1,%0\n"
> - " neg %0\n"
> - " and $"STR(BYTES_PER_LONG-1)",%0\n"
> - " sub %0,%3\n"
> + " mov %k[to], %[cnt]\n"
> + " neg %[cnt]\n"
> + " and $"STR(BYTES_PER_LONG-1)", %[cnt]\n"
> + " sub %[cnt], %[aux]\n"
> "4: rep movsb\n" /* make 'to' address aligned */
> - " mov %3,%0\n"
> - " shr $"STR(LONG_BYTEORDER)",%0\n"
> - " and $"STR(BYTES_PER_LONG-1)",%3\n"
> + " mov %[aux], %[cnt]\n"
> + " shr $"STR(LONG_BYTEORDER)", %[cnt]\n"
> + " and $"STR(BYTES_PER_LONG-1)", %[aux]\n"
> " .align 2,0x90\n"
> "0: rep movs"__OS"\n" /* as many words as possible... */
> - " mov %3,%0\n"
> + " mov %[aux],%[cnt]\n"
> "1: rep movsb\n" /* ...remainder copied as bytes */
> "2:\n"
> ".section .fixup,\"ax\"\n"
> - "5: add %3,%0\n"
> + "5: add %[aux], %[cnt]\n"
> " jmp 2b\n"
> - "3: lea 0(%3,%0,"STR(BYTES_PER_LONG)"),%0\n"
> + "3: lea (%q[aux], %q[cnt], "STR(BYTES_PER_LONG)"), %[cnt]\n"
> " jmp 2b\n"
> ".previous\n"
> _ASM_EXTABLE(4b, 5b)
> _ASM_EXTABLE(0b, 3b)
> _ASM_EXTABLE(1b, 2b)
> - : "=&c" (__n), "=&D" (__d0), "=&S" (__d1), "=&r" (__d2)
> - : "0" (__n), "1" (to), "2" (from), "3" (__n)
> + : [cnt] "+c" (n), [to] "+D" (to), [from] "+S" (from),
> + [aux] "=&r" (dummy)
> + : "[aux]" (n)
> : "memory" );
> clac();
>
> - return __n;
> + return n;
> }
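
At the C level the function above does roughly the following
(illustration only: made-up function name, fault-fixup paths omitted,
and BYTES_PER_LONG hard-coded to its x86-64 value):

    #include <stdint.h>
    #include <string.h>

    #define BYTES_PER_LONG 8 /* x86-64 */

    static unsigned copy_sketch(void *to, const void *from, unsigned n)
    {
        if ( n > 2 * BYTES_PER_LONG - 1 )
        {
            /* Byte count needed to align 'to' (label 4: rep movsb). */
            unsigned head = -(uintptr_t)to & (BYTES_PER_LONG - 1);
            /* Word count for the aligned middle (label 0: rep movsq). */
            unsigned words = (n - head) / BYTES_PER_LONG;

            memcpy(to, from, head);
            memcpy((char *)to + head, (const char *)from + head,
                   words * BYTES_PER_LONG);

            to = (char *)to + head + words * BYTES_PER_LONG;
            from = (const char *)from + head + words * BYTES_PER_LONG;
            n = (n - head) & (BYTES_PER_LONG - 1);
        }
        memcpy(to, from, n); /* label 1: rep movsb (tail or short copy) */

        return 0; /* bytes left uncopied; non-zero only if a fault hit */
    }

The %q modifiers in the fixup's lea merely select the 64-bit register
names for the base and index (lea computes with address-size registers),
while the result still lands in the 32-bit %[cnt].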
>
> -unsigned long
> -__copy_from_user_ll(void *to, const void __user *from, unsigned n)
> +unsigned __copy_from_user_ll(void *to, const void __user *from, unsigned n)
> {
> - unsigned long __d0, __d1, __d2, __n = n;
> + unsigned dummy;
>
> stac();
> asm volatile (
> - " cmp $"STR(2*BYTES_PER_LONG-1)",%0\n"
> + " cmp $"STR(2*BYTES_PER_LONG-1)", %[cnt]\n"
> " jbe 1f\n"
> - " mov %1,%0\n"
> - " neg %0\n"
> - " and $"STR(BYTES_PER_LONG-1)",%0\n"
> - " sub %0,%3\n"
> - "4: rep; movsb\n" /* make 'to' address aligned */
> - " mov %3,%0\n"
> - " shr $"STR(LONG_BYTEORDER)",%0\n"
> - " and $"STR(BYTES_PER_LONG-1)",%3\n"
> + " mov %k[to], %[cnt]\n"
> + " neg %[cnt]\n"
> + " and $"STR(BYTES_PER_LONG-1)", %[cnt]\n"
> + " sub %[cnt], %[aux]\n"
> + "4: rep movsb\n" /* make 'to' address aligned */
> + " mov %[aux],%[cnt]\n"
> + " shr $"STR(LONG_BYTEORDER)", %[cnt]\n"
> + " and $"STR(BYTES_PER_LONG-1)", %[aux]\n"
> " .align 2,0x90\n"
> - "0: rep; movs"__OS"\n" /* as many words as possible... */
> - " mov %3,%0\n"
> - "1: rep; movsb\n" /* ...remainder copied as bytes */
> + "0: rep movs"__OS"\n" /* as many words as possible... */
> + " mov %[aux], %[cnt]\n"
> + "1: rep movsb\n" /* ...remainder copied as bytes */
> "2:\n"
> ".section .fixup,\"ax\"\n"
> - "5: add %3,%0\n"
> + "5: add %[aux], %[cnt]\n"
> " jmp 6f\n"
> - "3: lea 0(%3,%0,"STR(BYTES_PER_LONG)"),%0\n"
> - "6: push %0\n"
> - " push %%"__OP"ax\n"
> - " xor %%eax,%%eax\n"
> - " rep; stosb\n"
> - " pop %%"__OP"ax\n"
> - " pop %0\n"
> + "3: lea (%q[aux], %q[cnt], "STR(BYTES_PER_LONG)"), %[cnt]\n"
> + "6: mov %[cnt], %k[from]\n"
> + " xchg %%eax, %[aux]\n"
> + " xor %%eax, %%eax\n"
> + " rep stosb\n"
> + " xchg %[aux], %%eax\n"
> + " mov %k[from], %[cnt]\n"
> " jmp 2b\n"
> ".previous\n"
> _ASM_EXTABLE(4b, 5b)
> _ASM_EXTABLE(0b, 3b)
> _ASM_EXTABLE(1b, 6b)
> - : "=&c" (__n), "=&D" (__d0), "=&S" (__d1), "=&r" (__d2)
> - : "0" (__n), "1" (to), "2" (from), "3" (__n)
> + : [cnt] "+c" (n), [to] "+D" (to), [from] "+S" (from),
> + [aux] "=&r" (dummy)
> + : "[aux]" (n)
> : "memory" );
> clac();
>
> - return __n;
> + return n;
> }
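
The only structural difference from the copy-to flavour is the fixup at
label 6: before returning, the destination tail that could not be filled
gets zeroed, so callers never consume stale data. The push/pop pair that
used to preserve %eax around the rep stosb is replaced by xchg-ing %eax
with the now dead [aux] register, while the byte count is parked in the
equally dead low half of [from]. What that fixup amounts to, as a hedged
C sketch (made-up name; 'copied' is how far the copy got before the
fault):

    #include <string.h>

    static unsigned from_user_fixup_sketch(void *to, unsigned n,
                                           unsigned copied)
    {
        memset((char *)to + copied, 0, n - copied); /* 6: rep stosb */
        return n - copied;                          /* bytes not copied */
    }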
>
> /**
> @@ -106,34 +107,13 @@ __copy_from_user_ll(void *to, const void
> * Returns number of bytes that could not be copied.
> * On success, this will be zero.
> */
> -unsigned long
> -copy_to_user(void __user *to, const void *from, unsigned n)
> +unsigned copy_to_user(void __user *to, const void *from, unsigned n)
> {
> if ( access_ok(to, n) )
> n = __copy_to_user(to, from, n);
> return n;
> }
>
> -#define __do_clear_user(addr,size) \
> -do { \
> - long __d0; \
> - stac(); \
> - __asm__ __volatile__( \
> - "0: rep; stosl\n" \
> - " movl %2,%0\n" \
> - "1: rep; stosb\n" \
> - "2:\n" \
> - ".section .fixup,\"ax\"\n" \
> - "3: lea 0(%2,%0,4),%0\n" \
> - " jmp 2b\n" \
> - ".previous\n" \
> - _ASM_EXTABLE(0b,3b) \
> - _ASM_EXTABLE(1b,2b) \
> - : "=&c"(size), "=&D" (__d0) \
> - : "r"(size & 3), "0"(size / 4), "1"((long)addr), "a"(0));
> \
> - clac(); \
> -} while (0)
> -
> /**
> * clear_user: - Zero a block of memory in user space.
> * @to: Destination address, in user space.
> @@ -144,12 +124,29 @@ do { \
> * Returns number of bytes that could not be cleared.
> * On success, this will be zero.
> */
> -unsigned long
> -clear_user(void __user *to, unsigned n)
> +unsigned clear_user(void __user *to, unsigned n)
> {
> - if ( access_ok(to, n) )
> - __do_clear_user(to, n);
> - return n;
> + if ( access_ok(to, n) )
> + {
> + stac();
> + asm volatile (
> + "0: rep stos"__OS"\n"
> + " mov %[bytes], %[cnt]\n"
> + "1: rep stosb\n"
> + "2:\n"
> + ".section .fixup,\"ax\"\n"
> + "3: lea (%q[bytes], %q[longs], "STR(BYTES_PER_LONG)"),
> %[cnt]\n"
> + " jmp 2b\n"
> + ".previous\n"
> + _ASM_EXTABLE(0b,3b)
> + _ASM_EXTABLE(1b,2b)
> + : [cnt] "=&c" (n), [to] "+D" (to)
> + : [bytes] "g" (n & (BYTES_PER_LONG - 1)),
> + [longs] "0" (n / BYTES_PER_LONG), "a" (0) );
> + clac();
> + }
> +
> + return n;
> }
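
With __do_clear_user() folded in, the fault-free path now amounts to
n / BYTES_PER_LONG full-word stores followed by the remaining byte
stores (the old macro used dword stores). A C-level sketch (made-up
name; BYTES_PER_LONG again hard-coded to its x86-64 value):

    #include <string.h>

    #define BYTES_PER_LONG 8 /* x86-64 */

    static unsigned clear_sketch(void *to, unsigned n)
    {
        unsigned longs = n / BYTES_PER_LONG;       /* 0: rep stosq */
        unsigned bytes = n & (BYTES_PER_LONG - 1); /* 1: rep stosb */

        memset(to, 0, longs * BYTES_PER_LONG);
        memset((char *)to + longs * BYTES_PER_LONG, 0, bytes);

        return 0; /* on a fault the fixup instead yields
                     bytes + remaining_longs * BYTES_PER_LONG */
    }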
>
> /**
> @@ -168,8 +165,7 @@ clear_user(void __user *to, unsigned n)
> * If some data could not be copied, this function will pad the copied
> * data to the requested size using zero bytes.
> */
> -unsigned long
> -copy_from_user(void *to, const void __user *from, unsigned n)
> +unsigned copy_from_user(void *to, const void __user *from, unsigned n)
> {
> if ( access_ok(from, n) )
> n = __copy_from_user(to, from, n);
> --- a/xen/include/asm-x86/uaccess.h
> +++ b/xen/include/asm-x86/uaccess.h
> @@ -11,12 +11,12 @@
>
> #include <asm/x86_64/uaccess.h>
>
> -unsigned long copy_to_user(void *to, const void *from, unsigned len);
> -unsigned long clear_user(void *to, unsigned len);
> -unsigned long copy_from_user(void *to, const void *from, unsigned len);
> +unsigned copy_to_user(void *to, const void *from, unsigned len);
> +unsigned clear_user(void *to, unsigned len);
> +unsigned copy_from_user(void *to, const void *from, unsigned len);
> /* Handles exceptions in both to and from, but doesn't do access_ok */
> -unsigned long __copy_to_user_ll(void *to, const void *from, unsigned n);
> -unsigned long __copy_from_user_ll(void *to, const void *from, unsigned n);
> +unsigned __copy_to_user_ll(void __user *to, const void *from, unsigned n);
> +unsigned __copy_from_user_ll(void *to, const void __user *from, unsigned n);
>
> extern long __get_user_bad(void);
> extern void __put_user_bad(void);