
[Xen-devel] [V12 1/4] x86/xsaves: use named operands instead of numbered operands in xrstor



From: Shuai Ruan <shuai.ruan@xxxxxxxxx>

This is a prerequisite patch for the later xsaves patches. It introduces
a macro to handle the xrstor fixup code, and switches that fixup code
from numbered operands to named operands.

Signed-off-by: Shuai Ruan <shuai.ruan@xxxxxxxxx>
---
 xen/arch/x86/xstate.c | 43 +++++++++++++++++++------------------------
 1 file changed, 19 insertions(+), 24 deletions(-)
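
For reference, a minimal standalone sketch (not part of the patch) of the
difference between numbered and named operands in GCC extended asm on x86,
which is what the rewrite below relies on. The helpers add_numbered() and
add_named() are hypothetical, for illustration only:

    #include <stdio.h>

    static inline int add_numbered(int a, int b)
    {
        int r;

        /* %0, %1, %2 refer to operands by position; inserting or
         * reordering a constraint renumbers every reference in the
         * template. */
        asm ( "mov %1, %0\n\t"
              "add %2, %0"
              : "=&r" (r)
              : "r" (a), "r" (b) );
        return r;
    }

    static inline int add_named(int a, int b)
    {
        int r;

        /* [res], [x], [y] refer to operands by name; the template stays
         * valid if the constraint list is rearranged or extended, which
         * is what makes a shared fixup macro practical. */
        asm ( "mov %[x], %[res]\n\t"
              "add %[y], %[res]"
              : [res] "=&r" (r)
              : [x] "r" (a), [y] "r" (b) );
        return r;
    }

    int main(void)
    {
        printf("%d %d\n", add_numbered(2, 3), add_named(2, 3));
        return 0;
    }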

diff --git a/xen/arch/x86/xstate.c b/xen/arch/x86/xstate.c
index 827e0e1..bb6d5e11 100644
--- a/xen/arch/x86/xstate.c
+++ b/xen/arch/x86/xstate.c
@@ -158,6 +158,16 @@ void xsave(struct vcpu *v, uint64_t mask)
         ptr->fpu_sse.x[FPU_WORD_SIZE_OFFSET] = word_size;
 }
 
+#define XRSTOR_FIXUP   ".section .fixup,\"ax\"           \n"    \
+                       "2: mov %[size],%%ecx             \n"    \
+                       "   xor %[lmask_out],%[lmask_out] \n"    \
+                       "   rep stosb                     \n"    \
+                       "   lea %[mem],%[ptr]             \n"    \
+                       "   mov %[lmask_in],%[lmask_out]  \n"    \
+                       "   jmp 1b                        \n"    \
+                       ".previous                        \n"    \
+                       _ASM_EXTABLE(1b, 2b)
+
 void xrstor(struct vcpu *v, uint64_t mask)
 {
     uint32_t hmask = mask >> 32;
@@ -188,38 +198,23 @@ void xrstor(struct vcpu *v, uint64_t mask)
     {
     default:
         asm volatile ( "1: .byte 0x48,0x0f,0xae,0x2f\n"
-                       ".section .fixup,\"ax\"      \n"
-                       "2: mov %5,%%ecx             \n"
-                       "   xor %1,%1                \n"
-                       "   rep stosb                \n"
-                       "   lea %2,%0                \n"
-                       "   mov %3,%1                \n"
-                       "   jmp 1b                   \n"
-                       ".previous                   \n"
-                       _ASM_EXTABLE(1b, 2b)
-                       : "+&D" (ptr), "+&a" (lmask)
-                       : "m" (*ptr), "g" (lmask), "d" (hmask),
-                         "m" (xsave_cntxt_size)
+                       XRSTOR_FIXUP
+                       : [ptr] "+&D" (ptr), [lmask_out] "+&a" (lmask)
+                       : [mem] "m" (*ptr), [lmask_in] "g" (lmask),
+                         [hmask] "d" (hmask), [size] "m" (xsave_cntxt_size)
                        : "ecx" );
         break;
     case 4: case 2:
         asm volatile ( "1: .byte 0x0f,0xae,0x2f\n"
-                       ".section .fixup,\"ax\" \n"
-                       "2: mov %5,%%ecx        \n"
-                       "   xor %1,%1           \n"
-                       "   rep stosb           \n"
-                       "   lea %2,%0           \n"
-                       "   mov %3,%1           \n"
-                       "   jmp 1b              \n"
-                       ".previous              \n"
-                       _ASM_EXTABLE(1b, 2b)
-                       : "+&D" (ptr), "+&a" (lmask)
-                       : "m" (*ptr), "g" (lmask), "d" (hmask),
-                         "m" (xsave_cntxt_size)
+                       XRSTOR_FIXUP
+                       : [ptr] "+&D" (ptr), [lmask_out] "+&a" (lmask)
+                       : [mem] "m" (*ptr), [lmask_in] "g" (lmask),
+                         [hmask] "d" (hmask), [size] "m" (xsave_cntxt_size)
                        : "ecx" );
         break;
     }
 }
+#undef XRSTOR_FIXUP
 
 bool_t xsave_enabled(const struct vcpu *v)
 {
-- 
1.9.1

