
[Xen-devel] [PATCH] x86/entry: Use 32bit xors rather than 64bit xors for clearing GPRs



Intel's Silvermont/Knights Landing architecture treats 64bit xors as full ALU
operations, rather than zeroing idioms.

No functional change, and no change in code volume (only the REX.W bit in the
instruction encoding changes).
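
For illustration, taking %r8 as an example (a sketch of the encodings, not
part of the patch):

    4d 31 c0        xor    %r8,%r8       # REX.W, REX.R and REX.B set
    45 31 c0        xor    %r8d,%r8d     # REX.R and REX.B set, REX.W clear

Both forms are three bytes, and a write to a 32bit register zero-extends into
the full 64bit register, so the result is identical.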

Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
CC: Jan Beulich <JBeulich@xxxxxxxx>

If anyone is interested, <20180211104949.12992-5-linux@xxxxxxxxxxxxxxxxxxxx>
is the LKML discussion on the subject.  Most likely this is a deliberate
simplification in the Knights* architecture, because compilers already follow
the optimisation guidance of "use xors for zeroing" and "prefer 32bit
operations to 64bit ones wherever possible".
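
As an illustrative datapoint (a minimal sketch; the function name is made up),
with optimisation enabled GCC and Clang already prefer the 32bit zeroing idiom
for plain C:

    #include <stdint.h>

    uint64_t zero(void)
    {
        /* At -O2 this typically compiles to "xor %eax,%eax; ret": the 32bit
         * xor zero-extends, so the 64bit form is never needed. */
        return 0;
    }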
---
 xen/include/asm-x86/asm_defns.h | 32 ++++++++++++++++----------------
 1 file changed, 16 insertions(+), 16 deletions(-)

diff --git a/xen/include/asm-x86/asm_defns.h b/xen/include/asm-x86/asm_defns.h
index aee14ba..6fc13d3 100644
--- a/xen/include/asm-x86/asm_defns.h
+++ b/xen/include/asm-x86/asm_defns.h
@@ -269,10 +269,10 @@ static always_inline void stac(void)
         movq  %r10,UREGS_r10(%rsp)
         movq  %r11,UREGS_r11(%rsp)
 .endif
-        xor   %r8, %r8
-        xor   %r9, %r9
-        xor   %r10, %r10
-        xor   %r11, %r11
+        xor   %r8d, %r8d
+        xor   %r9d, %r9d
+        xor   %r10d, %r10d
+        xor   %r11d, %r11d
         movq  %rbx,UREGS_rbx(%rsp)
         xor   %ebx, %ebx
         movq  %rbp,UREGS_rbp(%rsp)
@@ -289,10 +289,10 @@ static always_inline void stac(void)
         movq  %r14,UREGS_r14(%rsp)
         movq  %r15,UREGS_r15(%rsp)
 .endif
-        xor   %r12, %r12
-        xor   %r13, %r13
-        xor   %r14, %r14
-        xor   %r15, %r15
+        xor   %r12d, %r12d
+        xor   %r13d, %r13d
+        xor   %r14d, %r14d
+        xor   %r15d, %r15d
 .endm
 
 #define LOAD_ONE_REG(reg, compat) \
@@ -317,10 +317,10 @@ static always_inline void stac(void)
         movq  UREGS_r13(%rsp), %r13
         movq  UREGS_r12(%rsp), %r12
 .else
-        xor %r15, %r15
-        xor %r14, %r14
-        xor %r13, %r13
-        xor %r12, %r12
+        xor %r15d, %r15d
+        xor %r14d, %r14d
+        xor %r13d, %r13d
+        xor %r12d, %r12d
 .endif
         LOAD_ONE_REG(bp, \compat)
         LOAD_ONE_REG(bx, \compat)
@@ -330,10 +330,10 @@ static always_inline void stac(void)
         movq  UREGS_r9(%rsp),%r9
         movq  UREGS_r8(%rsp),%r8
 .else
-        xor %r11, %r11
-        xor %r10, %r10
-        xor %r9, %r9
-        xor %r8, %r8
+        xor %r11d, %r11d
+        xor %r10d, %r10d
+        xor %r9d, %r9d
+        xor %r8d, %r8d
 .endif
         LOAD_ONE_REG(ax, \compat)
         LOAD_ONE_REG(cx, \compat)
-- 
2.1.4


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel

 

